- while (1) {
- pthread_mutex_lock(&exec_lock);
- list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) {
- msg = running_with_devpath(loop_msg);
- if (msg == NULL) {
- /* move event to run list */
- pthread_mutex_lock(&running_lock);
- list_move_tail(&loop_msg->list, &running_list);
- pthread_mutex_unlock(&running_lock);
-
- pthread_create(&run_tid, &thr_attr, run_threads, (void *) loop_msg);
-
- dbg("moved seq %d to running list", loop_msg->seqnum);
- } else {
- dbg("delay seq %d, cause seq %d already working on '%s'",
- loop_msg->seqnum, msg->seqnum, msg->devpath);
+ for (i = 0; i < PATH_SIZE; i++) {
+ /* identical device event found */
+ if (running[i] == '\0' && waiting[i] == '\0')
+ return 1;
+
+ /* parent device event found */
+ if (running[i] == '\0' && waiting[i] == '/')
+ return 2;
+
+ /* child device event found */
+ if (running[i] == '/' && waiting[i] == '\0')
+ return 3;
+
+ /* no matching event */
+ if (running[i] != waiting[i])
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * returns still running task for the same device, its parent or its
+ * physical device
+ *
+ * Scans running_list for an event that conflicts with msg.  Return codes
+ * (as established by the return statements below):
+ *   0 - no conflicting event running, or msg is exempt (no devpath, or a
+ *       timeout is set)
+ *   1 - scan limit exceeded before a verdict could be reached
+ *   2 - an event for the same / a parent / a child devpath is running
+ *   3 - for "add" events: the physical device event is still running
+ */
+static int running_with_devpath(struct uevent_msg *msg, int limit)
+{
+ struct uevent_msg *loop_msg;
+ int childs_count = 0;
+
+ /* events without a devpath cannot conflict with anything */
+ if (msg->devpath == NULL)
+ return 0;
+
+ /* skip any events with a timeout set */
+ if (msg->timeout != 0)
+ return 0;
+
+ list_for_each_entry(loop_msg, &running_list, node) {
+ /* NOTE(review): post-increment means up to limit+1 entries are
+  * examined before bailing, and the dbg message prints the already
+  * incremented count — TODO confirm this off-by-one is intended */
+ if (limit && childs_count++ > limit) {
+ dbg("%llu, maximum number (%i) of child reached", msg->seqnum, childs_count);
+ return 1;
+ }
+ if (loop_msg->devpath == NULL)
+ continue;
+
+ /* return running parent/child device event */
+ if (compare_devpath(loop_msg->devpath, msg->devpath) != 0) {
+ dbg("%llu, child device event still running %llu (%s)",
+ msg->seqnum, loop_msg->seqnum, loop_msg->devpath);
+ return 2;
+ }
+
+ /* return running physical device event; only "add" events wait for
+  * their physical device (grounded in the strcmp below) */
+ if (msg->physdevpath && msg->action && strcmp(msg->action, "add") == 0)
+ if (compare_devpath(loop_msg->devpath, msg->physdevpath) != 0) {
+ dbg("%llu, physical device event still running %llu (%s)",
+ msg->seqnum, loop_msg->seqnum, loop_msg->devpath);
+ return 3;
+ }
+ }
+
+ return 0;
+}
+
+/* exec queue management routine executes the events and serializes events in the same sequence */
+static void exec_queue_manager(void)
+{
+ struct uevent_msg *loop_msg;
+ struct uevent_msg *tmp_msg;
+ int running;
+
+ if (list_empty(&exec_list))
+ return;
+
+ running = running_processes();
+ dbg("%d processes runnning on system", running);
+ if (running < 0)
+ running = max_childs_running;
+
+ list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, node) {
+ /* check running processes in our session and possibly throttle */
+ if (running >= max_childs_running) {
+ running = running_processes_in_session(sid, max_childs_running+10);
+ dbg("at least %d processes running in session", running);
+ if (running >= max_childs_running) {
+ dbg("delay seq %llu, cause too many processes already running",
+ loop_msg->seqnum);
+ return;