[launchd-changes] [23857] trunk

source_changes at macosforge.org
Tue Mar 10 20:13:06 PDT 2009


Revision: 23857
          http://trac.macosforge.org/projects/launchd/changeset/23857
Author:   dsorresso at apple.com
Date:     2009-03-10 20:13:03 -0700 (Tue, 10 Mar 2009)
Log Message:
-----------
<rdar://problem/6564965> File-based enabling/disabling mechanism for launchd jobs
<rdar://problem/6623625> 10A278 - Hang at shutdown with 7 processes still running
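
The first fix introduces an on-disk overrides database that launchctl consults and maintains instead of rewriting a job's Disabled key in its plist. A minimal sketch of the lookup side, assuming the database is a plist dictionary keyed by job label with a per-label "Disabled" boolean (the helper name, path handling, and layout here are illustrative assumptions, not launchd's actual code):

#include <CoreFoundation/CoreFoundation.h>
#include <stdbool.h>
#include <string.h>

/* Sketch: return true if the overrides database marks `label` disabled.
 * The flat label -> { "Disabled" : bool } layout is an assumption. */
static bool
job_disabled_by_override(const char *db_path, CFStringRef label)
{
	bool disabled = false;
	CFDataRef data = NULL;
	SInt32 err = 0;
	CFURLRef url = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault,
	    (const UInt8 *)db_path, strlen(db_path), false);

	if (url && CFURLCreateDataAndPropertiesFromResource(kCFAllocatorDefault,
	    url, &data, NULL, NULL, &err)) {
		CFPropertyListRef db = CFPropertyListCreateFromXMLData(kCFAllocatorDefault,
		    data, kCFPropertyListImmutable, NULL);
		if (db && CFGetTypeID(db) == CFDictionaryGetTypeID()) {
			CFDictionaryRef entry = (CFDictionaryRef)CFDictionaryGetValue((CFDictionaryRef)db, label);
			if (entry && CFGetTypeID(entry) == CFDictionaryGetTypeID()) {
				CFBooleanRef d = (CFBooleanRef)CFDictionaryGetValue(entry, CFSTR("Disabled"));
				if (d && CFGetTypeID(d) == CFBooleanGetTypeID()) {
					disabled = CFBooleanGetValue(d);
				}
			}
		}
		if (db) {
			CFRelease(db);
		}
		CFRelease(data);
	}
	if (url) {
		CFRelease(url);
	}
	return disabled;
}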

Modified Paths:
--------------
    trunk/launchd/src/launchctl.c
    trunk/launchd/src/launchd_core_logic.c
    trunk/launchd/src/launchd_runtime.c
    trunk/launchd.xcodeproj/project.pbxproj

Modified: trunk/launchd/src/launchctl.c
===================================================================
--- trunk/launchd/src/launchctl.c	2009-03-10 01:22:02 UTC (rev 23856)
+++ trunk/launchd/src/launchctl.c	2009-03-11 03:13:03 UTC (rev 23857)
@@ -349,7 +349,7 @@
 			exit(EXIT_FAILURE);
 		}
 	}
-
+	
 	if (NULL == readline) {
 		fprintf(stderr, "missing library: readline\n");
 		exit(EXIT_FAILURE);
@@ -1448,9 +1448,6 @@
 	}
 	
 	propertyList = CFPropertyListCreateFromXMLData(kCFAllocatorDefault, resourceData, kCFPropertyListMutableContainersAndLeaves, &errorString);
-	if (!propertyList) {
-		fprintf(stderr, "%s: propertyList is NULL\n", getprogname());
-	}
 	if( fileURL ) {
 		CFRelease(fileURL);
 	}
@@ -2238,7 +2235,7 @@
 		/* If we can't create or lock the overrides database, we'll fall back to writing to the
 		 * plist file directly.
 		 */
-		assumes((dbfd = open(db, O_RDONLY | O_EXLOCK | O_CREAT, S_IRUSR | S_IWUSR)) != -1);
+		assumes((dbfd = open(g_job_overrides_db_path, O_RDONLY | O_EXLOCK | O_CREAT, S_IRUSR | S_IWUSR)) != -1);
 		if( dbfd != -1 ) {
 			g_job_overrides_db = (CFMutableDictionaryRef)CreateMyPropertyListFromFile(g_job_overrides_db_path);
 			if( !g_job_overrides_db ) {
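
The last hunk above also fixes a bug: the open(2) call locked a stale `db` path instead of g_job_overrides_db_path. The surrounding pattern — create the overrides database if needed, take an exclusive lock so concurrent launchctl runs serialize, and fall back to editing the job's plist when that fails — reduces to roughly this (a sketch with an illustrative helper name):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/* Sketch: open the overrides database, creating it if absent, and take
 * an exclusive lock (O_EXLOCK is BSD-specific). A return of -1 tells
 * the caller to fall back to writing the job's plist directly. Closing
 * the descriptor releases the lock. */
static int
open_overrides_db(const char *path)
{
	int fd = open(path, O_RDONLY | O_EXLOCK | O_CREAT, S_IRUSR | S_IWUSR);
	if (fd == -1) {
		fprintf(stderr, "cannot create/lock %s; falling back to plist\n", path);
	}
	return fd;
}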

Modified: trunk/launchd/src/launchd_core_logic.c
===================================================================
--- trunk/launchd/src/launchd_core_logic.c	2009-03-10 01:22:02 UTC (rev 23856)
+++ trunk/launchd/src/launchd_core_logic.c	2009-03-11 03:13:03 UTC (rev 23857)
@@ -77,6 +77,7 @@
 #include <glob.h>
 #include <spawn.h>
 #include <libproc.h>
+#include <malloc/malloc.h>
 #if HAVE_SANDBOX
 #define __APPLE_API_PRIVATE
 #include <sandbox.h>
@@ -329,6 +330,20 @@
 
 #define MACHSERVICE_HASH_SIZE	37
 
+enum {
+	JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST,
+	JOBMGR_PHASE_NORMAL,
+	JOBMGR_PHASE_HOPEFULLY_EXITS_LAST,
+	JOBMGR_PHASE_LAST,
+};
+
+static char *s_phases[JOBMGR_PHASE_LAST + 1] = {
+	"HopefullyExitsFirst",
+	"Normal",
+	"HopefullyExitsLast",
+	"Finalized",
+};
+
 struct jobmgr_s {
 	kq_callback kqjobmgr_callback;
 	SLIST_ENTRY(jobmgr_s) sle;
@@ -344,6 +359,7 @@
 	mach_port_t init_audit_session;
 	jobmgr_t parentmgr;
 	int reboot_flags;
+	int shutdown_phase;
 	unsigned int global_on_demand_cnt;
 	unsigned int hopefully_first_cnt;
 	unsigned int normal_active_cnt;
@@ -353,6 +369,7 @@
 					killed_hopefully_first_jobs		:1,
 					killed_normal_jobs				:1,
 					killed_hopefully_last_jobs		:1,
+					killed_stray_jobs				:1,
 					created_via_subset				:1;
 	char sample_log_file[PATH_MAX];
 	union {
@@ -367,9 +384,6 @@
 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, mach_port_t session_port);
 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
 static jobmgr_t jobmgr_parent(jobmgr_t jm);
-static jobmgr_t jobmgr_do_hopefully_first_shutdown_phase(jobmgr_t jm);
-static jobmgr_t jobmgr_do_normal_shutdown_phase(jobmgr_t jm);
-static jobmgr_t jobmgr_do_hopefully_last_shutdown_phase(jobmgr_t jm);
 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
@@ -935,7 +949,7 @@
 static void
 jobmgr_still_alive_with_check(jobmgr_t jm)
 {
-	jobmgr_log(jm, LOG_NOTICE | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children", total_children, total_anon_children);
+	jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children. In %s phase of shutdown.", total_children, total_anon_children, s_phases[jm->shutdown_phase]);
 	jobmgr_log_active_jobs(jm);
 }
 
@@ -1162,7 +1176,6 @@
 	}
 	
 	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
-	kevent_mod((uintptr_t)j, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
 	
 	LIST_REMOVE(j, sle);
 	LIST_REMOVE(j, label_hash_sle);
@@ -2568,7 +2581,7 @@
 	if (j->exit_timeout) {
 		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
 	}
-
+	
 	LIST_REMOVE(j, pid_hash_sle);
 
 	if (j->wait_reply_port) {
@@ -2700,10 +2713,6 @@
 	j->lastlookup_gennum = 0;
 	j->p = 0;
 
-	if( !j->anonymous ) {
-		jobmgr_do_garbage_collection(j->mgr);
-	}
-
 	/*
 	 * We need to someday evaluate other jobs and find those who wish to track the
 	 * active/inactive state of this job. The current job_dispatch() logic makes
@@ -2915,17 +2924,23 @@
 void
 job_dispatch_curious_jobs(job_t j)
 {	
-	job_t ji = NULL;
-	SLIST_FOREACH( ji, &s_curious_jobs, curious_jobs_sle ) {
+	job_t ji = NULL, jt = NULL;
+	SLIST_FOREACH_SAFE( ji, &s_curious_jobs, curious_jobs_sle, jt ) {
 		struct semaphoreitem *si = NULL;
 		SLIST_FOREACH( si, &ji->semaphores, sle ) {			
-			if( si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED ) {
-				if( strncmp(si->what, j->label, strlen(j->label)) == 0 ) {
-					job_log(ji, LOG_NOTICE | LOG_CONSOLE, "Dispatching out of interest in \"%s\".", j->label);
-					job_assumes(ji, job_dispatch(ji, false) != NULL);
-					break;
-				}
+			if( !(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) ) {
+				continue;
 			}
+			
+			if( strncmp(si->what, j->label, strlen(j->label)) == 0 ) {
+				job_log(ji, LOG_NOTICE | LOG_CONSOLE, "Dispatching out of interest in \"%s\".", j->label);
+				
+				job_dispatch(ji, false);
+				/* ji could be removed here, so don't do anything with it or its semaphores
+				 * after this point.
+				 */
+				break;
+			}
 		}
 	}
 }
@@ -2973,7 +2988,7 @@
 			}
 		}
 	} else {
-		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s), kickstart = %s.", job_active(j), kickstart ? "true" : "false");
+		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
 	}
 
 	return j;
@@ -3126,6 +3141,13 @@
 		
 		/* Fake a kevent to keep our logic consistent. */
 		job_callback_proc(j, &kev);
+		
+		/* Normally, after getting a EVFILT_PROC event, we do garbage collection
+		 * on the root job manager. To make our fakery complete, we will do garbage
+		 * collection at the beginning of the next run loop cycle (after we're done
+		 * draining the current queue of kevents).
+		 */
+		job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
 	}
 	
 	if( jm ) {
@@ -3139,6 +3161,9 @@
 	bool program_changed = false;
 	int fflags = kev->fflags;
 	
+	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job:");
+	log_kevent_struct(LOG_DEBUG, kev, 0);
+	
 	if( fflags & NOTE_EXIT ) {
 		if( s_update_pid == (pid_t)kev->ident ) {
 			int status = 0;
@@ -3222,11 +3247,11 @@
 	if (fflags & NOTE_EXIT) {
 		job_reap(j);
 
-		if (j->anonymous) {
+		if( !j->anonymous ) {
+			j = job_dispatch(j, false);
+		} else {
 			job_remove(j);
 			j = NULL;
-		} else {
-			j = job_dispatch(j, false);
 		}
 	}
 
@@ -3354,7 +3379,7 @@
 		jobmgr_reap_bulk(jmi, kev);
 	}
 
-	if ((j = jobmgr_find_by_pid(jm, (pid_t) kev->ident, false))) {
+	if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
 		kev->udata = j;
 		job_callback(j, kev);
 	}
@@ -3414,6 +3439,8 @@
 		} else if( kev->ident == (uintptr_t)jm ) {
 			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
 			jobmgr_still_alive_with_check(jm);
+		} else if( kev->ident == (uintptr_t)&jm->reboot_flags ) {
+			jobmgr_do_garbage_collection(jm);
 		}
 		break;
 	case EVFILT_VNODE:
@@ -3425,6 +3452,20 @@
 				jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
 				s_no_hang_fd = _fd(_no_hang_fd);
 			}
+		} else if( kev->ident == (uintptr_t)fileno(g_console) ) {
+			int cfd = -1;
+			if( low_level_debug ) {
+				if( jobmgr_assumes(jm, (stdout = freopen(_PATH_CONSOLE, "w", stdout)) != NULL) ) {
+					cfd = fileno(stdout);
+				}
+				g_console = stdout;
+			} else {
+				if( jobmgr_assumes(jm, (g_console = freopen(_PATH_CONSOLE, "w", g_console)) != NULL) ) {
+					cfd = fileno(g_console);
+				}
+			}
+			jobmgr_assumes(jm, kevent_mod((uintptr_t)cfd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
+			_fd(cfd);
 		}
 		break;
 	default:
@@ -5422,212 +5463,104 @@
 }
 
 jobmgr_t
-jobmgr_do_hopefully_first_shutdown_phase(jobmgr_t jm)
+jobmgr_do_garbage_collection(jobmgr_t jm)
 {
 	if( !jm->shutting_down ) {
 		return jm;
 	}
-
-	if( jm->killed_hopefully_first_jobs ) {
-		return NULL;
-	}
-
-	jobmgr_log(jm, LOG_DEBUG, "Doing first phase of garbage collection.");
 	
-	uint32_t unkilled_cnt = 0;
-	job_t ji = NULL, jn = NULL;
-	LIST_FOREACH_SAFE( ji, &jm->jobs, sle, jn ) {
-		if( !ji->hopefully_exits_first ) {
-			continue;
-		}
-		
-		bool active = ji->p;
-		if( active && !ji->stopped ) {
-			/* If the job is active and we haven't told it to stop yet, stop it. */
-			job_stop(ji);
-			
-			/* We may have sent SIGKILL to the job in job_stop(). Clean jobs
-			 * get 1 second to exit.
-			 */
-			if( !ji->clean_kill ) {
-				unkilled_cnt += !ji->sent_sigkill ? 1 : 0;
-			} else {
-				unkilled_cnt += ji->clean_exit_timer_expired ? 1 : 0;
-			}
-		} else if( ji->stopped ) {
-			/* If the job is active and has been told to stop, disregard it
-			 * after we've sent SIGKILL.
-			 */
-			unkilled_cnt += !ji->sent_sigkill ? 1 : 0;
-		} else if( !active ) {
-			/* If the job was not active when shutdown began, remove it. */
-			job_remove(ji);
-		}
+	jobmgr_t jmi = NULL, jmn = NULL;
+	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
+		jobmgr_do_garbage_collection(jmi);
 	}
 	
-	/* If we've killed everyone, move on. */
-	if( unkilled_cnt == 0 ) {
-		jm->killed_hopefully_first_jobs = true;
-		jm = NULL;
+	if( SLIST_EMPTY(&jm->submgrs) ) {
+		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
+	} else {
+		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
 	}
 	
-	return jm;
-}
-
-jobmgr_t
-jobmgr_do_normal_shutdown_phase(jobmgr_t jm)
-{
-	if( !jm->shutting_down ) {
-		return jm;
-	}
-	
-	if( jm->killed_normal_jobs ) {
-		return NULL;
-	}
-	
-	jobmgr_log(jm, LOG_DEBUG, "Doing second phase of garbage collection.");
-	
-	uint32_t unkilled_cnt = 0;
-	job_t ji = NULL, jn = NULL;
-	LIST_FOREACH_SAFE( ji, &jm->jobs, sle, jn ) {
-		if( ji->migratory ) {
-			/* If we're shutting down, release the hold migratory jobs
-			 * have on us.
-			 */
-			job_remove(ji);			
-		}
-		
-		if( ji->anonymous || ji->hopefully_exits_first || ji->hopefully_exits_last ) {
-			continue;
-		}
-		
-		bool active = ji->p;
-		if( active && !ji->stopped ) {
-			/* If the job is active and we haven't told it to stop yet, stop it. */
-			job_stop(ji);
+	int phase = -1;
+	for( phase = jm->shutdown_phase; phase < JOBMGR_PHASE_LAST; phase++ ) {
+		if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_LAST ) {
+			if( jm == root_jobmgr ) {
+				simulate_pid1_crash();
+			}
 			
-			/* We may have sent SIGKILL to the job in job_stop(). Clean jobs
-			 * get 1 second to exit.
-			 */
-			if( !ji->clean_kill ) {
-				unkilled_cnt += !ji->sent_sigkill ? 1 : 0;
-			} else {
-				unkilled_cnt += ji->clean_exit_timer_expired ? 1 : 0;
+			if( jm == root_jobmgr && pid1_magic && !jm->killed_stray_jobs ) {
+				jobmgr_log_stray_children(jm, true);
+				jm->killed_stray_jobs = true;
 			}
-		} else if( ji->stopped ) {
-			/* If the job is active and has been told to stop, disregard it
-			 * after we've sent SIGKILL.
-			 */
-			unkilled_cnt += !ji->sent_sigkill ? 1 : 0;
-		} else if( !active ) {
-			/* If the job was not active when shutdown began, remove it. */
-			job_remove(ji);
 		}
-	}
-	
-	/* If we've killed everyone, move on. */
-	if( unkilled_cnt == 0 ) {
-		jm->killed_normal_jobs = true;
-		jm = NULL;
-	}
-	
-	return jm;
-}
 
-jobmgr_t
-jobmgr_do_hopefully_last_shutdown_phase(jobmgr_t jm)
-{
-	if( !jm->shutting_down ) {
-		return jm;
-	}
-	
-	if( jm == root_jobmgr ) {
-		simulate_pid1_crash();
-	}
-	
-	static bool killed_stray_jobs = false;
-	if( !killed_stray_jobs && pid1_magic && jm == root_jobmgr ) {
-		jobmgr_log_stray_children(jm, true);
-		killed_stray_jobs = true;
-	}
-	
-	if( jm->killed_hopefully_last_jobs || total_children == 0 ) {
-		return NULL;
-	}
-	
-	uint32_t unkilled_cnt = 0;
-	job_t ji = NULL, jn = NULL;
-	jobmgr_log(jm, LOG_DEBUG, "Doing third phase of garbage collection.");
-	LIST_FOREACH_SAFE( ji, &jm->jobs, sle, jn ) {
-		if( !ji->hopefully_exits_last ) {
-			continue;
-		}
-		
-		bool active = ji->p;
-		if( active && !ji->stopped ) {
-			/* If the job is active and we haven't told it to stop yet, stop it. */
-			job_stop(ji);
+		uint32_t unkilled_cnt = 0;
+		job_t ji = NULL, jn = NULL;
+		LIST_FOREACH_SAFE( ji, &jm->jobs, sle, jn ) {
+			if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST && !ji->hopefully_exits_first ) {
+				continue;
+			} else if( phase == JOBMGR_PHASE_NORMAL ) {
+				if( ji->migratory ) {
+					/* If we're shutting down, release the hold migratory jobs
+					 * have on us.
+					 */
+					job_remove(ji);			
+				}
+				
+				if( ji->hopefully_exits_first || ji->hopefully_exits_last ) {
+					continue;
+				}
+			} else if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_LAST && !ji->hopefully_exits_last ) {
+				continue;
+			}
 			
-			/* We may have sent SIGKILL to the job in job_stop(). Clean jobs
-			 * get 1 second to exit.
-			 */
-			if( !ji->clean_kill ) {
-				unkilled_cnt += !ji->sent_sigkill ? 1 : 0;
+			if( ji->anonymous ) {
+				continue;
+			}
+			
+			const char *active = job_active(ji);
+			if( !active ) {
+				job_log(ji, LOG_DEBUG, "Job is inactive. Removing.");
+				job_remove(ji);
 			} else {
-				unkilled_cnt += ji->clean_exit_timer_expired ? 1 : 0;
+				if( ji->p ) {
+					if( !ji->stopped ) {
+						job_log(ji, LOG_DEBUG, "Stopping job.");
+						job_stop(ji);
+						if( !ji->clean_kill ) {
+							unkilled_cnt++;
+						}
+					} else {
+						if( ji->clean_kill ) {
+							job_log(ji, LOG_DEBUG, "Job was clean and sent SIGKILL.");
+						} else {
+							job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
+						}
+						unkilled_cnt += !ji->sent_sigkill;
+					}
+				} else {
+					job_log(ji, LOG_DEBUG, "Job is active: %s", active);
+				}
 			}
-		} else if( ji->stopped ) {
-			/* If the job is active and has been told to stop, disregard it
-			 * after we've sent SIGKILL.
-			 */
-			unkilled_cnt += !ji->sent_sigkill ? 1 : 0;
-		} else if( !active ) {
-			/* If the job was not active when shutdown began, remove it. */
-			job_remove(ji);
+		} /* LIST_FOREACH_SAFE */
+		
+		if( unkilled_cnt == 0 ) {
+			jobmgr_log(jm, LOG_DEBUG, "Done with the %s bucket, advancing.", s_phases[jm->shutdown_phase]);
+			jm->shutdown_phase++;
+		} else {
+			jobmgr_log(jm, LOG_DEBUG, "Still %u unkilled job%s in %s bucket.", unkilled_cnt, unkilled_cnt > 1 ? "s" : "", s_phases[jm->shutdown_phase]);
+			phase = JOBMGR_PHASE_LAST;
 		}
-	}
+	} /* for */
 	
-	/* If we've killed everyone, move on. */
-	if( unkilled_cnt == 0 ) {
-		jm->killed_hopefully_last_jobs = true;
-		jm = NULL;
-	}
-	
-	return jm;
-}
-
-jobmgr_t
-jobmgr_do_garbage_collection(jobmgr_t jm)
-{
-	if( !jm->shutting_down ) {
-		return jm;
-	}
-	
-	jobmgr_t jmi = NULL, jmn = NULL;
-	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
-		jobmgr_do_garbage_collection(jmi);
-	}
-	
-	if( SLIST_EMPTY(&jm->submgrs) ) {
-		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
-	} else {
-		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
-	}
-	
-	jobmgr_t _jm = jobmgr_do_hopefully_first_shutdown_phase(jm);
-	if( !_jm ) {
-		_jm = jobmgr_do_normal_shutdown_phase(jm) ? : jobmgr_do_hopefully_last_shutdown_phase(jm);
-	}
-	
-	if( !_jm && SLIST_EMPTY(&jm->submgrs) ) {
+	jobmgr_t r = jm;
+	if( jm->shutdown_phase > JOBMGR_PHASE_HOPEFULLY_EXITS_LAST && SLIST_EMPTY(&jm->submgrs) ) {
 		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Removing.");
 		jobmgr_log_stray_children(jm, false);
 		jobmgr_remove(jm);
-	} else {
-		_jm = jm;
+		r = NULL;
 	}
 	
-	return _jm;
+	return r;
 }
 
 void
@@ -5950,6 +5883,8 @@
 			/* Start the update job. */
 			jobmgr_assumes(jm, kevent_mod((uintptr_t)do_sync, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 30, bootstrapper) != -1);
 		#endif
+			
+			jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
 		}
 	}
 
@@ -7048,7 +6983,7 @@
 		j->abandon_pg = (bool)inval;
 		break;
 	case VPROC_GSK_GLOBAL_ON_DEMAND:
-		job_log(j, LOG_NOTICE, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
+		job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
 		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
 		break;
 	case VPROC_GSK_BASIC_KEEPALIVE:
@@ -7250,7 +7185,7 @@
 	}
 
 #if !TARGET_OS_EMBEDDED
-	if( unlikely(ldc->euid) ) {
+	if (unlikely(ldc->euid)) {
 #else
 	if( unlikely(ldc->euid) && !j->embedded_shutdown_auth ) {
 #endif
@@ -7365,7 +7300,7 @@
 				if( !job_assumes(ji, sb.st_gid == 0) ) {
 					job_assumes(ji, chown(pu_db, which_user, 0) != -1);
 				}
-				if( !job_assumes(ji, sb.st_mode == S_IRWXU) ) {
+				if( !job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR)) ) {
 					job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
 				}
 			}
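
The bulk of this file's change collapses the three copy-pasted shutdown-phase functions into a single jobmgr_do_garbage_collection() that advances a per-manager shutdown_phase through the HopefullyExitsFirst, Normal, and HopefullyExitsLast buckets, moving on only once a bucket has no unkilled jobs. Stripped of the per-job stop/kill bookkeeping, the control flow is roughly this (a sketch; the struct fields and stop_bucket() helper are stand-ins, not launchd's types):

enum { PHASE_EXITS_FIRST, PHASE_NORMAL, PHASE_EXITS_LAST, PHASE_DONE };

struct job {
	struct job *next;
	int bucket;      /* which shutdown bucket this job belongs to */
	int anonymous;   /* anonymous jobs are skipped entirely */
	int unkilled;    /* 1 until the job has been SIGKILLed/reaped */
};

struct jobmgr {
	int shutdown_phase;
	struct job *jobs;
};

/* Stop every job in the given bucket and report how many are still
 * unkilled. The real code does job_stop()/job_remove() here. */
static unsigned
stop_bucket(struct jobmgr *jm, int phase)
{
	unsigned unkilled = 0;
	struct job *ji;
	for (ji = jm->jobs; ji; ji = ji->next) {
		if (ji->bucket != phase || ji->anonymous) {
			continue;
		}
		unkilled += ji->unkilled;
	}
	return unkilled;
}

/* Resume wherever this manager left off. A bucket must drain fully
 * before the next one is touched; if anything is still alive, stop
 * and let the next EVFILT_PROC-driven pass try again. */
static void
jobmgr_gc_sketch(struct jobmgr *jm)
{
	int phase;
	for (phase = jm->shutdown_phase; phase < PHASE_DONE; phase++) {
		if (stop_bucket(jm, phase) != 0) {
			break;
		}
		jm->shutdown_phase++;
	}
}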

Modified: trunk/launchd/src/launchd_runtime.c
===================================================================
--- trunk/launchd/src/launchd_runtime.c	2009-03-10 01:22:02 UTC (rev 23856)
+++ trunk/launchd/src/launchd_runtime.c	2009-03-11 03:13:03 UTC (rev 23857)
@@ -376,7 +376,7 @@
 		else FLAGIF(EV_ONESHOT)
 		else FLAGIF(EV_ERROR)
 		else {
-			flags_off += sprintf(flags_off, "0x%x", flags);
+			flags_off += sprintf(flags_off, "0x%hx", flags);
 			flags = 0;
 		}
 	}
@@ -503,7 +503,7 @@
 		}
 		break;
 	default:
-		snprintf(filter_buf, sizeof(filter_buf), "%d", kev->filter);
+		snprintf(filter_buf, sizeof(filter_buf), "%hd", kev->filter);
 		filter_str = filter_buf;
 		break;
 	}
@@ -597,11 +597,13 @@
 			kevi = &kev[i];
 
 			if (kevi->filter) {
-				Dl_info dli;
-
+				runtime_syslog(LOG_DEBUG, "Dispatching kevent...");
+				log_kevent_struct(LOG_DEBUG, kev, i);
+			#if 0
 				/* Check if kevi->udata was either malloc(3)ed or is a valid function pointer. 
 				 * If neither, it's probably an invalid pointer and we should log it. 
 				 */
+				Dl_info dli;
 				if (launchd_assumes(malloc_size(kevi->udata) || dladdr(kevi->udata, &dli))) {
 					runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_START, kevi->ident, kevi->filter, kevi->fflags);
 					(*((kq_callback *)kevi->udata))(kevi->udata, kevi);
@@ -610,6 +612,11 @@
 					runtime_syslog(LOG_ERR, "The following kevent had invalid context data.");
 					log_kevent_struct(LOG_EMERG, kevi, i);
 				}
+			#else
+				runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_START, kevi->ident, kevi->filter, kevi->fflags);
+				(*((kq_callback *)kevi->udata))(kevi->udata, kevi);
+				runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_END);
+			#endif
 			}
 		}
 	}
@@ -854,8 +861,9 @@
 		int i = 0;
 		for( i = bulk_kev_i + 1; i < bulk_kev_cnt; i++ ) {
 			if( bulk_kev[i].filter == filter && bulk_kev[i].ident == ident ) {
-				runtime_syslog(LOG_DEBUG, "Skipping PROC event for PID %lu", ident);
-				bulk_kev[i].filter = 0;
+				runtime_syslog(LOG_DEBUG, "Pruning the following kevent:");
+				log_kevent_struct(LOG_DEBUG, &bulk_kev[i], i);
+				bulk_kev[i].filter = (short)0;
 			}
 		}
 	}
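
The pruning hunk at the end keeps a reaped job from being dispatched twice in one pass: after handling an event, any later entry in the bulk kevent array with the same filter/ident gets its filter zeroed, and the dispatch loop skips zero-filter entries. In isolation the pattern looks like this (a sketch using the same bulk_kev naming as launchd_runtime.c):

#include <stdint.h>
#include <sys/event.h>

/* Sketch: cancel later queued events for the same filter/ident so the
 * job they point at (possibly freed by the current callback) is not
 * dispatched again this cycle. */
static void
prune_pending_kevents(struct kevent *bulk_kev, int bulk_kev_i, int bulk_kev_cnt,
    short filter, uintptr_t ident)
{
	int i;
	for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
		if (bulk_kev[i].filter == filter && bulk_kev[i].ident == ident) {
			bulk_kev[i].filter = 0;
		}
	}
}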

Modified: trunk/launchd.xcodeproj/project.pbxproj
===================================================================
--- trunk/launchd.xcodeproj/project.pbxproj	2009-03-10 01:22:02 UTC (rev 23856)
+++ trunk/launchd.xcodeproj/project.pbxproj	2009-03-11 03:13:03 UTC (rev 23857)
@@ -529,8 +529,8 @@
 				FC59A0B50E8C8A1F00D41150 /* launchd_runtime.c */,
 				FC59A0B60E8C8A1F00D41150 /* launchd_core_logic.h */,
 				FC59A0B70E8C8A1F00D41150 /* launchd_core_logic.c */,
+				72FDB15E0EA7D7B200B2AC84 /* launchd_ktrace.h */,
 				72FDB15D0EA7D7B200B2AC84 /* launchd_ktrace.c */,
-				72FDB15E0EA7D7B200B2AC84 /* launchd_ktrace.h */,
 			);
 			name = Source;
 			sourceTree = "<group>";