1: /*
2: * ntp_loopfilter.c - implements the NTP loop filter algorithm
3: *
4: * ATTENTION: Get approval from Dave Mills on all changes to this file!
5: *
6: */
7: #ifdef HAVE_CONFIG_H
8: # include <config.h>
9: #endif
10:
11: #include "ntpd.h"
12: #include "ntp_io.h"
13: #include "ntp_unixtime.h"
14: #include "ntp_stdlib.h"
15:
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>
21:
22: #if defined(VMS) && defined(VMS_LOCALUNIT) /*wjm*/
23: #include "ntp_refclock.h"
24: #endif /* VMS */
25:
26: #ifdef KERNEL_PLL
27: #include "ntp_syscall.h"
28: #endif /* KERNEL_PLL */
29:
30: /*
31: * This is an implementation of the clock discipline algorithm described
32: * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
33: * hybrid phase/frequency-lock loop. A number of sanity checks are
34: * included to protect against timewarps, timespikes and general mayhem.
35: * All units are in s and s/s, unless noted otherwise.
36: */
37: #define CLOCK_MAX .128 /* default step threshold (s) */
38: #define CLOCK_MINSTEP 900. /* default stepout threshold (s) */
39: #define CLOCK_PANIC 1000. /* default panic threshold (s) */
40: #define CLOCK_PHI 15e-6 /* max frequency error (s/s) */
41: #define CLOCK_PLL 16. /* PLL loop gain (log2) */
42: #define CLOCK_AVG 8. /* parameter averaging constant */
43: #define CLOCK_FLL .25 /* FLL loop gain */
44: #define CLOCK_ALLAN 11 /* Allan intercept (log2 s) */
45: #define CLOCK_DAY 86400. /* one day in seconds (s) */
46: #define CLOCK_JUNE (CLOCK_DAY * 30) /* June in seconds (s) */
47: #define CLOCK_LIMIT 30 /* poll-adjust threshold */
48: #define CLOCK_PGATE 4. /* poll-adjust gate */
49: #define PPS_MAXAGE 120 /* kernel pps signal timeout (s) */
50: #define FREQTOD(x) ((x) / 65536e6) /* NTP to double */
51: #define DTOFREQ(x) ((int32)((x) * 65536e6)) /* double to NTP */
52:
53: /*
54: * Clock discipline state machine. This is used to control the
55: * synchronization behavior during initialization and following a
56: * timewarp.
57: *
58: * State < step > step Comments
59: * ========================================================
60: * NSET FREQ step, FREQ freq not set
61: *
62: * FSET SYNC step, SYNC freq set
63: *
64: * FREQ if (mu < 900) if (mu < 900) set freq direct
65: * ignore ignore
66: * else else
67: * freq, SYNC freq, step, SYNC
68: *
69: * SYNC SYNC SPIK, ignore adjust phase/freq
70: *
71: * SPIK SYNC if (mu < 900) adjust phase/freq
72: * ignore
73: * step, SYNC
74: */
75: /*
76: * Kernel PLL/PPS state machine. This is used with the kernel PLL
77: * modifications described in the documentation.
78: *
79: * If kernel support for the ntp_adjtime() system call is available, the
80: * ntp_control flag is set. The ntp_enable and kern_enable flags can be
81: * set at configuration time or run time using ntpdc. If ntp_enable is
82: * false, the discipline loop is unlocked and no corrections of any kind
83: * are made. If both ntp_control and kern_enable are set, the kernel
84: * support is used as described above; if false, the kernel is bypassed
85: * entirely and the daemon discipline used instead.
86: *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel) now in Solaris disciplines the microseconds. The
 * second and third (nanokernel) discipline the clock in nanoseconds.
 * These versions are identified if the symbol STA_PLL is present in the
 * header file /usr/include/sys/timex.h. The third and current version
 * includes TAI offset and is identified by the symbol NTP_API with
 * value 4.
94: *
 * Each PPS time/frequency discipline can be enabled by the atom driver
 * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
 * set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
99: *
100: * If an external clock is present, the clock driver sets STA_CLK in the
101: * status word. When the local clock driver sees this bit, it updates
102: * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
103: * set to zero, in which case the system clock is not adjusted. This is
104: * also a signal for the external clock driver to discipline the system
105: * clock. Unless specified otherwise, all times are in seconds.
106: */
107: /*
108: * Program variables that can be tinkered.
109: */
110: double clock_max = CLOCK_MAX; /* step threshold */
111: double clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
112: double clock_panic = CLOCK_PANIC; /* panic threshold */
113: double clock_phi = CLOCK_PHI; /* dispersion rate (s/s) */
114: u_char allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */
115:
116: /*
117: * Program variables
118: */
119: static double clock_offset; /* offset */
120: double clock_jitter; /* offset jitter */
121: double drift_comp; /* frequency (s/s) */
122: double clock_stability; /* frequency stability (wander) (s/s) */
123: double clock_codec; /* audio codec frequency (samples/s) */
124: static u_long clock_epoch; /* last update */
125: u_int sys_tai; /* TAI offset from UTC */
126: static void rstclock (int, double); /* transition function */
127: static double direct_freq(double); /* direct set frequency */
128: static void set_freq(double); /* set frequency */
129:
130: #ifdef KERNEL_PLL
131: static struct timex ntv; /* ntp_adjtime() parameters */
132: int pll_status; /* last kernel status bits */
133: #if defined(STA_NANO) && NTP_API == 4
134: static u_int loop_tai; /* last TAI offset */
135: #endif /* STA_NANO */
136: #endif /* KERNEL_PLL */
137:
138: /*
139: * Clock state machine control flags
140: */
141: int ntp_enable = 1; /* clock discipline enabled */
142: int pll_control; /* kernel support available */
143: int kern_enable = 1; /* kernel support enabled */
144: int pps_enable; /* kernel PPS discipline enabled */
145: int ext_enable; /* external clock enabled */
146: int pps_stratum; /* pps stratum */
147: int allow_panic = FALSE; /* allow panic correction */
148: int mode_ntpdate = FALSE; /* exit on first clock set */
149:
150: /*
151: * Clock state machine variables
152: */
153: int state; /* clock discipline state */
154: u_char sys_poll; /* time constant/poll (log2 s) */
155: int tc_counter; /* jiggle counter */
156: double last_offset; /* last offset (s) */
157: static u_long last_step; /* last clock step */
158:
159: /*
160: * Huff-n'-puff filter variables
161: */
162: static double *sys_huffpuff; /* huff-n'-puff filter */
163: static int sys_hufflen; /* huff-n'-puff filter stages */
164: static int sys_huffptr; /* huff-n'-puff filter pointer */
165: static double sys_mindly; /* huff-n'-puff filter min delay */
166:
167: #if defined(KERNEL_PLL)
168: /* Emacs cc-mode goes nuts if we split the next line... */
169: #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
170: MOD_STATUS | MOD_TIMECONST)
171: #ifdef SIGSYS
172: static void pll_trap (int); /* configuration trap */
173: static struct sigaction sigsys; /* current sigaction status */
174: static struct sigaction newsigsys; /* new sigaction status */
175: static sigjmp_buf env; /* environment var. for pll_trap() */
176: #endif /* SIGSYS */
177: #endif /* KERNEL_PLL */
178:
179: /*
180: * init_loopfilter - initialize loop filter data
181: */
182: void
183: init_loopfilter(void)
184: {
185: /*
186: * Initialize state variables.
187: */
188: sys_poll = ntp_minpoll;
189: clock_jitter = LOGTOD(sys_precision);
190: }
191:
192: /*
193: * local_clock - the NTP logical clock loop filter.
194: *
195: * Return codes:
196: * -1 update ignored: exceeds panic threshold
197: * 0 update ignored: popcorn or exceeds step threshold
198: * 1 clock was slewed
199: * 2 clock was stepped
200: *
201: * LOCKCLOCK: The only thing this routine does is set the
202: * sys_rootdisp variable equal to the peer dispersion.
203: */
204: int
205: local_clock(
206: struct peer *peer, /* synch source peer structure */
207: double fp_offset /* clock offset (s) */
208: )
209: {
210: int rval; /* return code */
211: int osys_poll; /* old system poll */
212: double mu; /* interval since last update */
213: double clock_frequency; /* clock frequency */
214: double dtemp, etemp; /* double temps */
215: char tbuf[80]; /* report buffer */
216:
217: /*
218: * If the loop is opened or the NIST LOCKCLOCK is in use,
219: * monitor and record the offsets anyway in order to determine
220: * the open-loop response and then go home.
221: */
222: #ifdef LOCKCLOCK
223: return (0);
224:
225: #else /* LOCKCLOCK */
226: if (!ntp_enable) {
227: record_loop_stats(fp_offset, drift_comp, clock_jitter,
228: clock_stability, sys_poll);
229: return (0);
230: }
231:
232: /*
233: * If the clock is way off, panic is declared. The clock_panic
234: * defaults to 1000 s; if set to zero, the panic will never
235: * occur. The allow_panic defaults to FALSE, so the first panic
236: * will exit. It can be set TRUE by a command line option, in
237: * which case the clock will be set anyway and time marches on.
238: * But, allow_panic will be set FALSE when the update is less
239: * than the step threshold; so, subsequent panics will exit.
240: */
241: if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
242: !allow_panic) {
243: snprintf(tbuf, sizeof(tbuf),
244: "%+.0f s; set clock manually within %.0f s.",
245: fp_offset, clock_panic);
246: report_event(EVNT_SYSFAULT, NULL, tbuf);
247: return (-1);
248: }
249:
250: /*
251: * This section simulates ntpdate. If the offset exceeds the
252: * step threshold (128 ms), step the clock to that time and
253: * exit. Othewise, slew the clock to that time and exit. Note
254: * that the slew will persist and eventually complete beyond the
255: * life of this program. Note that while ntpdate is active, the
256: * terminal does not detach, so the termination message prints
257: * directly to the terminal.
258: */
259: if (mode_ntpdate) {
260: if (fabs(fp_offset) > clock_max && clock_max > 0) {
261: step_systime(fp_offset);
262: msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
263: fp_offset);
264: printf("ntpd: time set %+.6fs\n", fp_offset);
265: } else {
266: adj_systime(fp_offset);
267: msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
268: fp_offset);
269: printf("ntpd: time slew %+.6fs\n", fp_offset);
270: }
271: record_loop_stats(fp_offset, drift_comp, clock_jitter,
272: clock_stability, sys_poll);
273: exit (0);
274: }
275:
276: /*
277: * The huff-n'-puff filter finds the lowest delay in the recent
278: * interval. This is used to correct the offset by one-half the
279: * difference between the sample delay and minimum delay. This
280: * is most effective if the delays are highly assymetric and
281: * clockhopping is avoided and the clock frequency wander is
282: * relatively small.
283: */
284: if (sys_huffpuff != NULL) {
285: if (peer->delay < sys_huffpuff[sys_huffptr])
286: sys_huffpuff[sys_huffptr] = peer->delay;
287: if (peer->delay < sys_mindly)
288: sys_mindly = peer->delay;
289: if (fp_offset > 0)
290: dtemp = -(peer->delay - sys_mindly) / 2;
291: else
292: dtemp = (peer->delay - sys_mindly) / 2;
293: fp_offset += dtemp;
294: #ifdef DEBUG
295: if (debug)
296: printf(
297: "local_clock: size %d mindly %.6f huffpuff %.6f\n",
298: sys_hufflen, sys_mindly, dtemp);
299: #endif
300: }
301:
302: /*
303: * Clock state machine transition function which defines how the
304: * system reacts to large phase and frequency excursion. There
305: * are two main regimes: when the offset exceeds the step
306: * threshold (128 ms) and when it does not. Under certain
307: * conditions updates are suspended until the stepout theshold
308: * (900 s) is exceeded. See the documentation on how these
309: * thresholds interact with commands and command line options.
310: *
311: * Note the kernel is disabled if step is disabled or greater
312: * than 0.5 s or in ntpdate mode.
313: */
314: osys_poll = sys_poll;
315: if (sys_poll < peer->minpoll)
316: sys_poll = peer->minpoll;
317: if (sys_poll > peer->maxpoll)
318: sys_poll = peer->maxpoll;
319: mu = current_time - clock_epoch;
320: clock_frequency = drift_comp;
321: rval = 1;
322: if (fabs(fp_offset) > clock_max && clock_max > 0) {
323: switch (state) {
324:
325: /*
326: * In SYNC state we ignore the first outlyer and switch
327: * to SPIK state.
328: */
329: case EVNT_SYNC:
330: snprintf(tbuf, sizeof(tbuf), "%+.6f s",
331: fp_offset);
332: report_event(EVNT_SPIK, NULL, tbuf);
333: state = EVNT_SPIK;
334: return (0);
335:
336: /*
337: * In FREQ state we ignore outlyers and inlyers. At the
338: * first outlyer after the stepout threshold, compute
339: * the apparent frequency correction and step the phase.
340: */
341: case EVNT_FREQ:
342: if (mu < clock_minstep)
343: return (0);
344:
345: clock_frequency = direct_freq(fp_offset);
346:
347: /* fall through to S_SPIK */
348:
349: /*
350: * In SPIK state we ignore succeeding outlyers until
351: * either an inlyer is found or the stepout threshold is
352: * exceeded.
353: */
354: case EVNT_SPIK:
355: if (mu < clock_minstep)
356: return (0);
357:
358: /* fall through to default */
359:
360: /*
361: * We get here by default in NSET and FSET states and
362: * from above in FREQ or SPIK states.
363: *
364: * In NSET state an initial frequency correction is not
365: * available, usually because the frequency file has not
366: * yet been written. Since the time is outside the step
367: * threshold, the clock is stepped. The frequency will
368: * be set directly following the stepout interval.
369: *
370: * In FSET state the initial frequency has been set from
371: * the frequency file. Since the time is outside the
372: * step threshold, the clock is stepped immediately,
373: * rather than after the stepout interval. Guys get
374: * nervous if it takes 15 minutes to set the clock for
375: * the first time.
376: *
377: * In FREQ and SPIK states the stepout threshold has
378: * expired and the phase is still above the step
379: * threshold. Note that a single spike greater than the
380: * step threshold is always suppressed, even with a
381: * long time constant.
382: */
383: default:
384: snprintf(tbuf, sizeof(tbuf), "%+.6f s",
385: fp_offset);
386: report_event(EVNT_CLOCKRESET, NULL, tbuf);
387: step_systime(fp_offset);
388: reinit_timer();
389: tc_counter = 0;
390: clock_jitter = LOGTOD(sys_precision);
391: rval = 2;
392: if (state == EVNT_NSET || (current_time -
393: last_step) < clock_minstep * 2) {
394: rstclock(EVNT_FREQ, 0);
395: return (rval);
396: }
397: last_step = current_time;
398: break;
399: }
400: rstclock(EVNT_SYNC, 0);
401: } else {
402:
403: /*
404: * The offset is less than the step threshold. Calculate
405: * the jitter as the exponentially weighted offset
406: * differences.
407: */
408: etemp = SQUARE(clock_jitter);
409: dtemp = SQUARE(max(fabs(fp_offset - last_offset),
410: LOGTOD(sys_precision)));
411: clock_jitter = SQRT(etemp + (dtemp - etemp) /
412: CLOCK_AVG);
413: switch (state) {
414:
415: /*
416: * In NSET state this is the first update received and
417: * the frequency has not been initialized. Adjust the
418: * phase, but do not adjust the frequency until after
419: * the stepout threshold.
420: */
421: case EVNT_NSET:
422: rstclock(EVNT_FREQ, fp_offset);
423: break;
424:
425: /*
426: * In FSET state this is the first update received and
427: * the frequency has been initialized. Adjust the phase,
428: * but do not adjust the frequency until the next
429: * update.
430: */
431: case EVNT_FSET:
432: rstclock(EVNT_SYNC, fp_offset);
433: break;
434:
435: /*
436: * In FREQ state ignore updates until the stepout
437: * threshold. After that, compute the new frequency, but
438: * do not adjust the phase or frequency until the next
439: * update.
440: */
441: case EVNT_FREQ:
442: if (mu < clock_minstep)
443: return (0);
444:
445: clock_frequency = direct_freq(fp_offset);
446: rstclock(EVNT_SYNC, 0);
447: break;
448:
449:
450: /*
451: * We get here by default in SYNC and SPIK states. Here
452: * we compute the frequency update due to PLL and FLL
453: * contributions.
454: */
455: default:
456: allow_panic = FALSE;
457:
458: /*
459: * The FLL and PLL frequency gain constants
460: * depend on the time constant and Allan
461: * intercept. The PLL is always used, but
462: * becomes ineffective above the Allan intercept
463: * where the FLL becomes effective.
464: */
465: if (sys_poll >= allan_xpt)
466: clock_frequency += (fp_offset -
467: clock_offset) /
468: max(ULOGTOD(sys_poll), mu) *
469: CLOCK_FLL;
470:
471: /*
472: * The PLL frequency gain (numerator) depends on
473: * the minimum of the update interval and Allan
474: * intercept. This reduces the PLL gain when the
475: * FLL becomes effective.
476: */
477: etemp = min(ULOGTOD(allan_xpt), mu);
478: dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
479: clock_frequency += fp_offset * etemp / (dtemp *
480: dtemp);
481: rstclock(EVNT_SYNC, fp_offset);
482: break;
483: }
484: }
485:
486: #ifdef KERNEL_PLL
487: /*
488: * This code segment works when clock adjustments are made using
489: * precision time kernel support and the ntp_adjtime() system
490: * call. This support is available in Solaris 2.6 and later,
491: * Digital Unix 4.0 and later, FreeBSD, Linux and specially
492: * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
493: * DECstation 5000/240 and Alpha AXP, additional kernel
494: * modifications provide a true microsecond clock and nanosecond
495: * clock, respectively.
496: *
497: * Important note: The kernel discipline is used only if the
498: * step threshold is less than 0.5 s, as anything higher can
499: * lead to overflow problems. This might occur if some misguided
500: * lad set the step threshold to something ridiculous.
501: */
502: if (pll_control && kern_enable) {
503:
504: /*
505: * We initialize the structure for the ntp_adjtime()
506: * system call. We have to convert everything to
507: * microseconds or nanoseconds first. Do not update the
508: * system variables if the ext_enable flag is set. In
509: * this case, the external clock driver will update the
510: * variables, which will be read later by the local
511: * clock driver. Afterwards, remember the time and
512: * frequency offsets for jitter and stability values and
513: * to update the frequency file.
514: */
515: memset(&ntv, 0, sizeof(ntv));
516: if (ext_enable) {
517: ntv.modes = MOD_STATUS;
518: } else {
519: #ifdef STA_NANO
520: ntv.modes = MOD_BITS | MOD_NANO;
521: #else /* STA_NANO */
522: ntv.modes = MOD_BITS;
523: #endif /* STA_NANO */
524: if (clock_offset < 0)
525: dtemp = -.5;
526: else
527: dtemp = .5;
528: #ifdef STA_NANO
529: ntv.offset = (int32)(clock_offset * 1e9 +
530: dtemp);
531: ntv.constant = sys_poll;
532: #else /* STA_NANO */
533: ntv.offset = (int32)(clock_offset * 1e6 +
534: dtemp);
535: ntv.constant = sys_poll - 4;
536: #endif /* STA_NANO */
537: ntv.esterror = (u_int32)(clock_jitter * 1e6);
538: ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
539: sys_rootdisp) * 1e6);
540: ntv.status = STA_PLL;
541:
542: /*
543: * Enable/disable the PPS if requested.
544: */
545: if (pps_enable) {
546: if (!(pll_status & STA_PPSTIME))
547: report_event(EVNT_KERN,
548: NULL, "PPS enabled");
549: ntv.status |= STA_PPSTIME | STA_PPSFREQ;
550: } else {
551: if (pll_status & STA_PPSTIME)
552: report_event(EVNT_KERN,
553: NULL, "PPS disabled");
554: ntv.status &= ~(STA_PPSTIME |
555: STA_PPSFREQ);
556: }
557: if (sys_leap == LEAP_ADDSECOND)
558: ntv.status |= STA_INS;
559: else if (sys_leap == LEAP_DELSECOND)
560: ntv.status |= STA_DEL;
561: }
562:
563: /*
564: * Pass the stuff to the kernel. If it squeals, turn off
565: * the pps. In any case, fetch the kernel offset,
566: * frequency and jitter.
567: */
568: if (ntp_adjtime(&ntv) == TIME_ERROR) {
569: if (!(ntv.status & STA_PPSSIGNAL))
570: report_event(EVNT_KERN, NULL,
571: "PPS no signal");
572: }
573: pll_status = ntv.status;
574: #ifdef STA_NANO
575: clock_offset = ntv.offset / 1e9;
576: #else /* STA_NANO */
577: clock_offset = ntv.offset / 1e6;
578: #endif /* STA_NANO */
579: clock_frequency = FREQTOD(ntv.freq);
580:
581: /*
582: * If the kernel PPS is lit, monitor its performance.
583: */
584: if (ntv.status & STA_PPSTIME) {
585: #ifdef STA_NANO
586: clock_jitter = ntv.jitter / 1e9;
587: #else /* STA_NANO */
588: clock_jitter = ntv.jitter / 1e6;
589: #endif /* STA_NANO */
590: }
591:
592: #if defined(STA_NANO) && NTP_API == 4
593: /*
594: * If the TAI changes, update the kernel TAI.
595: */
596: if (loop_tai != sys_tai) {
597: loop_tai = sys_tai;
598: ntv.modes = MOD_TAI;
599: ntv.constant = sys_tai;
600: ntp_adjtime(&ntv);
601: }
602: #endif /* STA_NANO */
603: }
604: #endif /* KERNEL_PLL */
605:
606: /*
607: * Clamp the frequency within the tolerance range and calculate
608: * the frequency difference since the last update.
609: */
610: if (fabs(clock_frequency) > NTP_MAXFREQ)
611: msyslog(LOG_NOTICE,
612: "frequency error %.0f PPM exceeds tolerance %.0f PPM",
613: clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
614: dtemp = SQUARE(clock_frequency - drift_comp);
615: if (clock_frequency > NTP_MAXFREQ)
616: drift_comp = NTP_MAXFREQ;
617: else if (clock_frequency < -NTP_MAXFREQ)
618: drift_comp = -NTP_MAXFREQ;
619: else
620: drift_comp = clock_frequency;
621:
622: /*
623: * Calculate the wander as the exponentially weighted RMS
624: * frequency differences. Record the change for the frequency
625: * file update.
626: */
627: etemp = SQUARE(clock_stability);
628: clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
629: drift_file_sw = TRUE;
630:
631: /*
632: * Here we adjust the timeconstan by comparing the current
633: * offset with the clock jitter. If the offset is less than the
634: * clock jitter times a constant, then the averaging interval is
635: * increased, otherwise it is decreased. A bit of hysteresis
636: * helps calm the dance. Works best using burst mode.
637: */
638: if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
639: tc_counter += sys_poll;
640: if (tc_counter > CLOCK_LIMIT) {
641: tc_counter = CLOCK_LIMIT;
642: if (sys_poll < peer->maxpoll) {
643: tc_counter = 0;
644: sys_poll++;
645: }
646: }
647: } else {
648: tc_counter -= sys_poll << 1;
649: if (tc_counter < -CLOCK_LIMIT) {
650: tc_counter = -CLOCK_LIMIT;
651: if (sys_poll > peer->minpoll) {
652: tc_counter = 0;
653: sys_poll--;
654: }
655: }
656: }
657:
658: /*
659: * If the time constant has changed, update the poll variables.
660: */
661: if (osys_poll != sys_poll)
662: poll_update(peer, sys_poll);
663:
664: /*
665: * Yibbidy, yibbbidy, yibbidy; that'h all folks.
666: */
667: record_loop_stats(clock_offset, drift_comp, clock_jitter,
668: clock_stability, sys_poll);
669: #ifdef DEBUG
670: if (debug)
671: printf(
672: "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
673: clock_offset, clock_jitter, drift_comp * 1e6,
674: clock_stability * 1e6, sys_poll);
675: #endif /* DEBUG */
676: return (rval);
677: #endif /* LOCKCLOCK */
678: }
679:
680:
681: /*
682: * adj_host_clock - Called once every second to update the local clock.
683: *
684: * LOCKCLOCK: The only thing this routine does is increment the
685: * sys_rootdisp variable.
686: */
687: void
688: adj_host_clock(
689: void
690: )
691: {
692: double adjustment;
693:
694: /*
695: * Update the dispersion since the last update. In contrast to
696: * NTPv3, NTPv4 does not declare unsynchronized after one day,
697: * since the dispersion check serves this function. Also,
698: * since the poll interval can exceed one day, the old test
699: * would be counterproductive.
700: */
701: sys_rootdisp += clock_phi;
702:
703: #ifndef LOCKCLOCK
704: /*
705: * If clock discipline is disabled or if the kernel is enabled,
706: * get out of Dodge quick.
707: */
708: if (!ntp_enable || mode_ntpdate || (pll_control &&
709: kern_enable))
710: return;
711:
712: /*
713: * Implement the phase and frequency adjustments. The gain
714: * factor (denominator) increases with poll interval, so is
715: * dominated by the FLL above the Allan intercept.
716: */
717: adjustment = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
718: clock_offset -= adjustment;
719: adj_systime(adjustment + drift_comp);
720: #endif /* LOCKCLOCK */
721: }
722:
723:
724: /*
725: * Clock state machine. Enter new state and set state variables.
726: */
727: static void
728: rstclock(
729: int trans, /* new state */
730: double offset /* new offset */
731: )
732: {
733: #ifdef DEBUG
734: if (debug > 1)
735: printf("local_clock: mu %lu state %d poll %d count %d\n",
736: current_time - clock_epoch, trans, sys_poll,
737: tc_counter);
738: #endif
739: if (trans != state && trans != EVNT_FSET)
740: report_event(trans, NULL, NULL);
741: state = trans;
742: last_offset = clock_offset = offset;
743: clock_epoch = current_time;
744: }
745:
746: /*
747: * calc_freq - calculate frequency directly
748: *
749: * This is very carefully done. When the offset is first computed at the
750: * first update, a residual frequency component results. Subsequently,
751: * updates are suppresed until the end of the measurement interval while
752: * the offset is amortized. At the end of the interval the frequency is
753: * calculated from the current offset, residual offset, length of the
754: * interval and residual frequency component. At the same time the
755: * frequenchy file is armed for update at the next hourly stats.
756: */
757: static double
758: direct_freq(
759: double fp_offset
760: )
761: {
762:
763: #ifdef KERNEL_PLL
764: /*
765: * If the kernel is enabled, we need the residual offset to
766: * calculate the frequency correction.
767: */
768: if (pll_control && kern_enable) {
769: memset(&ntv, 0, sizeof(ntv));
770: ntp_adjtime(&ntv);
771: #ifdef STA_NANO
772: clock_offset = ntv.offset / 1e9;
773: #else /* STA_NANO */
774: clock_offset = ntv.offset / 1e6;
775: #endif /* STA_NANO */
776: drift_comp = FREQTOD(ntv.freq);
777: }
778: #endif /* KERNEL_PLL */
779: set_freq((fp_offset - clock_offset) / (current_time -
780: clock_epoch) + drift_comp);
781: wander_resid = 0;
782: return (drift_comp);
783: }
784:
785:
786: /*
787: * set_freq - set clock frequency
788: */
789: static void
790: set_freq(
791: double freq /* frequency update */
792: )
793: {
794: char tbuf[80];
795:
796: drift_comp = freq;
797:
798: #ifdef KERNEL_PLL
799: /*
800: * If the kernel is enabled, update the kernel frequency.
801: */
802: if (pll_control && kern_enable) {
803: memset(&ntv, 0, sizeof(ntv));
804: ntv.modes = MOD_FREQUENCY;
805: ntv.freq = DTOFREQ(drift_comp);
806: ntp_adjtime(&ntv);
807: snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM",
808: drift_comp * 1e6);
809: report_event(EVNT_FSET, NULL, tbuf);
810: } else {
811: snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM",
812: drift_comp * 1e6);
813: report_event(EVNT_FSET, NULL, tbuf);
814: }
815: #else /* KERNEL_PLL */
816: snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp *
817: 1e6);
818: report_event(EVNT_FSET, NULL, tbuf);
819: #endif /* KERNEL_PLL */
820: }
821:
822: /*
823: * huff-n'-puff filter
824: */
825: void
826: huffpuff()
827: {
828: int i;
829:
830: if (sys_huffpuff == NULL)
831: return;
832:
833: sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
834: sys_huffpuff[sys_huffptr] = 1e9;
835: sys_mindly = 1e9;
836: for (i = 0; i < sys_hufflen; i++) {
837: if (sys_huffpuff[i] < sys_mindly)
838: sys_mindly = sys_huffpuff[i];
839: }
840: }
841:
842:
843: /*
844: * loop_config - configure the loop filter
845: *
846: * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
847: */
848: void
849: loop_config(
850: int item,
851: double freq
852: )
853: {
854: int i;
855:
856: #ifdef DEBUG
857: if (debug > 1)
858: printf("loop_config: item %d freq %f\n", item, freq);
859: #endif
860: switch (item) {
861:
862: /*
863: * We first assume the kernel supports the ntp_adjtime()
864: * syscall. If that syscall works, initialize the kernel time
865: * variables. Otherwise, continue leaving no harm behind.
866: */
867: case LOOP_DRIFTINIT:
868: #ifndef LOCKCLOCK
869: #ifdef KERNEL_PLL
870: if (mode_ntpdate)
871: break;
872:
873: pll_control = 1;
874: memset(&ntv, 0, sizeof(ntv));
875: ntv.modes = MOD_BITS;
876: ntv.status = STA_PLL;
877: ntv.maxerror = MAXDISPERSE;
878: ntv.esterror = MAXDISPERSE;
879: ntv.constant = sys_poll;
880: #ifdef SIGSYS
881: /*
882: * Use sigsetjmp() to save state and then call
883: * ntp_adjtime(); if it fails, then siglongjmp() is used
884: * to return control
885: */
886: newsigsys.sa_handler = pll_trap;
887: newsigsys.sa_flags = 0;
888: if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
889: msyslog(LOG_ERR,
890: "sigaction() fails to save SIGSYS trap: %m");
891: pll_control = 0;
892: }
893: if (sigsetjmp(env, 1) == 0)
894: ntp_adjtime(&ntv);
895: if ((sigaction(SIGSYS, &sigsys,
896: (struct sigaction *)NULL))) {
897: msyslog(LOG_ERR,
898: "sigaction() fails to restore SIGSYS trap: %m");
899: pll_control = 0;
900: }
901: #else /* SIGSYS */
902: ntp_adjtime(&ntv);
903: #endif /* SIGSYS */
904:
905: /*
906: * Save the result status and light up an external clock
907: * if available.
908: */
909: pll_status = ntv.status;
910: if (pll_control) {
911: #ifdef STA_NANO
912: if (pll_status & STA_CLK)
913: ext_enable = 1;
914: #endif /* STA_NANO */
915: report_event(EVNT_KERN, NULL,
916: "kernel time sync enabled");
917: }
918: #endif /* KERNEL_PLL */
919: #endif /* LOCKCLOCK */
920: break;
921:
922: /*
923: * Initialize the frequency. If the frequency file is missing or
924: * broken, set the initial frequency to zero and set the state
925: * to NSET. Otherwise, set the initial frequency to the given
926: * value and the state to FSET.
927: */
928: case LOOP_DRIFTCOMP:
929: #ifndef LOCKCLOCK
930: if (freq > NTP_MAXFREQ || freq < -NTP_MAXFREQ) {
931: set_freq(0);
932: rstclock(EVNT_NSET, 0);
933: } else {
934: set_freq(freq);
935: rstclock(EVNT_FSET, 0);
936: }
937: #endif /* LOCKCLOCK */
938: break;
939:
940: /*
941: * Disable the kernel at shutdown. The microkernel just abandons
942: * ship. The nanokernel carefully cleans up so applications can
943: * see this. Note the last programmed offset and frequency are
944: * left in place.
945: */
946: case LOOP_KERN_CLEAR:
947: #ifndef LOCKCLOCK
948: #ifdef KERNEL_PLL
949: if (pll_control && kern_enable) {
950: memset((char *)&ntv, 0, sizeof(ntv));
951: ntv.modes = MOD_STATUS;
952: ntv.status = STA_UNSYNC;
953: ntp_adjtime(&ntv);
954: report_event(EVNT_KERN, NULL,
955: "kernel time sync disabledx");
956: }
957: #endif /* KERNEL_PLL */
958: #endif /* LOCKCLOCK */
959: break;
960:
961: /*
962: * Tinker command variables for Ulrich Windl. Very dangerous.
963: */
964: case LOOP_ALLAN: /* Allan intercept (log2) (allan) */
965: allan_xpt = (u_char)freq;
966: break;
967:
968: case LOOP_CODEC: /* audio codec frequency (codec) */
969: clock_codec = freq / 1e6;
970: break;
971:
972: case LOOP_PHI: /* dispersion threshold (dispersion) */
973: clock_phi = freq / 1e6;
974: break;
975:
976: case LOOP_FREQ: /* initial frequency (freq) */
977: set_freq(freq / 1e6);
978: rstclock(EVNT_FSET, 0);
979: break;
980:
981: case LOOP_HUFFPUFF: /* huff-n'-puff length (huffpuff) */
982: if (freq < HUFFPUFF)
983: freq = HUFFPUFF;
984: sys_hufflen = (int)(freq / HUFFPUFF);
985: sys_huffpuff = (double *)emalloc(sizeof(double) *
986: sys_hufflen);
987: for (i = 0; i < sys_hufflen; i++)
988: sys_huffpuff[i] = 1e9;
989: sys_mindly = 1e9;
990: break;
991:
992: case LOOP_PANIC: /* panic threshold (panic) */
993: clock_panic = freq;
994: break;
995:
996: case LOOP_MAX: /* step threshold (step) */
997: clock_max = freq;
998: if (clock_max == 0 || clock_max > 0.5)
999: kern_enable = 0;
1000: break;
1001:
1002: case LOOP_MINSTEP: /* stepout threshold (stepout) */
1003: clock_minstep = freq;
1004: break;
1005:
1006: case LOOP_LEAP: /* not used */
1007: default:
1008: msyslog(LOG_NOTICE,
1009: "loop_config: unsupported option %d", item);
1010: }
1011: }
1012:
1013:
1014: #if defined(KERNEL_PLL) && defined(SIGSYS)
1015: /*
1016: * _trap - trap processor for undefined syscalls
1017: *
1018: * This nugget is called by the kernel when the SYS_ntp_adjtime()
1019: * syscall bombs because the silly thing has not been implemented in
1020: * the kernel. In this case the phase-lock loop is emulated by
1021: * the stock adjtime() syscall and a lot of indelicate abuse.
1022: */
1023: static RETSIGTYPE
1024: pll_trap(
1025: int arg
1026: )
1027: {
1028: pll_control = 0;
1029: siglongjmp(env, 1);
1030: }
1031: #endif /* KERNEL_PLL && SIGSYS */
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */