1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: tasks.c,v 1.8.2.2 2012/05/31 14:45:10 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47:
48:
49: #pragma GCC visibility push(hidden)
50:
51: inline sched_task_t *
52: _sched_useTask(sched_root_task_t * __restrict root)
53: {
54: sched_task_t *task, *tmp;
55:
56: TAILQ_FOREACH_SAFE(task, &root->root_unuse, task_node, tmp) {
57: if (!TASK_ISLOCKED(task)) {
58: #ifdef HAVE_LIBPTHREAD
59: pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
60: #endif
61: TAILQ_REMOVE(&root->root_unuse, task, task_node);
62: #ifdef HAVE_LIBPTHREAD
63: pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
64: #endif
65: break;
66: }
67: }
68:
69: if (!task) {
70: task = malloc(sizeof(sched_task_t));
71: if (!task) {
72: LOGERR;
73: return NULL;
74: }
75: }
76:
77: return task;
78: }
79:
80: inline sched_task_t *
81: _sched_unuseTask(sched_task_t * __restrict task)
82: {
83: TASK_UNLOCK(task);
84: TASK_TYPE(task) = taskUNUSE;
85: #ifdef HAVE_LIBPTHREAD
86: pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
87: #endif
88: TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, task, task_node);
89: #ifdef HAVE_LIBPTHREAD
90: pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
91: #endif
92: task = NULL;
93:
94: return task;
95: }
96:
97: #pragma GCC visibility pop
98:
99:
100: /*
101: * schedRead() - Add READ I/O task to scheduler queue
102: *
103: * @root = root task
104: * @func = task execution function
105: * @arg = 1st func argument
106: * @fd = fd handle
107: * @opt_data = Optional data
108: * @opt_dlen = Optional data length
109: * return: NULL error or !=NULL new queued task
110: */
111: sched_task_t *
112: schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
113: void *opt_data, size_t opt_dlen)
114: {
115: sched_task_t *task;
116: void *ptr;
117:
118: if (!root || !func)
119: return NULL;
120:
121: /* get new task */
122: if (!(task = _sched_useTask(root)))
123: return NULL;
124:
125: memset(task, 0, sizeof(sched_task_t));
126: task->task_id = 0;
127: task->task_lock = 0;
128: task->task_func = func;
129: TASK_TYPE(task) = taskREAD;
130: TASK_ROOT(task) = root;
131:
132: TASK_ARG(task) = arg;
133: TASK_FD(task) = fd;
134:
135: TASK_DATA(task) = opt_data;
136: TASK_DATLEN(task) = opt_dlen;
137:
138: if (root->root_hooks.hook_add.read)
139: ptr = root->root_hooks.hook_add.read(task, NULL);
140: else
141: ptr = NULL;
142:
143: if (!ptr) {
144: #ifdef HAVE_LIBPTHREAD
145: pthread_mutex_lock(&root->root_mtx[taskREAD]);
146: #endif
147: TAILQ_INSERT_TAIL(&root->root_read, task, task_node);
148: #ifdef HAVE_LIBPTHREAD
149: pthread_mutex_unlock(&root->root_mtx[taskREAD]);
150: #endif
151: } else
152: task = _sched_unuseTask(task);
153:
154: return task;
155: }
156:
157: /*
158: * schedWrite() - Add WRITE I/O task to scheduler queue
159: *
160: * @root = root task
161: * @func = task execution function
162: * @arg = 1st func argument
163: * @fd = fd handle
164: * @opt_data = Optional data
165: * @opt_dlen = Optional data length
166: * return: NULL error or !=NULL new queued task
167: */
168: sched_task_t *
169: schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
170: void *opt_data, size_t opt_dlen)
171: {
172: sched_task_t *task;
173: void *ptr;
174:
175: if (!root || !func)
176: return NULL;
177:
178: /* get new task */
179: if (!(task = _sched_useTask(root)))
180: return NULL;
181:
182: memset(task, 0, sizeof(sched_task_t));
183: task->task_id = 0;
184: task->task_lock = 0;
185: task->task_func = func;
186: TASK_TYPE(task) = taskWRITE;
187: TASK_ROOT(task) = root;
188:
189: TASK_ARG(task) = arg;
190: TASK_FD(task) = fd;
191:
192: TASK_DATA(task) = opt_data;
193: TASK_DATLEN(task) = opt_dlen;
194:
195: if (root->root_hooks.hook_add.write)
196: ptr = root->root_hooks.hook_add.write(task, NULL);
197: else
198: ptr = NULL;
199:
200: if (!ptr) {
201: #ifdef HAVE_LIBPTHREAD
202: pthread_mutex_lock(&root->root_mtx[taskWRITE]);
203: #endif
204: TAILQ_INSERT_TAIL(&root->root_write, task, task_node);
205: #ifdef HAVE_LIBPTHREAD
206: pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
207: #endif
208: } else
209: task = _sched_unuseTask(task);
210:
211: return task;
212: }
213:
214: /*
215: * schedNode() - Add NODE task to scheduler queue
216: *
217: * @root = root task
218: * @func = task execution function
219: * @arg = 1st func argument
220: * @fd = fd handle
221: * @opt_data = Optional data
222: * @opt_dlen = Optional data length
223: * return: NULL error or !=NULL new queued task
224: */
225: sched_task_t *
226: schedNode(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
227: void *opt_data, size_t opt_dlen)
228: {
229: sched_task_t *task;
230: void *ptr;
231:
232: if (!root || !func)
233: return NULL;
234:
235: /* get new task */
236: if (!(task = _sched_useTask(root)))
237: return NULL;
238:
239: memset(task, 0, sizeof(sched_task_t));
240: task->task_id = 0;
241: task->task_lock = 0;
242: task->task_func = func;
243: TASK_TYPE(task) = taskNODE;
244: TASK_ROOT(task) = root;
245:
246: TASK_ARG(task) = arg;
247: TASK_FD(task) = fd;
248:
249: TASK_DATA(task) = opt_data;
250: TASK_DATLEN(task) = opt_dlen;
251:
252: if (root->root_hooks.hook_add.node)
253: ptr = root->root_hooks.hook_add.node(task, NULL);
254: else
255: ptr = NULL;
256:
257: if (!ptr) {
258: #ifdef HAVE_LIBPTHREAD
259: pthread_mutex_lock(&root->root_mtx[taskNODE]);
260: #endif
261: TAILQ_INSERT_TAIL(&root->root_node, task, task_node);
262: #ifdef HAVE_LIBPTHREAD
263: pthread_mutex_unlock(&root->root_mtx[taskNODE]);
264: #endif
265: } else
266: task = _sched_unuseTask(task);
267:
268: return task;
269: }
270:
271: /*
272: * schedProc() - Add PROC task to scheduler queue
273: *
274: * @root = root task
275: * @func = task execution function
276: * @arg = 1st func argument
277: * @pid = PID
278: * @opt_data = Optional data
279: * @opt_dlen = Optional data length
280: * return: NULL error or !=NULL new queued task
281: */
282: sched_task_t *
283: schedProc(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long pid,
284: void *opt_data, size_t opt_dlen)
285: {
286: sched_task_t *task;
287: void *ptr;
288:
289: if (!root || !func)
290: return NULL;
291:
292: /* get new task */
293: if (!(task = _sched_useTask(root)))
294: return NULL;
295:
296: memset(task, 0, sizeof(sched_task_t));
297: task->task_id = 0;
298: task->task_lock = 0;
299: task->task_func = func;
300: TASK_TYPE(task) = taskPROC;
301: TASK_ROOT(task) = root;
302:
303: TASK_ARG(task) = arg;
304: TASK_VAL(task) = pid;
305:
306: TASK_DATA(task) = opt_data;
307: TASK_DATLEN(task) = opt_dlen;
308:
309: if (root->root_hooks.hook_add.proc)
310: ptr = root->root_hooks.hook_add.proc(task, NULL);
311: else
312: ptr = NULL;
313:
314: if (!ptr) {
315: #ifdef HAVE_LIBPTHREAD
316: pthread_mutex_lock(&root->root_mtx[taskPROC]);
317: #endif
318: TAILQ_INSERT_TAIL(&root->root_proc, task, task_node);
319: #ifdef HAVE_LIBPTHREAD
320: pthread_mutex_unlock(&root->root_mtx[taskPROC]);
321: #endif
322: } else
323: task = _sched_unuseTask(task);
324:
325: return task;
326: }
327:
328: /*
329: * schedUser() - Add trigger USER task to scheduler queue
330: *
331: * @root = root task
332: * @func = task execution function
333: * @arg = 1st func argument
334: * @id = Trigger ID
335: * @opt_data = Optional data
336: * @opt_dlen = Optional user's trigger flags
337: * return: NULL error or !=NULL new queued task
338: */
339: sched_task_t *
340: schedUser(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id,
341: void *opt_data, size_t opt_dlen)
342: {
343: sched_task_t *task;
344: void *ptr;
345:
346: if (!root || !func)
347: return NULL;
348:
349: /* get new task */
350: if (!(task = _sched_useTask(root)))
351: return NULL;
352:
353: memset(task, 0, sizeof(sched_task_t));
354: task->task_id = 0;
355: task->task_lock = 0;
356: task->task_func = func;
357: TASK_TYPE(task) = taskUSER;
358: TASK_ROOT(task) = root;
359:
360: TASK_ARG(task) = arg;
361: TASK_VAL(task) = id;
362:
363: TASK_DATA(task) = opt_data;
364: TASK_DATLEN(task) = opt_dlen;
365:
366: if (root->root_hooks.hook_add.user)
367: ptr = root->root_hooks.hook_add.user(task, NULL);
368: else
369: ptr = NULL;
370:
371: if (!ptr) {
372: #ifdef HAVE_LIBPTHREAD
373: pthread_mutex_lock(&root->root_mtx[taskUSER]);
374: #endif
375: TAILQ_INSERT_TAIL(&root->root_user, task, task_node);
376: #ifdef HAVE_LIBPTHREAD
377: pthread_mutex_unlock(&root->root_mtx[taskUSER]);
378: #endif
379: } else
380: task = _sched_unuseTask(task);
381:
382: return task;
383: }
384:
385: /*
386: * schedSignal() - Add SIGNAL task to scheduler queue
387: *
388: * @root = root task
389: * @func = task execution function
390: * @arg = 1st func argument
391: * @sig = Signal
392: * @opt_data = Optional data
393: * @opt_dlen = Optional data length
394: * return: NULL error or !=NULL new queued task
395: */
396: sched_task_t *
397: schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long sig,
398: void *opt_data, size_t opt_dlen)
399: {
400: sched_task_t *task;
401: void *ptr;
402:
403: if (!root || !func)
404: return NULL;
405:
406: /* get new task */
407: if (!(task = _sched_useTask(root)))
408: return NULL;
409:
410: memset(task, 0, sizeof(sched_task_t));
411: task->task_id = 0;
412: task->task_lock = 0;
413: task->task_func = func;
414: TASK_TYPE(task) = taskSIGNAL;
415: TASK_ROOT(task) = root;
416:
417: TASK_ARG(task) = arg;
418: TASK_VAL(task) = sig;
419:
420: TASK_DATA(task) = opt_data;
421: TASK_DATLEN(task) = opt_dlen;
422:
423: if (root->root_hooks.hook_add.signal)
424: ptr = root->root_hooks.hook_add.signal(task, NULL);
425: else
426: ptr = NULL;
427:
428: if (!ptr) {
429: #ifdef HAVE_LIBPTHREAD
430: pthread_mutex_lock(&root->root_mtx[taskSIGNAL]);
431: #endif
432: TAILQ_INSERT_TAIL(&root->root_signal, task, task_node);
433: #ifdef HAVE_LIBPTHREAD
434: pthread_mutex_unlock(&root->root_mtx[taskSIGNAL]);
435: #endif
436: } else
437: task = _sched_unuseTask(task);
438:
439: return task;
440: }
441:
442: /*
443: * schedAlarm() - Add ALARM task to scheduler queue
444: *
445: * @root = root task
446: * @func = task execution function
447: * @arg = 1st func argument
448: * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
449: * @opt_data = Optional data
450: * @opt_dlen = Optional data length
451: * return: NULL error or !=NULL new queued task
452: */
453: sched_task_t *
454: schedAlarm(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
455: void *opt_data, size_t opt_dlen)
456: {
457: sched_task_t *task;
458: void *ptr;
459:
460: if (!root || !func)
461: return NULL;
462:
463: /* get new task */
464: if (!(task = _sched_useTask(root)))
465: return NULL;
466:
467: memset(task, 0, sizeof(sched_task_t));
468: task->task_id = 0;
469: task->task_lock = 0;
470: task->task_func = func;
471: TASK_TYPE(task) = taskALARM;
472: TASK_ROOT(task) = root;
473:
474: TASK_ARG(task) = arg;
475: TASK_TS(task) = ts;
476:
477: TASK_DATA(task) = opt_data;
478: TASK_DATLEN(task) = opt_dlen;
479:
480: if (root->root_hooks.hook_add.alarm)
481: ptr = root->root_hooks.hook_add.alarm(task, NULL);
482: else
483: ptr = NULL;
484:
485: if (!ptr) {
486: #ifdef HAVE_LIBPTHREAD
487: pthread_mutex_lock(&root->root_mtx[taskALARM]);
488: #endif
489: TAILQ_INSERT_TAIL(&root->root_alarm, task, task_node);
490: #ifdef HAVE_LIBPTHREAD
491: pthread_mutex_unlock(&root->root_mtx[taskALARM]);
492: #endif
493: } else
494: task = _sched_unuseTask(task);
495:
496: return task;
497: }
498:
499: /*
500: * schedTimer() - Add TIMER task to scheduler queue
501: *
502: * @root = root task
503: * @func = task execution function
504: * @arg = 1st func argument
505: * @ts = timeout argument structure
506: * @opt_data = Optional data
507: * @opt_dlen = Optional data length
508: * return: NULL error or !=NULL new queued task
509: */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *t = NULL;
	void *ptr;
	struct timespec now;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = _sched_useTask(root)))
		return NULL;

	memset(task, 0, sizeof(sched_task_t));
	task->task_id = 0;	/* redundant after memset, kept for clarity */
	task->task_lock = 0;
	task->task_func = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* calculate timeval structure */
	/* convert the relative timeout to an absolute expiry on the
	 * monotonic clock, then normalize tv_nsec into [0, 1e9).  A single
	 * add/subtract suffices because both inputs are assumed already
	 * normalized (|tv_nsec| < 1e9). */
	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= 1000000000L) {
		now.tv_sec++;
		now.tv_nsec -= 1000000000L;
	} else if (now.tv_nsec < 0) {
		now.tv_sec--;
		now.tv_nsec += 1000000000L;
	}
	TASK_TS(task) = now;	/* absolute expiry time */

	/* backend add-hook may veto the timer by returning non-NULL */
	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTIMER]);
#endif
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
#else
		/* sorted insert: walk to the first queued timer that does NOT
		 * expire strictly before the new one (the `-` is the operator
		 * argument to the timespec comparison macro), then insert in
		 * front of it — keeping root_timer ordered by expiry. */
		TAILQ_FOREACH(t, &root->root_timer, task_node)
			if (sched_timespeccmp(&TASK_TS(task), &TASK_TS(t), -) < 1)
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
		else
			TAILQ_INSERT_BEFORE(t, task, task_node);
#endif
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
#endif
	} else
		task = _sched_unuseTask(task);	/* hook refused: recycle, return NULL */

	return task;
}
578:
579: /*
580: * schedEvent() - Add EVENT task to scheduler queue
581: *
582: * @root = root task
583: * @func = task execution function
584: * @arg = 1st func argument
585: * @val = additional func argument
586: * @opt_data = Optional data
587: * @opt_dlen = Optional data length
588: * return: NULL error or !=NULL new queued task
589: */
590: sched_task_t *
591: schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
592: void *opt_data, size_t opt_dlen)
593: {
594: sched_task_t *task;
595: void *ptr;
596:
597: if (!root || !func)
598: return NULL;
599:
600: /* get new task */
601: if (!(task = _sched_useTask(root)))
602: return NULL;
603:
604: memset(task, 0, sizeof(sched_task_t));
605: task->task_id = 0;
606: task->task_lock = 0;
607: task->task_func = func;
608: TASK_TYPE(task) = taskEVENT;
609: TASK_ROOT(task) = root;
610:
611: TASK_ARG(task) = arg;
612: TASK_VAL(task) = val;
613:
614: TASK_DATA(task) = opt_data;
615: TASK_DATLEN(task) = opt_dlen;
616:
617: if (root->root_hooks.hook_add.event)
618: ptr = root->root_hooks.hook_add.event(task, NULL);
619: else
620: ptr = NULL;
621:
622: if (!ptr) {
623: #ifdef HAVE_LIBPTHREAD
624: pthread_mutex_lock(&root->root_mtx[taskEVENT]);
625: #endif
626: TAILQ_INSERT_TAIL(&root->root_event, task, task_node);
627: #ifdef HAVE_LIBPTHREAD
628: pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
629: #endif
630: } else
631: task = _sched_unuseTask(task);
632:
633: return task;
634: }
635:
636:
637: /*
638: * schedEventLo() - Add EVENT_Lo task to scheduler queue
639: *
640: * @root = root task
641: * @func = task execution function
642: * @arg = 1st func argument
643: * @val = additional func argument
644: * @opt_data = Optional data
645: * @opt_dlen = Optional data length
646: * return: NULL error or !=NULL new queued task
647: */
648: sched_task_t *
649: schedEventLo(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
650: void *opt_data, size_t opt_dlen)
651: {
652: sched_task_t *task;
653: void *ptr;
654:
655: if (!root || !func)
656: return NULL;
657:
658: /* get new task */
659: if (!(task = _sched_useTask(root)))
660: return NULL;
661:
662: memset(task, 0, sizeof(sched_task_t));
663: task->task_id = 0;
664: task->task_lock = 0;
665: task->task_func = func;
666: TASK_TYPE(task) = taskEVENT;
667: TASK_ROOT(task) = root;
668:
669: TASK_ARG(task) = arg;
670: TASK_VAL(task) = val;
671:
672: TASK_DATA(task) = opt_data;
673: TASK_DATLEN(task) = opt_dlen;
674:
675: if (root->root_hooks.hook_add.eventlo)
676: ptr = root->root_hooks.hook_add.eventlo(task, NULL);
677: else
678: ptr = NULL;
679:
680: if (!ptr) {
681: #ifdef HAVE_LIBPTHREAD
682: pthread_mutex_lock(&root->root_mtx[taskEVENTLO]);
683: #endif
684: TAILQ_INSERT_TAIL(&root->root_eventlo, task, task_node);
685: #ifdef HAVE_LIBPTHREAD
686: pthread_mutex_unlock(&root->root_mtx[taskEVENTLO]);
687: #endif
688: } else
689: task = _sched_unuseTask(task);
690:
691: return task;
692: }
693:
694: /*
695: * schedCallOnce() - Call once from scheduler
696: *
697: * @root = root task
698: * @func = task execution function
699: * @arg = 1st func argument
700: * @val = additional func argument
701: * @opt_data = Optional data
702: * @opt_dlen = Optional data length
703: * return: return value from called func
704: */
705: sched_task_t *
706: schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
707: void *opt_data, size_t opt_dlen)
708: {
709: sched_task_t *task;
710: void *ret;
711:
712: if (!root || !func)
713: return NULL;
714:
715: /* get new task */
716: if (!(task = _sched_useTask(root)))
717: return NULL;
718:
719: memset(task, 0, sizeof(sched_task_t));
720: task->task_id = 0;
721: task->task_lock = 0;
722: task->task_func = func;
723: TASK_TYPE(task) = taskEVENT;
724: TASK_ROOT(task) = root;
725:
726: TASK_ARG(task) = arg;
727: TASK_VAL(task) = val;
728:
729: TASK_DATA(task) = opt_data;
730: TASK_DATLEN(task) = opt_dlen;
731:
732: ret = schedCall(task);
733:
734: _sched_unuseTask(task);
735: return ret;
736: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */