Diff for /libaitsched/src/tasks.c between versions 1.14.2.1 and 1.19.2.1

version 1.14.2.1, 2012/09/10 15:03:08
version 1.19.2.1, 2013/08/26 13:26:56

Line 12  terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
         by Michael Pounov <misho@elwix.org>.  All rights reserved.

 Redistribution and use in source and binary forms, with or without

Line 52  SUCH DAMAGE.
  * @root = root task
  * return: NULL error or !=NULL prepared task
  */
-inline sched_task_t *
+sched_task_t *
 sched_useTask(sched_root_task_t * __restrict root)
 {
         sched_task_t *task, *tmp;
Line 89  sched_useTask(sched_root_task_t * __restrict root)
  * @task = task
  * return: always is NULL
  */
-inline sched_task_t *
+sched_task_t *
 sched_unuseTask(sched_task_t * __restrict task)
 {
         TASK_UNLOCK(task);
Line 129  _sched_threadWrapper(sched_task_t *t)
 {
         void *ret = NULL;
         sem_t *s = NULL;
+        sched_root_task_t *r;

         if (!t || !TASK_ROOT(t) || !TASK_RET(t))
                 pthread_exit(ret);
-        else
+        else {
                 s = (sem_t*) TASK_RET(t);
+                r = TASK_ROOT(t);
+        }

         pthread_cleanup_push((void (*)(void*)) _sched_threadCleanup, t);

         pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
         pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&r->root_mtx[taskTHREAD]);
+#endif
+        TAILQ_REMOVE(&r->root_thread, t, task_node);
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&r->root_mtx[taskTHREAD]);
+#endif
+        t->task_type = taskUNUSE;
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
+#endif
+        TAILQ_INSERT_TAIL(&r->root_unuse, t, task_node);
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
+#endif

         /* notify parent, thread is ready for execution */
         sem_post(s);
         pthread_testcancel();
Line 152 (old) / Line 171 (new)  _sched_threadWrapper(sched_task_t *t)
 }
 #endif
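
The sem_post(s) above is the child's half of a start-up handshake: the spawning side is expected to block on the same semaphore (handed over through TASK_RET()) until the new thread reports it is running. In isolation that handshake looks roughly like the sketch below; spawn_worker() and worker() are made-up names used only for illustration, not libaitsched code.

#include <pthread.h>
#include <semaphore.h>

static sem_t ready;

/* hypothetical thread body: announce start-up, then do the real work */
static void *
worker(void *arg)
{
        (void)arg;
        sem_post(&ready);       /* same role as sem_post(s) in the wrapper */
        /* ... real work would continue here ... */
        return NULL;
}

/* hypothetical spawner: returns only after the thread has started */
static int
spawn_worker(void)
{
        pthread_t tid;

        if (sem_init(&ready, 0, 0))
                return -1;
        if (pthread_create(&tid, NULL, worker, NULL)) {
                sem_destroy(&ready);
                return -1;
        }
        sem_wait(&ready);       /* block until worker() posts the semaphore */
        sem_destroy(&ready);
        pthread_detach(tid);
        return 0;
}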
   
+#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
+void *
+_sched_rtcWrapper(sched_task_t *t)
+{
+        void *ret = NULL;
+        sched_task_func_t func;
+        sched_task_t *task;
+        sched_root_task_t *r;
+
+        if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
+                return NULL;
+        else {
+                r = TASK_ROOT(t);
+                task = (sched_task_t*) TASK_DATA(t);
+                func = TASK_FUNC(task);
+        }
+
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&r->root_mtx[taskRTC]);
+#endif
+        TAILQ_REMOVE(&r->root_rtc, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&r->root_mtx[taskRTC]);
+#endif
+        task->task_type = taskUNUSE;
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
+#endif
+        TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
+#endif
+
+        ret = func(task);
+
+        timer_delete((timer_t) TASK_DATLEN(t));
+        return ret;
+}
+#endif
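
_sched_rtcWrapper() expects to be driven by a POSIX per-process timer: TASK_DATA() carries the task to run and TASK_DATLEN() the timer_t to delete once the callback returns. The sketch below shows the plain timer_create()/timer_settime() machinery that the HAVE_TIMER_CREATE/HAVE_TIMER_SETTIME guards refer to; it is not the library's hook_add.rtc implementation, and on_rtc_expire() plus its sival_ptr payload are assumptions (older glibc also needs -lrt at link time).

#include <signal.h>
#include <string.h>
#include <time.h>

/* hypothetical expiry callback; SIGEV_THREAD runs it in a new thread */
static void
on_rtc_expire(union sigval sv)
{
        /* sv.sival_ptr could carry a task pointer, much as the wrapper
         * above recovers its sched_task_t through TASK_DATA() */
        (void)sv;
}

/* arm a one-shot CLOCK_REALTIME timer that fires after ts */
static int
arm_oneshot_timer(struct timespec ts, void *payload)
{
        struct sigevent sev;
        struct itimerspec its;
        timer_t id;

        memset(&sev, 0, sizeof sev);
        sev.sigev_notify = SIGEV_THREAD;
        sev.sigev_notify_function = on_rtc_expire;
        sev.sigev_value.sival_ptr = payload;

        if (timer_create(CLOCK_REALTIME, &sev, &id) == -1)
                return -1;

        memset(&its, 0, sizeof its);
        its.it_value = ts;      /* one-shot: it_interval stays zero */
        if (timer_settime(id, 0, &its, NULL) == -1) {
                timer_delete(id);
                return -1;
        }
        /* in a real scheduler the timer id would be stashed alongside the
         * payload so the expiry path can timer_delete() it, the way
         * _sched_rtcWrapper() deletes the timer held in TASK_DATLEN(t) */
        return 0;
}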
   
 #pragma GCC visibility pop

 /*
Line 161 (old) / Line 220 (new)  _sched_threadWrapper(sched_task_t *t)
  * @retcode = return code
  * return: return code
  */
-inline void *
+void *
 sched_taskExit(sched_task_t *task, intptr_t retcode)
 {
         if (!task || !TASK_ROOT(task))
Line 511 (old) / Line 570 (new)  schedSignal(sched_root_task_t * __restrict root, sched
  * @func = task execution function
  * @arg = 1st func argument
  * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
- * @opt_data = Optional data
+ * @opt_data = Alarm timer ID
  * @opt_dlen = Optional data length
  * return: NULL error or !=NULL new queued task
  */
Line 625 (old) / Line 684 (new)  schedAIO(sched_root_task_t * __restrict root, sched_ta
  * @offset = Offset from start of file, if =-1 from current position
  * return: NULL error or !=NULL new queued task
  */
-inline sched_task_t *
+sched_task_t *
 schedAIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
                 void *buffer, size_t buflen, off_t offset)
 {
Line 679 (old) / Line 738 (new)  schedAIORead(sched_root_task_t * __restrict root, sche
  * @offset = Offset from start of file, if =-1 from current position
  * return: NULL error or !=NULL new queued task
  */
-inline sched_task_t *
+sched_task_t *
 schedAIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
                 void *buffer, size_t buflen, off_t offset)
 {
Line 860 (old) / Line 919 (new)  schedLIORead(sched_root_task_t * __restrict root, sche
  * @offset = Offset from start of file, if =-1 from current position
  * return: NULL error or !=NULL new queued task
  */
-inline sched_task_t *
+sched_task_t *
 schedLIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
                 struct iovec *bufs, size_t nbufs, off_t offset)
 {
Line 1322 (old) / Line 1381 (new)  schedThread(sched_root_task_t * __restrict root, sched
         return task;
 }
   
+/*
+ * schedRTC() - Add RTC task to scheduler queue
+ *
+ * @root = root task
+ * @func = task execution function
+ * @arg = 1st func argument
+ * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
+ * @opt_data = Optional RTC ID
+ * @opt_dlen = Optional data length
+ * return: NULL error or !=NULL new queued task
+ */
+sched_task_t *
+schedRTC(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, 
+                void *opt_data, size_t opt_dlen)
+{
+#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
+        sched_task_t *task;
+        void *ptr;
+
+        if (!root || !func)
+                return NULL;
+
+        /* get new task */
+        if (!(task = sched_useTask(root)))
+                return NULL;
+
+        task->task_func = func;
+        TASK_TYPE(task) = taskRTC;
+        TASK_ROOT(task) = root;
+
+        TASK_ARG(task) = arg;
+        TASK_TS(task) = ts;
+
+        TASK_DATA(task) = opt_data;
+        TASK_DATLEN(task) = opt_dlen;
+
+        if (root->root_hooks.hook_add.rtc)
+                ptr = root->root_hooks.hook_add.rtc(task, NULL);
+        else
+                ptr = NULL;
+
+        if (!ptr) {
+#ifdef HAVE_LIBPTHREAD
+                pthread_mutex_lock(&root->root_mtx[taskRTC]);
+#endif
+                TAILQ_INSERT_TAIL(&root->root_rtc, TASK_ID(task), task_node);
+#ifdef HAVE_LIBPTHREAD
+                pthread_mutex_unlock(&root->root_mtx[taskRTC]);
+#endif
+        } else
+                task = sched_unuseTask(task);
+
+        return task;
+#else
+        sched_SetErr(ENOTSUP, "Not supported realtime clock extensions");
+        return NULL;
+#endif
+}
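
Assuming the callback shape inferred from _sched_rtcWrapper() above (a function taking a sched_task_t * and returning void *) and an already initialized root, a caller might queue a one-shot RTC task roughly as follows. The header name, callback, and helper are illustrative guesses, not part of the library.

#include <time.h>
#include <aitsched.h>           /* assumed header name for libaitsched */

/* hypothetical callback: runs once when the RTC timer expires */
static void *
on_rtc_tick(sched_task_t *task)
{
        /* TASK_ARG(task) carries the arg passed to schedRTC() */
        (void)task;
        return NULL;
}

/* hypothetical helper: fire on_rtc_tick() one second from now */
static int
queue_rtc_example(sched_root_task_t *root)
{
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

        /* opt_data ("Optional RTC ID") and opt_dlen left empty here */
        if (!schedRTC(root, on_rtc_tick, NULL, ts, NULL, 0))
                return -1;      /* NULL also covers the ENOTSUP fallback above */
        return 0;
}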

Legend: lines prefixed with "-" were removed from v.1.14.2.1, lines prefixed
with "+" were added in v.1.19.2.1, and unchanged context lines are shown once.


FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>