--- libaitrpc/src/srv.c	2013/11/15 09:05:19	1.21.2.5
+++ libaitrpc/src/srv.c	2014/11/17 23:28:55	1.23.6.1
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: srv.c,v 1.21.2.5 2013/11/15 09:05:19 misho Exp $
+* $Id: srv.c,v 1.23.6.1 2014/11/17 23:28:55 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@
 terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria
-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
+Copyright 2004 - 2014
 by Michael Pounov . All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,11 @@ static sched_task_func_t cbProto[SOCK_RAW + 1][4] = {
         { NULL, NULL, NULL, NULL }              /* SOCK_RAW */
 };
 
 
+/* Global Signal Argument when kqueue support disabled */
+static volatile uintptr_t _glSigArg = 0;
+
+
 void
 rpc_freeCli(rpc_cli_t * __restrict c)
 {
@@ -223,7 +227,7 @@ txPacket(sched_task_t *task)
                 if (ret)
                         LOGERR;
                 else
-                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Server not respond");
+                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Client not respond");
                 /* close connection */
                 schedEvent(TASK_ROOT(task), cbProto[s->srv_proto][CB_CLOSECLIENT],
                                 TASK_ARG(task), 0, NULL, 0);
@@ -346,7 +350,7 @@ rxPacket(sched_task_t *task)
                 if (rlen)
                         LOGERR;
                 else
-                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Server not respond");
+                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Client not respond");
                 schedEvent(TASK_ROOT(task), cbProto[s->srv_proto][CB_CLOSECLIENT],
                                 TASK_ARG(task), 0, NULL, 0);
                 return NULL;
@@ -502,7 +506,7 @@ txUDPPacket(sched_task_t *task)
                 if (ret)
                         LOGERR;
                 else
-                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Server not respond");
+                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Client not respond");
                 /* close connection */
                 schedEvent(TASK_ROOT(task), cbProto[s->srv_proto][CB_CLOSECLIENT],
                                 TASK_ARG(task), 0, NULL, 0);
@@ -575,7 +579,7 @@ rxUDPPacket(sched_task_t *task)
                 if (rlen)
                         LOGERR;
                 else
-                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Server not respond");
+                        rpc_SetErr(ETIMEDOUT, "Timeout reached! Client not respond");
                 schedEvent(TASK_ROOT(task), cbProto[srv->srv_proto][CB_CLOSECLIENT],
                                 c, 0, NULL, 0);
                 return NULL;
@@ -760,7 +764,8 @@ end:
 static void *
 flushBLOB(sched_task_t *task)
 {
-        rpc_srv_t *srv = TASK_ARG(task);
+        uintptr_t sigArg = atomic_load_acq_ptr(&_glSigArg);
+        rpc_srv_t *srv = sigArg ? (void*) sigArg : TASK_ARG(task);
         rpc_blob_t *b, *tmp;
 
         TAILQ_FOREACH_SAFE(b, &srv->srv_blob.blobs, blob_node, tmp) {
@@ -770,7 +775,17 @@ flushBLOB(sched_task_t *task)
                 e_free(b);
         }
 
-        schedSignalSelf(task);
+        if (!schedSignalSelf(task)) {
+                /* disabled kqueue support in libaitsched */
+                struct sigaction sa;
+
+                memset(&sa, 0, sizeof sa);
+                sigemptyset(&sa.sa_mask);
+                sa.sa_handler = (void (*)(int)) flushBLOB;
+                sa.sa_flags = SA_RESTART | SA_RESETHAND;
+                sigaction(SIGFBLOB, &sa, NULL);
+        }
+
 
         return NULL;
 }
@@ -984,7 +999,19 @@ rpc_srv_loopBLOBServer(rpc_srv_t * __restrict srv)
                 return -1;
         }
 
-        schedSignal(srv->srv_blob.root, flushBLOB, srv, SIGFBLOB, NULL, 0);
+        if (!schedSignal(srv->srv_blob.root, flushBLOB, srv, SIGFBLOB, NULL, 0)) {
+                /* disabled kqueue support in libaitsched */
+                struct sigaction sa;
+
+                atomic_store_rel_ptr(&_glSigArg, (uintptr_t) srv);
+
+                memset(&sa, 0, sizeof sa);
+                sigemptyset(&sa.sa_mask);
+                sa.sa_handler = (void (*)(int)) flushBLOB;
+                sa.sa_flags = SA_RESTART | SA_RESETHAND;
+                sigaction(SIGFBLOB, &sa, NULL);
+        }
+
         if (!schedRead(srv->srv_blob.root, acceptBLOBClients, srv,
                                 srv->srv_blob.server.cli_sock, NULL, 0)) {
                 rpc_SetErr(sched_GetErrno(), "%s", sched_GetError());
@@ -1263,3 +1290,136 @@ rpc_srv_execCall(rpc_cli_t * __restrict cli, struct ta
         func = AIT_GET_LIKE(&funcname, rpc_callback_t);
         return func(cli, rpc, args);
 }
+
+
+/*
+ * rpc_srv_initServer2() - Init & create layer2 RPC Server
+ *
+ * @InstID = Instance for authentication & recognition
+ * @concurentClients = Concurent clients at same time to this server
+ * @netBuf = Network buffer length (min:512 bytes), if =0 == BUFSIZ (also meaning max RPC packet)
+ * @csIface = Interface name for bind server, if NULL first interface on host
+ * @protoNum = Protocol ethernet number for bind server, if Port == 0 default port is selected
+ * return: NULL == error or !=NULL bind and created RPC server instance
+ */
+rpc_srv_t *
+rpc_srv_initServer2(u_char InstID, int concurentClients, int netBuf,
+                const char *csIface, u_short protoNum)
+{
+        int n = 1;
+        rpc_srv_t *srv = NULL;
+        sockaddr_t sa = E_SOCKADDR_INIT;
+        char szIface[64], szStr[STRSIZ];
+        register int i;
+        struct ifreq ifr;
+
+        if (!concurentClients) {
+                rpc_SetErr(EINVAL, "Invalid parameters can`t init RPC server");
+                return NULL;
+        }
+        if (!csIface) {
+                if (e_get1stiface(szIface, sizeof szIface))
+                        return NULL;
+        } else
+                strlcpy(szIface, csIface, sizeof szIface);
+        if (e_getifacebyname(szIface, &sa))
+                return NULL;
+        if (!protoNum)
+                protoNum = RPC_DEFPORT;
+
+#ifdef HAVE_SRANDOMDEV
+        srandomdev();
+#else
+        time_t tim;
+
+        srandom((time(&tim) ^ getpid()));
+#endif
+
+        srv = e_malloc(sizeof(rpc_srv_t));
+        if (!srv) {
+                LOGERR;
+                return NULL;
+        } else
+                memset(srv, 0, sizeof(rpc_srv_t));
+
+        srv->srv_proto = protoNum;
+        srv->srv_netbuf = netBuf;
+        srv->srv_session.sess_version = RPC_VERSION;
+        srv->srv_session.sess_instance = InstID;
+
+        srv->srv_server.cli_parent = srv;
+        memcpy(&srv->srv_server.cli_sa, &sa, sizeof srv->srv_server.cli_sa);
+
+        /* init functions */
+        pthread_mutex_init(&srv->srv_funcs.mtx, NULL);
+        SLIST_INIT(&srv->srv_funcs);
+        AVL_INIT(&srv->srv_funcs);
+
+        /* init scheduler */
+        srv->srv_root = schedBegin();
+        if (!srv->srv_root) {
+                rpc_SetErr(sched_GetErrno(), "%s", sched_GetError());
+                pthread_mutex_destroy(&srv->srv_funcs.mtx);
+                e_free(srv);
+                return NULL;
+        }
+
+        /* init pool for clients */
+        srv->srv_clients = array_Init(concurentClients);
+        if (!srv->srv_clients) {
+                rpc_SetErr(elwix_GetErrno(), "%s", elwix_GetError());
+                schedEnd(&srv->srv_root);
+                pthread_mutex_destroy(&srv->srv_funcs.mtx);
+                e_free(srv);
+                return NULL;
+        }
+
+        /* create server handler */
+        for (i = 0; i < 10; i++) {
+                memset(szStr, 0, sizeof szStr);
+                snprintf(szStr, sizeof szStr, "/dev/bpf%d", i);
+                srv->srv_server.cli_sock = open(szStr, O_RDWR);
+                if (srv->srv_server.cli_sock > STDERR_FILENO)
+                        break;
+        }
+        if (srv->srv_server.cli_sock < 3) {
+                LOGERR;
+                array_Destroy(&srv->srv_clients);
+                schedEnd(&srv->srv_root);
+                pthread_mutex_destroy(&srv->srv_funcs.mtx);
+                e_free(srv);
+                return NULL;
+        }
+
+        if (ioctl(srv->srv_server.cli_sock, BIOCIMMEDIATE, &n) == -1) {
+                LOGERR;
+                goto err;
+        }
+        n = (netBuf < RPC_MIN_BUFSIZ) ? getpagesize() : E_ALIGN(netBuf, 2);
+        if (ioctl(srv->srv_server.cli_sock, BIOCSBLEN, &n) == -1) {
+                LOGERR;
+                goto err;
+        } else
+                srv->srv_netbuf = n;
+
+        memset(&ifr, 0, sizeof ifr);
+        strlcpy(ifr.ifr_name, szIface, sizeof ifr.ifr_name);
+        if (ioctl(srv->srv_server.cli_sock, BIOCSETIF, &ifr) == -1) {
+                LOGERR;
+                goto err;
+        } else
+                fcntl(srv->srv_server.cli_sock, F_SETFL,
+                                fcntl(srv->srv_server.cli_sock, F_GETFL) | O_NONBLOCK);
+
+        rpc_register_srvPing(srv);
+
+        return srv;
+err:    /* error condition */
+        close(srv->srv_server.cli_sock);
+        array_Destroy(&srv->srv_clients);
+        schedEnd(&srv->srv_root);
+        pthread_mutex_destroy(&srv->srv_funcs.mtx);
+        e_free(srv);
+        return NULL;
+}
+
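
Note on the flushBLOB hunks: when libaitsched is built without kqueue support, schedSignal()/schedSignalSelf() fail and the patch falls back to a plain POSIX signal handler. Because a signal handler only receives the signal number, the server pointer is smuggled to flushBLOB() through the _glSigArg global instead of the scheduler task argument; SA_RESETHAND plus the re-arm inside flushBLOB() keeps the handler installed across deliveries. Below is a minimal standalone sketch of that pattern in plain C11, not the libaitrpc API; the names g_arg, on_flush, install_fallback and the use of SIGUSR1 are illustrative assumptions.

/* sketch: hand a context pointer to a signal handler via an atomic global */
#include <signal.h>
#include <stdint.h>
#include <stdatomic.h>
#include <string.h>

static _Atomic uintptr_t g_arg;         /* stands in for _glSigArg */

static void
on_flush(int signo)
{
        /* recover the context the handler cannot be passed directly
         * (assumes the uintptr_t atomic is lock-free, hence handler-safe) */
        void *ctx = (void *) atomic_load(&g_arg);

        (void) signo;
        (void) ctx;     /* flush whatever state ctx points to here */
}

static void
install_fallback(void *ctx)
{
        struct sigaction sa;

        atomic_store(&g_arg, (uintptr_t) ctx);

        memset(&sa, 0, sizeof sa);
        sigemptyset(&sa.sa_mask);
        sa.sa_handler = on_flush;
        sa.sa_flags = SA_RESTART;
        sigaction(SIGUSR1, &sa, NULL);
}

int
main(void)
{
        static int state;

        install_fallback(&state);
        raise(SIGUSR1);         /* triggers on_flush() once */
        return 0;
}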
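
And a hedged usage sketch for the new layer-2 entry point, based only on the signature and comment block in the last hunk. The header name and the follow-up steps are assumptions, and since the function opens /dev/bpf* directly it normally needs elevated privileges to run.

/* sketch: create a layer-2 (BPF-backed) RPC server instance */
#include <stdio.h>
#include <stdlib.h>

#include <aitrpc.h>     /* assumed header name for libaitrpc */

int
main(void)
{
        rpc_srv_t *srv;

        /* instance 1, up to 8 concurrent clients, default buffer size,
         * first interface on the host, default ethernet protocol number */
        srv = rpc_srv_initServer2(1, 8, 0, NULL, 0);
        if (!srv) {
                fprintf(stderr, "rpc_srv_initServer2() failed\n");
                return EXIT_FAILURE;
        }

        /* registering calls and running the server loop would follow here,
         * as with the existing rpc_srv_initServer() path (not part of this patch) */
        return EXIT_SUCCESS;
}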