--- libaitrpc/src/srv.c	2012/05/17 15:18:48	1.9.2.24
+++ libaitrpc/src/srv.c	2012/11/16 10:20:55	1.12.2.3
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: srv.c,v 1.9.2.24 2012/05/17 15:18:48 misho Exp $
+* $Id: srv.c,v 1.12.2.3 2012/11/16 10:20:55 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -46,7 +46,103 @@ SUCH DAMAGE.
 #include "global.h"
 
 
+/* SOCK_STREAM */
+static void *acceptClients(sched_task_t *);
+static void *closeClient(sched_task_t *);
+static void *rxPacket(sched_task_t *);
+static void *txPacket(sched_task_t *);
+
+/* SOCK_DGRAM */
+static void *connectClients(sched_task_t *);
+static void *disconnectClient(sched_task_t *);
+
+/* SOCK_RAW */
+
+static sched_task_func_t cbProto[SOCK_RAW + 1][4] = {
+	{ acceptClients, closeClient, rxPacket, txPacket },	/* SOCK_STREAM */
+	{ acceptClients, closeClient, rxPacket, txPacket },	/* SOCK_STREAM */
+	{ connectClients, disconnectClient, NULL, NULL },
+	{ NULL, NULL, NULL, NULL }	/* SOCK_RAW */
+};
+
+
+static rpc_cli_t *
+_allocClient(rpc_srv_t * __restrict srv)
+{
+	rpc_cli_t *c = NULL;
+	register int i;
+
+	/* check free slots for connect */
+	for (i = 0; i < io_arraySize(srv->srv_clients) &&
+			(c = io_array(srv->srv_clients, i, rpc_cli_t*)); i++);
+	if (c)	/* no more free slots! */
+		return NULL;
+
+	c = io_malloc(sizeof(rpc_cli_t));
+	if (!c) {
+		LOGERR;
+		srv->srv_kill = 1;
+		return NULL;
+	} else {
+		memset(c, 0, sizeof(rpc_cli_t));
+		io_arraySet(srv->srv_clients, i, c);
+		c->cli_id = i;
+		c->cli_parent = srv;
+	}
+
+	/* alloc empty buffer */
+	AIT_SET_BUF2(&c->cli_buf, 0, srv->srv_netbuf);
+
+	return c;
+}
+
+
 static void *
+connectClients(sched_task_t *task)
+{
+	rpc_srv_t *srv = TASK_ARG(task);
+	rpc_cli_t *c = NULL;
+	socklen_t salen = sizeof(io_sockaddr_t);
+
+	c = _allocClient(srv);
+	if (!c)
+		goto end;
+
+	/* accept client */
+	c->cli_sock = accept(TASK_FD(task), &c->cli_sa.sa, &salen);
+	if (c->cli_sock == -1) {
+		LOGERR;
+		AIT_FREE_VAL(&c->cli_buf);
+		io_arrayDel(srv->srv_clients, c->cli_id, 42);
+		goto end;
+	} else
+		fcntl(c->cli_sock, F_SETFL, fcntl(c->cli_sock, F_GETFL) | O_NONBLOCK);
+
+	schedRead(TASK_ROOT(task), cbProto[srv->srv_proto][CB_RXPACKET], c,
+			c->cli_sock, NULL, 0);
+end:
+	schedReadSelf(task);
+	return NULL;
+}
+
+static void *
+disconnectClient(sched_task_t *task)
+{
+	rpc_cli_t *c = TASK_ARG(task);
+	rpc_srv_t *s = c->cli_parent;
+
+	schedCancelby(TASK_ROOT(task), taskMAX, CRITERIA_ARG, TASK_ARG(task), NULL);
+
+	/* free buffer */
+	AIT_FREE_VAL(&c->cli_buf);
+
+	io_arrayDel(s->srv_clients, c->cli_id, 0);
+	if (c)
+		io_free(c);
+	return NULL;
+}
+
+static void *
 closeClient(sched_task_t *task)
 {
 	rpc_cli_t *c = TASK_ARG(task);
@@ -62,7 +158,9 @@ closeClient(sched_task_t *task)
 	/* free buffer */
 	AIT_FREE_VAL(&c->cli_buf);
 
-	io_arrayDel(s->srv_clients, c->cli_id, 42);
+	io_arrayDel(s->srv_clients, c->cli_id, 0);
+	if (c)
+		io_free(c);
 	return NULL;
 }
 
@@ -87,11 +185,11 @@ txPacket(sched_task_t *task)
 		rpc->call_rep.ret = RPC_ERROR(-1);
 		rpc->call_rep.eno = RPC_ERROR(rpc_Errno);
 	} else {
-		rpc->call_argc = htons(io_arraySize(f->func_vars));
+		rpc->call_argc = htons(io_arraySize(RPC_RETVARS(c)));
 		/* Go Encapsulate variables */
-		ret = io_vars2buffer(buf + wlen, sizeof buf - wlen, f->func_vars);
+		ret = io_vars2buffer(buf + wlen, sizeof buf - wlen, RPC_RETVARS(c));
 		/* Free return values */
-		io_clrVars(f->func_vars);
+		io_freeVars(&c->cli_vars);
 		if (ret == -1) {
 			rpc_SetErr(EBADRPC, "Prepare RPC packet failed");
 			rpc->call_argc ^= rpc->call_argc;
@@ -112,7 +210,8 @@ txPacket(sched_task_t *task)
 	ret = send(TASK_FD(task), buf, wlen, MSG_NOSIGNAL);
 	if (ret == -1 || ret != wlen) {
 		/* close connection */
-		schedEvent(TASK_ROOT(task), closeClient, c, 42, NULL, 0);
+		schedEvent(TASK_ROOT(task), cbProto[s->srv_proto][CB_CLOSECLIENT],
+				c, 42, NULL, 0);
 	}
 
 	return NULL;
@@ -133,7 +232,7 @@ execCall(sched_task_t *task)
 	if (argc) {
 		arr = io_buffer2vars(buf + sizeof(struct tagRPCCall),
 				AIT_LEN(&c->cli_buf) - TASK_VAL(task) - sizeof(struct tagRPCCall),
-				argc, 1);
+				argc, 42);
 		if (!arr) {
 			rpc_SetErr(ERPCMISMATCH, "#%d - %s", io_GetErrno(), io_GetError());
 			rpc->call_argc ^= rpc->call_argc;
@@ -151,8 +250,8 @@ execCall(sched_task_t *task)
 		rpc->call_rep.eno = RPC_ERROR(rpc_Errno);
 	} else {
 		/* if client doesn't want reply */
-		argc = rpc->call_req.flags & RPC_NOREPLY;
-		rpc->call_rep.ret = RPC_ERROR(rpc_srv_execCall(f, rpc, arr));
+		argc = RPC_CHK_NOREPLY(rpc);
+		rpc->call_rep.ret = RPC_ERROR(rpc_srv_execCall(c, rpc, f->func_name, arr));
 		if (rpc->call_rep.ret == htonl(-1)) {
 			rpc->call_rep.eno = RPC_ERROR(errno);
 			rpc->call_argc ^= rpc->call_argc;
@@ -160,11 +259,11 @@ execCall(sched_task_t *task)
 			rpc->call_rep.eno ^= rpc->call_rep.eno;
 			if (argc) {
 				/* without reply */
-				io_clrVars(f->func_vars);
+				io_freeVars(&c->cli_vars);
 				rpc->call_argc ^= rpc->call_argc;
 			} else {
 				/* reply */
-				rpc->call_argc = htons(io_arraySize(f->func_vars));
+				rpc->call_argc = htons(io_arraySize(RPC_RETVARS(c)));
 			}
 		}
 	}
@@ -183,11 +282,13 @@ rxPacket(sched_task_t *task)
 	u_char *buf = AIT_GET_BUF(&c->cli_buf);
 	struct tagRPCCall *rpc;
 
-	memset(buf, 0, AIT_LEN(&c->cli_buf));
+	if (!off)
+		memset(buf, 0, AIT_LEN(&c->cli_buf));
 	rlen = recv(TASK_FD(task), buf + off, AIT_LEN(&c->cli_buf) - off, 0);
 	if (rlen < 1) {
 		/* close connection */
-		schedEvent(TASK_ROOT(task), closeClient, c, 42, NULL, 0);
+		schedEvent(TASK_ROOT(task), cbProto[s->srv_proto][CB_CLOSECLIENT],
+				c, 42, NULL, 0);
 		return NULL;
 	} else {
 		rlen += off;	/* add reminded bytes from previous rxPacket, if exists! */
@@ -227,7 +328,7 @@ rxPacket(sched_task_t *task)
 			continue;
 		}
 
-		noreply = rpc->call_req.flags & RPC_NOREPLY;
+		noreply = RPC_CHK_NOREPLY(rpc);
 
 		/* check RPC packet session info */
 		if (rpc_chkPktSession(&rpc->call_session, &s->srv_session)) {
@@ -242,7 +343,8 @@ rxPacket(sched_task_t *task)
 
 		/* send RPC reply */
 		if (!noreply)
-			schedWrite(TASK_ROOT(task), txPacket, TASK_ARG(task), TASK_FD(task), rpc, len);
+			schedWrite(TASK_ROOT(task), cbProto[s->srv_proto][CB_TXPACKET],
+					TASK_ARG(task), TASK_FD(task), rpc, len);
 
 		off += len;
 	} while (rlen > 0);
@@ -257,40 +359,24 @@ acceptClients(sched_task_t *task)
 {
 	rpc_srv_t *srv = TASK_ARG(task);
 	rpc_cli_t *c = NULL;
-	register int i;
 	socklen_t salen = sizeof(io_sockaddr_t);
 
-	/* check free slots for connect */
-	for (i = 0; i < io_arraySize(srv->srv_clients) &&
-			(c = io_array(srv->srv_clients, i, rpc_cli_t*)); i++);
-	if (c)	/* no more free slots! */
+	c = _allocClient(srv);
+	if (!c)
 		goto end;
 
-	c = malloc(sizeof(rpc_cli_t));
-	if (!c) {
-		LOGERR;
-		srv->srv_kill = 1;
-		return NULL;
-	} else {
-		memset(c, 0, sizeof(rpc_cli_t));
-		io_arraySet(srv->srv_clients, i, c);
-		c->cli_id = i;
-		c->cli_parent = srv;
-	}
-	/* alloc empty buffer */
-	AIT_SET_BUF2(&c->cli_buf, 0, srv->srv_netbuf);
-
 	/* accept client */
 	c->cli_sock = accept(TASK_FD(task), &c->cli_sa.sa, &salen);
 	if (c->cli_sock == -1) {
 		LOGERR;
		AIT_FREE_VAL(&c->cli_buf);
-		io_arrayDel(srv->srv_clients, i, 42);
+		io_arrayDel(srv->srv_clients, c->cli_id, 42);
 		goto end;
 	} else
 		fcntl(c->cli_sock, F_SETFL, fcntl(c->cli_sock, F_GETFL) | O_NONBLOCK);
 
-	schedRead(TASK_ROOT(task), rxPacket, c, c->cli_sock, NULL, 0);
+	schedRead(TASK_ROOT(task), cbProto[srv->srv_proto][CB_RXPACKET], c,
+			c->cli_sock, NULL, 0);
 end:
 	schedReadSelf(task);
 	return NULL;
@@ -314,7 +400,9 @@ closeBLOBClient(sched_task_t *task)
 	/* free buffer */
 	AIT_FREE_VAL(&c->cli_buf);
 
-	io_arrayDel(s->srv_blob.clients, c->cli_id, 42);
+	io_arrayDel(s->srv_blob.clients, c->cli_id, 0);
+	if (c)
+		io_free(c);
 	return NULL;
 }
 
@@ -417,7 +505,7 @@ rxBLOB(sched_task_t *task)
 		}
 		break;
 	case unset:
-		if (rpc_srv_unregisterBLOB(s, blob.hdr_var) == -1) {
+		if (rpc_srv_unregisterBLOB(s, ntohl(blob.hdr_var)) == -1) {
 			blob.hdr_cmd = error;
 			blob.hdr_ret = RPC_ERROR(-1);
 		}
@@ -442,13 +530,16 @@ acceptBLOBClients(sched_task_t *task)
 	rpc_cli_t *c = NULL;
 	register int i;
 	socklen_t salen = sizeof(io_sockaddr_t);
+#ifdef TCP_NOPUSH
+	int n = 1;
+#endif
 
 	/* check free slots for connect */
 	for (i = 0; i < io_arraySize(srv->srv_blob.clients) &&
 			(c = io_array(srv->srv_blob.clients, i, rpc_cli_t*)); i++);
 	if (c)	/* no more free slots! */
 		goto end;
-	c = malloc(sizeof(rpc_cli_t));
+	c = io_malloc(sizeof(rpc_cli_t));
 	if (!c) {
 		LOGERR;
 		srv->srv_kill = srv->srv_blob.kill = 1;
@@ -470,8 +561,12 @@ acceptBLOBClients(sched_task_t *task)
 		AIT_FREE_VAL(&c->cli_buf);
 		io_arrayDel(srv->srv_blob.clients, i, 42);
 		goto end;
-	} else
+	} else {
+#ifdef TCP_NOPUSH
+		setsockopt(c->cli_sock, IPPROTO_TCP, TCP_NOPUSH, &n, sizeof n);
+#endif
 		fcntl(c->cli_sock, F_SETFL, fcntl(c->cli_sock, F_GETFL) | O_NONBLOCK);
+	}
 
 	schedRead(TASK_ROOT(task), rxBLOB, c, c->cli_sock, NULL, 0);
 end:
@@ -592,47 +687,13 @@ rpc_srv_initBLOBServer(rpc_srv_t * __restrict srv, u_s
  * @srv = RPC Server instance
  * return: none
  */
-void
+inline void
 rpc_srv_endBLOBServer(rpc_srv_t * __restrict srv)
 {
-	rpc_cli_t *c;
-	register int i;
-	rpc_blob_t *b, *tmp;
-
 	if (!srv)
 		return;
 
-	if (srv->srv_blob.tid) {
-		pthread_cancel(srv->srv_blob.tid);
-		pthread_join(srv->srv_blob.tid, NULL);
-	}
-
-	/* close all clients connections & server socket */
-	for (i = 0; i < io_arraySize(srv->srv_blob.clients); i++) {
-		c = io_array(srv->srv_blob.clients, i, rpc_cli_t*);
-		if (c) {
-			shutdown(c->cli_sock, SHUT_RDWR);
-			close(c->cli_sock);
-
-			schedCancelby(srv->srv_blob.root, taskMAX, CRITERIA_ARG, c, NULL);
-			AIT_FREE_VAL(&c->cli_buf);
-		}
-		io_arrayDel(srv->srv_blob.clients, i, 42);
-	}
-	io_arrayDestroy(&srv->srv_blob.clients);
-
-	close(srv->srv_blob.server.cli_sock);
-
-	/* detach blobs */
-	TAILQ_FOREACH_SAFE(b, &srv->srv_blob.blobs, blob_node, tmp) {
-		TAILQ_REMOVE(&srv->srv_blob.blobs, b, blob_node);
-
-		rpc_srv_blobFree(srv, b);
-		free(b);
-	}
-
-	schedEnd(&srv->srv_blob.root);
-	AIT_FREE_VAL(&srv->srv_blob.dir);
+	srv->srv_blob.kill = 1;
 }
 
 /*
@@ -644,6 +705,11 @@ rpc_srv_endBLOBServer(rpc_srv_t * __restrict srv)
 int
 rpc_srv_loopBLOBServer(rpc_srv_t * __restrict srv)
 {
+	rpc_cli_t *c;
+	register int i;
+	rpc_blob_t *b, *tmp;
+	struct timespec ts = { RPC_SCHED_POLLING, 0 };
+
 	if (!srv || srv->srv_kill) {
 		rpc_SetErr(EINVAL, "Invalid parameter can`t start BLOB server");
 		return -1;
 	}
@@ -663,8 +729,36 @@ rpc_srv_loopBLOBServer(rpc_srv_t * __restrict srv)
 		return -1;
 	}
 
+	schedPolling(srv->srv_blob.root, &ts, NULL);
 	/* main rpc loop */
 	schedRun(srv->srv_blob.root, &srv->srv_blob.kill);
+
+	/* close all clients connections & server socket */
+	for (i = 0; i < io_arraySize(srv->srv_blob.clients); i++) {
+		c = io_array(srv->srv_blob.clients, i, rpc_cli_t*);
+		if (c) {
+			shutdown(c->cli_sock, SHUT_RDWR);
+			close(c->cli_sock);
+
+			schedCancelby(srv->srv_blob.root, taskMAX, CRITERIA_ARG, c, NULL);
+			AIT_FREE_VAL(&c->cli_buf);
+		}
+		io_arrayDel(srv->srv_blob.clients, i, 42);
+	}
+	io_arrayDestroy(&srv->srv_blob.clients);
+
+	close(srv->srv_blob.server.cli_sock);
+
+	/* detach blobs */
+	TAILQ_FOREACH_SAFE(b, &srv->srv_blob.blobs, blob_node, tmp) {
+		TAILQ_REMOVE(&srv->srv_blob.blobs, b, blob_node);
+
+		rpc_srv_blobFree(srv, b);
+		io_free(b);
+	}
+
+	schedEnd(&srv->srv_blob.root);
+	AIT_FREE_VAL(&srv->srv_blob.dir);
 
 	return 0;
 }
@@ -678,20 +772,23 @@ rpc_srv_loopBLOBServer(rpc_srv_t * __restrict srv)
  * @netBuf = Network buffer length (min:512 bytes), if =0 == BUFSIZ (also meaning max RPC packet)
  * @csHost = Host name or address for bind server, if NULL any address
  * @Port = Port for bind server, if Port == 0 default port is selected
+ * @proto = Protocol, if == 0 choose SOCK_STREAM
  * return: NULL == error or !=NULL bind and created RPC server instance
  */
 rpc_srv_t *
 rpc_srv_initServer(u_int regProgID, u_char regProcID, int concurentClients,
-		int netBuf, const char *csHost, u_short Port)
+		int netBuf, const char *csHost, u_short Port, int proto)
 {
 	int n = 1;
 	rpc_srv_t *srv = NULL;
-	io_sockaddr_t sa;
+	io_sockaddr_t sa = IO_SOCKADDR_INIT;
 
-	if (!concurentClients || !regProgID) {
+	if (!concurentClients || !regProgID || (proto < 0 || proto > SOCK_DGRAM)) {
 		rpc_SetErr(EINVAL, "Invalid parameters can`t init RPC server");
 		return NULL;
 	}
+	if (!proto)
+		proto = SOCK_STREAM;
 	if (!io_gethostbyname(csHost, Port, &sa))
 		return NULL;
 	if (!Port)
@@ -699,7 +796,7 @@ rpc_srv_initServer(u_int regProgID, u_char regProcID,
 	if (netBuf < RPC_MIN_BUFSIZ)
 		netBuf = BUFSIZ;
 	else
-		netBuf = io_align(netBuf, 1);	/* align netBuf length */
+		netBuf = io_align(netBuf, 2);	/* align netBuf length */
 #ifdef HAVE_SRANDOMDEV
 	srandomdev();
 #else
@@ -709,13 +806,14 @@ rpc_srv_initServer(u_int regProgID, u_char regProcID,
 	srandom((time(&tim) ^ getpid()));
 #endif
 
-	srv = malloc(sizeof(rpc_srv_t));
+	srv = io_malloc(sizeof(rpc_srv_t));
 	if (!srv) {
 		LOGERR;
 		return NULL;
 	} else
 		memset(srv, 0, sizeof(rpc_srv_t));
 
+	srv->srv_proto = proto;
 	srv->srv_netbuf = netBuf;
 	srv->srv_session.sess_version = RPC_VERSION;
 	srv->srv_session.sess_program = regProgID;
@@ -724,14 +822,17 @@ rpc_srv_initServer(u_int regProgID, u_char regProcID,
 	srv->srv_server.cli_parent = srv;
 	memcpy(&srv->srv_server.cli_sa, &sa, sizeof srv->srv_server.cli_sa);
 
-	/* init functions list */
-	TAILQ_INIT(&srv->srv_funcs);
+	/* init functions */
+	pthread_mutex_init(&srv->srv_funcs.mtx, NULL);
+	SLIST_INIT(&srv->srv_funcs);
+	AVL_INIT(&srv->srv_funcs);
 
 	/* init scheduler */
 	srv->srv_root = schedBegin();
 	if (!srv->srv_root) {
 		rpc_SetErr(sched_GetErrno(), "%s", sched_GetError());
-		free(srv);
+		pthread_mutex_destroy(&srv->srv_funcs.mtx);
+		io_free(srv);
 		return NULL;
 	}
 
@@ -740,17 +841,19 @@ rpc_srv_initServer(u_int regProgID, u_char regProcID,
 	if (!srv->srv_clients) {
 		rpc_SetErr(io_GetErrno(), "%s", io_GetError());
 		schedEnd(&srv->srv_root);
-		free(srv);
+		pthread_mutex_destroy(&srv->srv_funcs.mtx);
+		io_free(srv);
 		return NULL;
 	}
 
 	/* create server socket */
-	srv->srv_server.cli_sock = socket(srv->srv_server.cli_sa.sa.sa_family, SOCK_STREAM, 0);
+	srv->srv_server.cli_sock = socket(srv->srv_server.cli_sa.sa.sa_family, srv->srv_proto, 0);
 	if (srv->srv_server.cli_sock == -1) {
 		LOGERR;
 		io_arrayDestroy(&srv->srv_clients);
 		schedEnd(&srv->srv_root);
-		free(srv);
+		pthread_mutex_destroy(&srv->srv_funcs.mtx);
+		io_free(srv);
 		return NULL;
 	}
 	if (setsockopt(srv->srv_server.cli_sock, SOL_SOCKET, SO_REUSEADDR, &n, sizeof n) == -1) {
@@ -779,7 +882,8 @@ err:	/* error condition */
 	close(srv->srv_server.cli_sock);
 	io_arrayDestroy(&srv->srv_clients);
 	schedEnd(&srv->srv_root);
-	free(srv);
+	pthread_mutex_destroy(&srv->srv_funcs.mtx);
+	io_free(srv);
 	return NULL;
 }
 
@@ -789,46 +893,21 @@ err:	/* error condition */
  * @psrv = RPC Server instance
 * return: none
 */
-void
+inline void
 rpc_srv_endServer(rpc_srv_t ** __restrict psrv)
 {
-	rpc_cli_t *c;
-	register int i;
-	rpc_func_t *f, *tmp;
-
 	if (!psrv || !*psrv)
 		return;
 
+	/* if send kill to blob server */
 	if (!(*psrv)->srv_blob.kill)
 		rpc_srv_endBLOBServer(*psrv);
 
-	/* close all clients connections & server socket */
-	for (i = 0; i < io_arraySize((*psrv)->srv_clients); i++) {
-		c = io_array((*psrv)->srv_clients, i, rpc_cli_t*);
-		if (c) {
-			shutdown(c->cli_sock, SHUT_RDWR);
-			close(c->cli_sock);
+	(*psrv)->srv_kill = 1;
+	sleep(RPC_SCHED_POLLING);
 
-			schedCancelby((*psrv)->srv_root, taskMAX, CRITERIA_ARG, c, NULL);
-			AIT_FREE_VAL(&c->cli_buf);
-		}
-		io_arrayDel((*psrv)->srv_clients, i, 42);
-	}
-	io_arrayDestroy(&(*psrv)->srv_clients);
-
-	close((*psrv)->srv_server.cli_sock);
-
-	/* detach exported calls */
-	TAILQ_FOREACH_SAFE(f, &(*psrv)->srv_funcs, func_node, tmp) {
-		TAILQ_REMOVE(&(*psrv)->srv_funcs, f, func_node);
-
-		io_freeVars(&f->func_vars);
-		AIT_FREE_VAL(&f->func_name);
-		free(f);
-	}
-
-	schedEnd(&(*psrv)->srv_root);
-	free(*psrv);
+	pthread_mutex_destroy(&(*psrv)->srv_funcs.mtx);
+	io_free(*psrv);
 	*psrv = NULL;
 }
 
@@ -841,26 +920,62 @@ rpc_srv_endServer(rpc_srv_t ** __restrict psrv)
 int
 rpc_srv_loopServer(rpc_srv_t * __restrict srv)
 {
+	rpc_cli_t *c;
+	register int i;
+	rpc_func_t *f;
+	struct timespec ts = { RPC_SCHED_POLLING, 0 };
+
 	if (!srv) {
 		rpc_SetErr(EINVAL, "Invalid parameter can`t start RPC server");
 		return -1;
 	}
 
-	fcntl(srv->srv_server.cli_sock, F_SETFL,
-			fcntl(srv->srv_server.cli_sock, F_GETFL) | O_NONBLOCK);
-
 	if (listen(srv->srv_server.cli_sock, io_arraySize(srv->srv_clients)) == -1) {
 		LOGERR;
 		return -1;
-	}
+	} else
+		fcntl(srv->srv_server.cli_sock, F_SETFL,
+				fcntl(srv->srv_server.cli_sock, F_GETFL) | O_NONBLOCK);

-	if (!schedRead(srv->srv_root, acceptClients, srv, srv->srv_server.cli_sock, NULL, 0)) {
+	if (!schedRead(srv->srv_root, cbProto[srv->srv_proto][CB_ACCEPTCLIENT], srv,
+			srv->srv_server.cli_sock, NULL, 0)) {
 		rpc_SetErr(sched_GetErrno(), "%s", sched_GetError());
 		return -1;
 	}
 
+	schedPolling(srv->srv_root, &ts, NULL);
 	/* main rpc loop */
 	schedRun(srv->srv_root, &srv->srv_kill);
+
+	/* close all clients connections & server socket */
+	for (i = 0; i < io_arraySize(srv->srv_clients); i++) {
+		c = io_array(srv->srv_clients, i, rpc_cli_t*);
+		if (c) {
+			shutdown(c->cli_sock, SHUT_RDWR);
+			close(c->cli_sock);
+
+			schedCancelby(srv->srv_root, taskMAX, CRITERIA_ARG, c, NULL);
+			io_freeVars(&RPC_RETVARS(c));
+			AIT_FREE_VAL(&c->cli_buf);
+		}
+		io_arrayDel(srv->srv_clients, i, 42);
+	}
+	io_arrayDestroy(&srv->srv_clients);
+
+	close(srv->srv_server.cli_sock);
+
+	/* detach exported calls */
+	RPC_FUNCS_LOCK(&srv->srv_funcs);
+	while ((f = SLIST_FIRST(&srv->srv_funcs))) {
+		SLIST_REMOVE_HEAD(&srv->srv_funcs, func_next);
+
+		AIT_FREE_VAL(&f->func_name);
+		io_free(f);
+	}
+	srv->srv_funcs.avlh_root = NULL;
+	RPC_FUNCS_UNLOCK(&srv->srv_funcs);
+
+	schedEnd(&srv->srv_root);
 
 	return 0;
 }
@@ -868,22 +983,23 @@ rpc_srv_loopServer(rpc_srv_t * __restrict srv)
 /*
  * rpc_srv_execCall() Execute registered call from RPC server
  *
- * @call = Register RPC call
+ * @cli = RPC client
  * @rpc = IN RPC call structure
+ * @funcname = Execute RPC function
 * @args = IN RPC calling arguments from RPC client
 * return: -1 error, !=-1 ok
 */
 int
-rpc_srv_execCall(rpc_func_t * __restrict call, struct tagRPCCall * __restrict rpc,
-		array_t * __restrict args)
+rpc_srv_execCall(rpc_cli_t * __restrict cli, struct tagRPCCall * __restrict rpc,
+		ait_val_t funcname, array_t * __restrict args)
 {
 	rpc_callback_t func;
 
-	if (!call || !rpc || !call->func_parent || !AIT_ADDR(&call->func_name)) {
+	if (!cli || !rpc || !AIT_ADDR(&funcname)) {
 		rpc_SetErr(EINVAL, "Invalid parameter can`t exec function");
 		return -1;
 	}
 
-	func = AIT_GET_LIKE(&call->func_name, rpc_callback_t);
-	return func(call, rpc, args);
+	func = AIT_GET_LIKE(&funcname, rpc_callback_t);
+	return func(cli, rpc, args);
 }