--- libaitio/src/bpf.c	2013/06/25 08:45:50	1.1.2.7
+++ libaitio/src/bpf.c	2013/06/25 10:58:22	1.1.2.11
@@ -97,11 +97,19 @@ allocZCbuf(u_int len)
 int
 io_etherOpen(const char *csIface, int flags, int whdr, u_int *buflen, void **zcbuf)
 {
-	int n = 1, eth = -1;
+	int eth = -1;
 	register int i;
 	char szStr[STRSIZ];
 	struct ifreq ifr;
+	u_int n = 1;
 
+#ifndef __FreeBSD__
+	if (zcbuf) {
+		io_SetErr(ENOTSUP, "bpf zero copy buffer mode is not supported");
+		return -1;
+	}
+#endif
+
 	for (i = 0; i < BPF_DEV_MAX; i++) {
 		memset(szStr, 0, sizeof szStr);
 		snprintf(szStr, sizeof szStr, "/dev/bpf%d", i);
@@ -124,45 +132,56 @@ io_etherOpen(const char *csIface, int flags, int whdr,
 		close(eth);
 		return -1;
 	}
-	if (buflen && *buflen) {
-		if (!zcbuf) {
-			if (ioctl(eth, BIOCSBLEN, buflen) == -1) {
-				LOGERR;
-				close(eth);
-				return -1;
-			}
-		} else {
+
+	if (!zcbuf) {
+		if (ioctl(eth, BIOCGBLEN, &n) == -1) {
+			LOGERR;
+			close(eth);
+			return -1;
+		} else
+			n = (buflen && *buflen) ? MIN(n, *buflen) : n;
+		if (ioctl(eth, BIOCSBLEN, &n) == -1) {
+			LOGERR;
+			close(eth);
+			return -1;
+		}
+		if (buflen && *buflen)
+			*buflen = n;
+	} else {
 #ifdef __FreeBSD__
-			n = BPF_BUFMODE_ZBUF;
-			if (ioctl(eth, BIOCSETBUFMODE, &n) == -1) {
-				LOGERR;
-				close(eth);
-				return -1;
-			}
-			if (ioctl(eth, BIOCGETZMAX, &n) == -1) {
-				LOGERR;
-				close(eth);
-				return -1;
-			} else
-				*buflen = MIN(n, *buflen);
-			if (!(*zcbuf = allocZCbuf(*buflen))) {
-				close(eth);
-				return -1;
-			}
-			if (ioctl(eth, BIOCSETZBUF, (struct bpf_zbuf*) *zcbuf) == -1) {
-				LOGERR;
-				io_etherClose(eth, zcbuf);
-				return -1;
-			}
-#endif
+		n = BPF_BUFMODE_ZBUF;
+		if (ioctl(eth, BIOCSETBUFMODE, &n) == -1) {
+			LOGERR;
+			close(eth);
+			return -1;
 		}
+		if (ioctl(eth, BIOCGETZMAX, &n) == -1) {
+			LOGERR;
+			close(eth);
+			return -1;
+		} else
+			n = (buflen && *buflen) ? MIN(n, *buflen) : n;
+		if (!(*zcbuf = allocZCbuf(n))) {
+			close(eth);
+			return -1;
+		}
+		if (ioctl(eth, BIOCSETZBUF, (struct bpf_zbuf*) *zcbuf) == -1) {
+			LOGERR;
+			io_etherClose(eth, zcbuf);
+			return -1;
+		}
+		if (buflen && *buflen)
+			*buflen = n;
+#endif
 	}
+
 	if (csIface)
 		strlcpy(szStr, csIface, sizeof szStr);
 	else if (io_get1stiface(szStr, sizeof szStr) == -1) {
 		io_etherClose(eth, zcbuf);
 		return -1;
 	}
+	memset(&ifr, 0, sizeof ifr);
 	strlcpy(ifr.ifr_name, szStr, sizeof ifr.ifr_name);
 	if (ioctl(eth, BIOCSETIF, &ifr) == -1) {
 		LOGERR;
@@ -198,6 +217,19 @@ io_etherSend(int eth, const void *buf, size_t buflen)
 }
 
 #ifdef __FreeBSD__
+static inline void
+ackZCbuf(struct bpf_zbuf_header * __restrict bzh)
+{
+	atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
+}
+
+static inline int
+chkZCbuf(struct bpf_zbuf_header * __restrict bzh)
+{
+	/* return true if userspace owns buffer, and false otherwise. */
+	return (bzh->bzh_user_gen != atomic_load_acq_int(&bzh->bzh_kernel_gen));
+}
+
 static inline ssize_t
 nextZCbuf(void ** __restrict zcache, struct bpf_zbuf * __restrict zbuf,
 	const void * __restrict buf)
@@ -207,20 +239,24 @@ nextZCbuf(void ** __restrict zcache, struct bpf_zbuf *
 
 	if (!*zcache || *zcache == zbuf->bz_bufb) {
 		bzh = (struct bpf_zbuf_header *) zbuf->bz_bufa;
-		if (bzh->bzh_user_gen != atomic_load_acq_int(&bzh->bzh_kernel_gen)) {
+		if (chkZCbuf(bzh)) {
 			rlen = atomic_load_acq_int(&bzh->bzh_kernel_len);
 			*zcache = zbuf->bz_bufa;
 			if (buf)
 				buf = ((caddr_t) *zcache) + sizeof(struct bpf_zbuf_header);
-		}
+			ackZCbuf(bzh);
+		} else
+			io_SetErr(EAGAIN, "kernel owns the buffer");
 	} else if (*zcache == zbuf->bz_bufa) {
 		bzh = (struct bpf_zbuf_header *) zbuf->bz_bufb;
-		if (bzh->bzh_user_gen != atomic_load_acq_int(&bzh->bzh_kernel_gen)) {
+		if (chkZCbuf(bzh)) {
			rlen = atomic_load_acq_int(&bzh->bzh_kernel_len);
 			*zcache = zbuf->bz_bufb;
 			if (buf)
 				buf = ((caddr_t) *zcache) + sizeof(struct bpf_zbuf_header);
-		}
+			ackZCbuf(bzh);
+		} else
+			io_SetErr(EAGAIN, "kernel owns the buffer");
 	}
 
 	return rlen;
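
Note: the new chkZCbuf()/ackZCbuf() helpers wrap the user-space half of FreeBSD's zero-copy BPF generation handshake: userspace owns a buffer half while bzh_user_gen differs from bzh_kernel_gen, and returns it to the kernel by copying the kernel generation back with a release store, which is what nextZCbuf() now does after consuming a buffer. Below is a minimal, FreeBSD-only sketch of that handshake outside the library; consume_zbuf() and its dst/dstlen parameters are hypothetical names used only for illustration, not part of libaitio.

#include <sys/types.h>
#include <machine/atomic.h>
#include <net/bpf.h>
#include <string.h>

/* Hypothetical consumer: copy one captured chunk out of a zero-copy
 * buffer half and hand that half back to the kernel. */
static ssize_t
consume_zbuf(void *zbuffer, void *dst, size_t dstlen)
{
	struct bpf_zbuf_header *bzh = zbuffer;
	size_t rlen;

	/* Generations equal: the kernel still owns this buffer half. */
	if (bzh->bzh_user_gen == atomic_load_acq_int(&bzh->bzh_kernel_gen))
		return (-1);

	/* Userspace owns it: the captured data follows the header. */
	rlen = atomic_load_acq_int(&bzh->bzh_kernel_len);
	if (rlen > dstlen)
		rlen = dstlen;
	memcpy(dst, (char *)zbuffer + sizeof(struct bpf_zbuf_header), rlen);

	/* Acknowledge the generation so the kernel may reuse the buffer. */
	atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
	return ((ssize_t)rlen);
}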