--- libaitio/src/bpf.c	2013/06/25 08:18:57	1.1.2.6
+++ libaitio/src/bpf.c	2013/06/25 08:53:22	1.1.2.8
@@ -198,23 +198,52 @@ io_etherSend(int eth, const void *buf, size_t buflen)
 }
 
 #ifdef __FreeBSD__
+/* hand a consumed zero-copy buffer back to the kernel */
+static inline void
+ackZCbuf(struct bpf_zbuf_header * __restrict bzh)
+{
+	atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
+}
+
+static inline int
+chkZCbuf(struct bpf_zbuf_header * __restrict bzh)
+{
+	/* return true if userspace owns buffer, and false otherwise. */
+	return (bzh->bzh_user_gen != atomic_load_acq_int(&bzh->bzh_kernel_gen));
+}
+
+/*
+ * Find the half of the zero-copy double buffer currently owned by
+ * userspace: return its payload length (-1 if none is ready), point
+ * *buf (when non-NULL) at the payload just past the zbuf header, and
+ * ack the buffer back to the kernel.
+ *
+ * NOTE(review): the buffer is acked before the caller reads the
+ * payload; confirm the kernel cannot recycle it that early.
+ */
 static inline ssize_t
-nextZCbuf(void ** __restrict zcache, struct bpf_zbuf * __restrict zbuf)
+nextZCbuf(void ** __restrict zcache, struct bpf_zbuf * __restrict zbuf,
+		void * __restrict *buf)
 {
 	ssize_t rlen = -1;
 	struct bpf_zbuf_header *bzh;
 
 	if (!*zcache || *zcache == zbuf->bz_bufb) {
 		bzh = (struct bpf_zbuf_header *) zbuf->bz_bufa;
-		if (bzh->bzh_user_gen != atomic_load_acq_int(&bzh->bzh_kernel_gen)) {
+		if (chkZCbuf(bzh)) {
 			rlen = atomic_load_acq_int(&bzh->bzh_kernel_len);
 			*zcache = zbuf->bz_bufa;
+			if (buf)
+				*buf = ((caddr_t) *zcache) + sizeof(struct bpf_zbuf_header);
+			ackZCbuf(bzh);
 		}
 	} else if (*zcache == zbuf->bz_bufa) {
 		bzh = (struct bpf_zbuf_header *) zbuf->bz_bufb;
-		if (bzh->bzh_user_gen != atomic_load_acq_int(&bzh->bzh_kernel_gen)) {
+		if (chkZCbuf(bzh)) {
 			rlen = atomic_load_acq_int(&bzh->bzh_kernel_len);
 			*zcache = zbuf->bz_bufb;
+			if (buf)
+				*buf = ((caddr_t) *zcache) + sizeof(struct bpf_zbuf_header);
+			ackZCbuf(bzh);
 		}
 	}
@@ -248,8 +277,7 @@ io_etherRecv(int eth, void * __restrict buf, size_t bu
 			LOGERR;
 		} else {
 #ifdef __FreeBSD__
-			rlen = nextZCbuf(zcache, (struct bpf_zbuf*) zcbuf);
-			buf = *zcache;
+			rlen = nextZCbuf(zcache, (struct bpf_zbuf*) zcbuf, &buf);
 #else
 			io_SetErr(ENOTSUP, "bpf zero copy buffer mode is not supported");
 #endif