Index: linux-2.4.20/include/linux/skbuff.h
===================================================================
--- linux-2.4.20.orig/include/linux/skbuff.h	2003-05-16 05:28:45.000000000 +0400
+++ linux-2.4.20/include/linux/skbuff.h	2003-12-04 20:56:32.000000000 +0300
@@ -116,6 +116,30 @@
 	__u16 size;
 };
 
+/* Support for callback when skb data has been released */
+typedef struct zccd			/* Zero Copy Callback Descriptor */
+{					/* (embed as first member of custom struct) */
+	atomic_t	zccd_count;	/* reference count */
+	void	(*zccd_destructor)(struct zccd *); /* callback when refcount reaches zero */
+} zccd_t;
+
+static inline void zccd_init (zccd_t *d, void (*callback)(zccd_t *))
+{
+	atomic_set (&d->zccd_count, 1);
+	d->zccd_destructor = callback;
+}
+
+static inline void zccd_get (zccd_t *d)		/* take a reference */
+{
+	atomic_inc (&d->zccd_count);
+}
+
+static inline void zccd_put (zccd_t *d)		/* release a reference */
+{
+	if (atomic_dec_and_test (&d->zccd_count))
+		(d->zccd_destructor)(d);
+}
+
 /* This data is invariant across clones and lives at
  * the end of the header data, ie. at skb->end.
  */
@@ -123,6 +147,12 @@
 	atomic_t	dataref;
 	unsigned int	nr_frags;
 	struct sk_buff	*frag_list;
+	zccd_t		*zccd;			/* zero copy descriptor */
+	zccd_t		*zccd2;			/* 2nd zero copy descriptor */
+	/* NB we expect zero-copy data to be at least 1 packet, so
+	 * having 2 zccds means we don't unnecessarily split the packet
+	 * where consecutive zero-copy sends abut.
+	 */
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
 
Index: linux-2.4.20/include/net/tcp.h
===================================================================
--- linux-2.4.20.orig/include/net/tcp.h	2003-05-16 05:29:15.000000000 +0400
+++ linux-2.4.20/include/net/tcp.h	2003-12-04 20:56:32.000000000 +0300
@@ -638,6 +638,8 @@
 extern int			tcp_sendmsg(struct sock *sk, struct msghdr *msg,
 					    int size);
 extern ssize_t			tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
+extern ssize_t			tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, size_t size,
+						  int flags, zccd_t *zccd);
 
 extern int			tcp_ioctl(struct sock *sk, 
 					  int cmd, 
@@ -731,6 +733,9 @@
 					    struct msghdr *msg,
 					    int len, int nonblock, 
 					    int flags, int *addr_len);
+extern int			tcp_recvpackets(struct sock *sk,
+						struct sk_buff_head *packets,
+						int len, int nonblock);
 
 extern int			tcp_listen_start(struct sock *sk);
 
Index: linux-2.4.20/net/netsyms.c
===================================================================
--- linux-2.4.20.orig/net/netsyms.c	2003-05-16 05:29:15.000000000 +0400
+++ linux-2.4.20/net/netsyms.c	2003-12-04 20:56:44.000000000 +0300
@@ -408,6 +408,8 @@
 
 #endif
 
+EXPORT_SYMBOL(tcp_sendpage_zccd);
+EXPORT_SYMBOL(tcp_recvpackets);
 EXPORT_SYMBOL(tcp_read_sock);
 
 EXPORT_SYMBOL(netlink_set_err);
Index: linux-2.4.20/net/core/skbuff.c
===================================================================
--- linux-2.4.20.orig/net/core/skbuff.c	2003-05-16 05:28:46.000000000 +0400
+++ linux-2.4.20/net/core/skbuff.c	2003-12-04 20:56:32.000000000 +0300
@@ -208,6 +208,8 @@
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+	skb_shinfo(skb)->zccd = NULL;		/* skbuffs kick off with NO user zero copy descriptors */
+	skb_shinfo(skb)->zccd2 = NULL;
 	return skb;
 
 nodata:
@@ -276,6 +278,10 @@
 {
 	if (!skb->cloned ||
 	    atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+		if (skb_shinfo(skb)->zccd != NULL)	/* zero copy callback descriptor? */
+			zccd_put (skb_shinfo(skb)->zccd);  /* release hold */
+		if (skb_shinfo(skb)->zccd2 != NULL)	/* 2nd zero copy callback descriptor? */
+			zccd_put (skb_shinfo(skb)->zccd2); /* release hold */
 		if (skb_shinfo(skb)->nr_frags) {
 			int i;
 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -532,6 +538,8 @@
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+	skb_shinfo(skb)->zccd = NULL;		/* copied data => no user zero copy descriptor */
+	skb_shinfo(skb)->zccd2 = NULL;
 
 	/* We are no longer a clone, even if we were. */
 	skb->cloned = 0;
@@ -578,6 +586,14 @@
 	n->data_len = skb->data_len;
 	n->len = skb->len;
 
+	if (skb_shinfo(skb)->zccd != NULL)	/* user zero copy descriptor? */
+		zccd_get (skb_shinfo(skb)->zccd); /* 1 more ref (pages are shared) */
+	skb_shinfo(n)->zccd = skb_shinfo(skb)->zccd;
+
+	if (skb_shinfo(skb)->zccd2 != NULL)	/* 2nd user zero copy descriptor? */
+		zccd_get (skb_shinfo(skb)->zccd2); /* 1 more ref (pages are shared) */
+	skb_shinfo(n)->zccd2 = skb_shinfo(skb)->zccd2;
+
 	if (skb_shinfo(skb)->nr_frags) {
 		int i;
 
@@ -620,6 +636,8 @@
 	u8 *data;
 	int size = nhead + (skb->end - skb->head) + ntail;
 	long off;
+	zccd_t *zccd = skb_shinfo(skb)->zccd;	/* stash user zero copy descriptor */
+	zccd_t *zccd2 = skb_shinfo(skb)->zccd2;	/* stash 2nd user zero copy descriptor */
 
 	if (skb_shared(skb))
 		BUG();
@@ -641,6 +659,11 @@
 	if (skb_shinfo(skb)->frag_list)
 		skb_clone_fraglist(skb);
 
+	if (zccd != NULL)			/* user zero copy descriptor? */
+		zccd_get (zccd);		/* extra ref (pages are shared) */
+	if (zccd2 != NULL)			/* 2nd user zero copy descriptor? */
+		zccd_get (zccd2);		/* extra ref (pages are shared) */
+
 	skb_release_data(skb);
 
 	off = (data+nhead) - skb->head;
@@ -655,6 +678,8 @@
 	skb->nh.raw += off;
 	skb->cloned = 0;
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
+	skb_shinfo(skb)->zccd = zccd;
+	skb_shinfo(skb)->zccd2 = zccd2;
 	return 0;
 
 nodata:
Index: linux-2.4.20/net/ipv4/tcp.c
===================================================================
--- linux-2.4.20.orig/net/ipv4/tcp.c	2003-05-16 05:29:15.000000000 +0400
+++ linux-2.4.20/net/ipv4/tcp.c	2003-12-04 20:56:32.000000000 +0300
@@ -745,7 +745,7 @@
 	goto out;
 }
 
-ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags);
+ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags, zccd_t *zccd);
 
 static inline int
 can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
@@ -824,7 +824,8 @@
 	return err;
 }
 
-ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags)
+/* Extra parameter: user zero copy descriptor (or NULL if not doing that) */
+ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags, zccd_t *zccd)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 	int mss_now;
@@ -872,6 +873,17 @@
 				copy = size;
 
 			i = skb_shinfo(skb)->nr_frags;
+
+			if (zccd != NULL &&			/* this is a zcc I/O */
+			    skb_shinfo(skb)->zccd != NULL &&	/* skb is part of a zcc I/O */
+			    skb_shinfo(skb)->zccd2 != NULL &&
+			    skb_shinfo(skb)->zccd != zccd &&	/* not the same one */
+			    skb_shinfo(skb)->zccd2 != zccd)
+			{
+				tcp_mark_push (tp, skb);
+				goto new_segment;
+			}
+
 			if (can_coalesce(skb, i, page, offset)) {
 				skb_shinfo(skb)->frags[i-1].size += copy;
 			} else if (i < MAX_SKB_FRAGS) {
@@ -882,6 +894,20 @@
 				goto new_segment;
 			}
 
+			if (zccd != NULL &&			/* this is a zcc I/O */
+			    skb_shinfo(skb)->zccd != zccd &&	/* not already referencing this zccd */
+			    skb_shinfo(skb)->zccd2 != zccd)
+			{
+				zccd_get (zccd);		/* bump ref count */
+
+				BUG_TRAP (skb_shinfo(skb)->zccd2 == NULL);
+
+				if (skb_shinfo(skb)->zccd == NULL) /* reference this zccd */
+					skb_shinfo(skb)->zccd = zccd;
+				else
+					skb_shinfo(skb)->zccd2 = zccd;
+			}
+
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->ip_summed = CHECKSUM_HW;
@@ -945,7 +971,31 @@
 	lock_sock(sk);
 	TCP_CHECK_TIMER(sk);
-	res = do_tcp_sendpages(sk, &page, offset, size, flags);
+	res = do_tcp_sendpages(sk, &page, offset, size, flags, NULL);
 	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return res;
 }
+
+ssize_t tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, size_t size,
+			  int flags, zccd_t *zccd)
+{
+	ssize_t res;
+	struct sock *sk = sock->sk;
+
+#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
+
+	if (!(sk->route_caps & NETIF_F_SG) ||	/* caller shouldn't waste her time */
+	    !(sk->route_caps & TCP_ZC_CSUM_FLAGS)) /* on double mapping */
+		BUG ();
+
+#undef TCP_ZC_CSUM_FLAGS
+
+	lock_sock(sk);
+	TCP_CHECK_TIMER(sk);
+
+	res = do_tcp_sendpages(sk, &page, offset, size, flags, zccd);
+
+	TCP_CHECK_TIMER(sk);
+	release_sock(sk);
+	return res;
+}
@@ -1769,6 +1819,202 @@
 	goto out;
 }
 
+int tcp_recvpackets (struct sock *sk, struct sk_buff_head *packets,
+		     int len, int nonblock)
+{
+	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+	int copied;
+	long timeo;
+
+	BUG_TRAP (len > 0);
+	/*BUG_TRAP ((flags & (MSG_OOB | MSG_PEEK | MSG_TRUNC)) == 0);*/
+
+	lock_sock(sk);
+
+	TCP_CHECK_TIMER(sk);
+
+	copied = -ENOTCONN;
+	if (sk->state == TCP_LISTEN)
+		goto out;
+
+	copied = 0;
+	timeo = sock_rcvtimeo(sk, nonblock);
+
+	do {
+		struct sk_buff * skb;
+		u32 offset;
+		unsigned long used;
+		int exhausted;
+		int eaten;
+
+		/* Are we at urgent data? Stop if we have read anything. */
+		if (copied && tp->urg_data && tp->urg_seq == tp->copied_seq)
+			break;
+
+		/* We need to check signals first, to get correct SIGURG
+		 * handling. FIXME: Need to check this doesn't impact 1003.1g
+		 * and move it down to the bottom of the loop
+		 */
+		if (signal_pending(current)) {
+			if (copied)
+				break;
+			copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
+			break;
+		}
+
+		/* Next get a buffer. */
+
+		skb = skb_peek(&sk->receive_queue);
+
+		if (skb == NULL)		/* nothing ready */
+		{
+			if (copied) {
+				if (sk->err ||
+				    sk->state == TCP_CLOSE ||
+				    (sk->shutdown & RCV_SHUTDOWN) ||
+				    !timeo ||
+				    (0))
+					break;
+			} else {
+				if (sk->done)
+					break;
+
+				if (sk->err) {
+					copied = sock_error(sk);
+					break;
+				}
+
+				if (sk->shutdown & RCV_SHUTDOWN)
+					break;
+
+				if (sk->state == TCP_CLOSE) {
+					if (!sk->done) {
+						/* This occurs when user tries to read
+						 * from never connected socket.
+						 */
+						copied = -ENOTCONN;
+						break;
+					}
+					break;
+				}
+
+				if (!timeo) {
+					copied = -EAGAIN;
+					break;
+				}
+			}
+
+			cleanup_rbuf(sk, copied);
+			timeo = tcp_data_wait(sk, timeo);
+			continue;
+		}
+
+		BUG_TRAP (atomic_read (&skb->users) == 1);
+
+		exhausted = eaten = 0;
+
+		offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+		if (skb->h.th->syn)
+			offset--;
+
+		used = skb->len - offset;
+
+		if (tp->urg_data) {
+			u32 urg_offset = tp->urg_seq - tp->copied_seq;
+			if (urg_offset < used) {
+				if (!urg_offset) {		/* at urgent data */
+					if (!sk->urginline) {
+						tp->copied_seq++; /* discard the single byte of urgent data */
+						offset++;
+						used--;
+					}
+				} else				/* truncate read */
+					used = urg_offset;
+			}
+		}
+
+		BUG_TRAP (used >= 0);
+		if (len < used)
+			used = len;
+
+		if (used == 0)
+			exhausted = 1;
+		else
+		{
+			if (skb_is_nonlinear (skb))
+			{
+				int rc = skb_linearize (skb, GFP_KERNEL);
+
+				printk ("tcp_recvpackets(): linearising: %d\n", rc);
+
+				if (rc)
+				{
+					if (!copied)
+						copied = rc;
+					break;
+				}
+			}
+
+			if ((offset + used) == skb->len) /* consuming the whole packet */
+			{
+				__skb_unlink (skb, &sk->receive_queue);
+				dst_release (skb->dst);
+				skb_orphan (skb);
+				__skb_pull (skb, offset);
+				__skb_queue_tail (packets, skb);
+				exhausted = eaten = 1;
+			}
+			else			/* consuming only part of the packet */
+			{
+				struct sk_buff *skb2 = skb_clone (skb, GFP_KERNEL);
+
+				if (skb2 == NULL)
+				{
+					if (!copied)
+						copied = -ENOMEM;
+					break;
+				}
+
+				dst_release (skb2->dst);
+				__skb_pull (skb2, offset);
+				__skb_trim (skb2, used);
+				__skb_queue_tail (packets, skb2);
+			}
+
+			tp->copied_seq += used;
+			copied += used;
+			len -= used;
+		}
+
+		if (tp->urg_data && after(tp->copied_seq,tp->urg_seq)) {
+			tp->urg_data = 0;
+			tcp_fast_path_check(sk, tp);
+		}
+
+		if (!exhausted)
+			continue;
+
+		if (skb->h.th->fin)
+		{
+			tp->copied_seq++;
+			if (!eaten)
+				tcp_eat_skb (sk, skb);
+			break;
+		}
+
+		if (!eaten)
+			tcp_eat_skb (sk, skb);
+
+	} while (len > 0);
+
+ out:
+	/* Clean up data we have read: This will do ACK frames. */
+	cleanup_rbuf(sk, copied);
+	TCP_CHECK_TIMER(sk);
+	release_sock(sk);
+	return copied;
+}
+
 /*
  *	State processing on a close. This implements the state shift for
  *	sending our FIN frame. Note that we only send a FIN for some
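
Usage note (not part of the patch above): a minimal caller-side sketch of the
zccd send interface, under the assumptions the skbuff.h comment implies. The
zccd_t is embedded as the first member of a private descriptor so the
completion callback can recover its container; the initial reference taken by
zccd_init() is dropped once the send has been issued, so the destructor runs
only after the last skb fragment referencing the page has been freed
(possibly in softirq context). All "my_"-prefixed names are hypothetical.
Note that tcp_sendpage_zccd() BUG()s unless the route supports scatter/gather
and hardware checksumming, so a real caller checks sk->route_caps before
choosing the zero-copy path.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/tcp.h>

/* Hypothetical caller-side descriptor: the zccd_t MUST come first so
 * my_zcc_complete() can cast the zccd pointer back to the container.
 */
typedef struct {
	zccd_t		zccd;		/* refcount + destructor */
	struct page	*page;		/* page pinned for the send */
} my_zcc_desc_t;

static void my_zcc_complete (zccd_t *d)
{
	my_zcc_desc_t *desc = (my_zcc_desc_t *)d;

	/* No skb references the page any longer: safe to unpin it */
	put_page (desc->page);
	kfree (desc);
}

static int my_zero_copy_send (struct socket *sock, struct page *page,
			      int offset, size_t size)
{
	my_zcc_desc_t	*desc;
	ssize_t		rc;

	desc = kmalloc (sizeof (*desc), GFP_KERNEL);
	if (desc == NULL)
		return -ENOMEM;

	get_page (page);			/* pin until the destructor runs */
	desc->page = page;
	zccd_init (&desc->zccd, my_zcc_complete); /* refcount == 1 */

	/* As with tcp_sendpage(), rc is the number of bytes queued or a
	 * negative error; a short send is possible.
	 */
	rc = tcp_sendpage_zccd (sock, page, offset, size, 0, &desc->zccd);

	/* Drop the initial reference: if do_tcp_sendpages() attached the
	 * zccd to any skb, the destructor fires when the last such skb is
	 * released; otherwise (e.g. error before queueing) it fires here.
	 */
	zccd_put (&desc->zccd);

	return (rc < 0) ? (int)rc : 0;
}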
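On the receive side, tcp_recvpackets() moves whole linearized packets onto
the caller-supplied sk_buff_head, and ownership moves with them: each queued
skb has been pulled (and, for a partial read, trimmed) to exactly the payload
the caller may consume, and must be freed by the caller. A hedged sketch;
my_recv_packets and my_process_payload are hypothetical:

static void my_process_payload (void *data, unsigned int nob); /* hypothetical consumer */

static int my_recv_packets (struct sock *sk, int len)
{
	struct sk_buff_head	packets;
	struct sk_buff		*skb;
	int			rc;

	skb_queue_head_init (&packets);

	/* rc is the number of payload bytes dequeued, or a -ve error */
	rc = tcp_recvpackets (sk, &packets, len, /* nonblock */ 1);

	/* The packets were linearized, so skb->data/skb->len covers each
	 * skb's entire (contiguous) payload.
	 */
	while ((skb = skb_dequeue (&packets)) != NULL) {
		my_process_payload (skb->data, skb->len);
		kfree_skb (skb);	/* caller owns the dequeued skbs */
	}

	return rc;
}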