Index: linux-2.4.19-pre1/include/linux/skbuff.h
===================================================================
--- linux-2.4.19-pre1.orig/include/linux/skbuff.h	2001-11-22 22:46:26.000000000 +0300
+++ linux-2.4.19-pre1/include/linux/skbuff.h	2004-01-14 01:15:13.000000000 +0300
+/* Support for a callback when skb data has been released */
+typedef struct zccd			/* Zero Copy Callback Descriptor */
+{					/* (embed as first member of custom struct) */
+	atomic_t	zccd_count;	/* reference count */
+	void	      (*zccd_destructor)(struct zccd *); /* callback when refcount reaches zero */
+} zccd_t;
+
+static inline void zccd_init (zccd_t *d, void (*callback)(zccd_t *))
+{
+	atomic_set (&d->zccd_count, 1);
+	d->zccd_destructor = callback;
+}
+
+static inline void zccd_get (zccd_t *d)		/* take a reference */
+{
+	atomic_inc (&d->zccd_count);
+}
+
+static inline void zccd_put (zccd_t *d)		/* release a reference */
+{
+	if (atomic_dec_and_test (&d->zccd_count))
+		(d->zccd_destructor)(d);
+}
 /* This data is invariant across clones and lives at
  * the end of the header data, ie. at skb->end.
 	unsigned int	nr_frags;
 	struct sk_buff	*frag_list;
+	zccd_t		*zccd;			/* zero copy descriptor */
+	zccd_t		*zccd2;			/* 2nd zero copy descriptor */
+	/* NB we expect zero-copy data to be at least 1 packet, so
+	 * having 2 zccds means we don't unnecessarily split the packet
+	 * where consecutive zero-copy sends abut.
+	 */
 	skb_frag_t	frags[MAX_SKB_FRAGS];
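
The descriptor is designed to be embedded as the first member of a caller-defined completion structure, so the zccd_t pointer handed to the destructor can be cast back to the enclosing type. A minimal sketch of that pattern (my_tx_desc and my_tx_done are illustrative names, not part of the patch):

	struct my_tx_desc {
		zccd_t		zccd;	/* must be first member, so the cast below is valid */
		struct page	*page;	/* page lent to the network stack */
	};

	static void my_tx_done (zccd_t *d)
	{
		struct my_tx_desc *desc = (struct my_tx_desc *)d;

		/* every skb referencing the page has been released;
		 * the page may now be reused or freed */
		__free_page (desc->page);
		kfree (desc);
	}
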
Index: linux-2.4.19-pre1/include/net/tcp.h
===================================================================
--- linux-2.4.19-pre1.orig/include/net/tcp.h	2001-11-22 22:47:22.000000000 +0300
+++ linux-2.4.19-pre1/include/net/tcp.h	2004-01-14 01:15:13.000000000 +0300
 extern int		tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
 extern ssize_t		tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
+extern ssize_t		tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, size_t size,
+					  int flags, zccd_t *zccd);
 extern int		tcp_ioctl(struct sock *sk,
				  int len, int nonblock,
				  int flags, int *addr_len);
+extern int		tcp_recvpackets(struct sock *sk,
+					struct sk_buff_head *packets,
+					int len, int nonblock);
 extern int		tcp_listen_start(struct sock *sk);
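
A caller drives the send side roughly as follows: initialize the descriptor with its callback, pass it down with each page, then drop the initial reference so the callback fires once TCP has released every skb that still points at the data. A hedged sketch, reusing the hypothetical my_tx_desc from above (error handling elided):

	static int my_zerocopy_send (struct socket *sock, struct page *page,
				     int off, size_t len)
	{
		struct my_tx_desc *desc;
		ssize_t rc;

		desc = kmalloc (sizeof (*desc), GFP_KERNEL);
		if (desc == NULL)
			return -ENOMEM;

		desc->page = page;
		zccd_init (&desc->zccd, my_tx_done);	/* count = 1 (ours) */

		rc = tcp_sendpage_zccd (sock, page, off, len, 0, &desc->zccd);

		zccd_put (&desc->zccd);	/* drop our ref; my_tx_done() runs when
					 * the stack releases its last ref */
		return (rc < 0) ? (int)rc : 0;
	}
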
Index: linux-2.4.19-pre1/net/netsyms.c
===================================================================
--- linux-2.4.19-pre1.orig/net/netsyms.c	2004-01-14 01:10:37.000000000 +0300
+++ linux-2.4.19-pre1/net/netsyms.c	2004-01-14 01:15:54.000000000 +0300
+EXPORT_SYMBOL(tcp_sendpage_zccd);
+EXPORT_SYMBOL(tcp_recvpackets);
 EXPORT_SYMBOL(netlink_set_err);
 EXPORT_SYMBOL(netlink_broadcast);
 EXPORT_SYMBOL(netlink_unicast);
Index: linux-2.4.19-pre1/net/core/skbuff.c
===================================================================
--- linux-2.4.19-pre1.orig/net/core/skbuff.c	2001-12-21 20:42:05.000000000 +0300
+++ linux-2.4.19-pre1/net/core/skbuff.c	2004-01-14 01:15:13.000000000 +0300
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+	skb_shinfo(skb)->zccd = NULL;		/* skbuffs kick off with NO user zero copy descriptors */
+	skb_shinfo(skb)->zccd2 = NULL;
 	    atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+		if (skb_shinfo(skb)->zccd != NULL)	/* zero copy callback descriptor? */
+			zccd_put (skb_shinfo(skb)->zccd);	/* release hold */
+		if (skb_shinfo(skb)->zccd2 != NULL)	/* 2nd zero copy callback descriptor? */
+			zccd_put (skb_shinfo(skb)->zccd2);	/* release hold */
 		if (skb_shinfo(skb)->nr_frags) {
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+	skb_shinfo(skb)->zccd = NULL;		/* copied data => no user zero copy descriptor */
+	skb_shinfo(skb)->zccd2 = NULL;
 	/* We are no longer a clone, even if we were. */
 	n->data_len = skb->data_len;
+	if (skb_shinfo(skb)->zccd != NULL)	/* user zero copy descriptor? */
+		zccd_get (skb_shinfo(skb)->zccd);	/* 1 more ref (pages are shared) */
+	skb_shinfo(n)->zccd = skb_shinfo(skb)->zccd;
+
+	if (skb_shinfo(skb)->zccd2 != NULL)	/* 2nd user zero copy descriptor? */
+		zccd_get (skb_shinfo(skb)->zccd2);	/* 1 more ref (pages are shared) */
+	skb_shinfo(n)->zccd2 = skb_shinfo(skb)->zccd2;
 	if (skb_shinfo(skb)->nr_frags) {
 	int size = nhead + (skb->end - skb->head) + ntail;
+	zccd_t *zccd = skb_shinfo(skb)->zccd;	/* stash user zero copy descriptor */
+	zccd_t *zccd2 = skb_shinfo(skb)->zccd2;	/* stash 2nd user zero copy descriptor */
 	if (skb_shinfo(skb)->frag_list)
 		skb_clone_fraglist(skb);
+	if (zccd != NULL)			/* user zero copy descriptor? */
+		zccd_get (zccd);		/* extra ref (pages are shared) */
+	if (zccd2 != NULL)			/* 2nd user zero copy descriptor? */
+		zccd_get (zccd2);		/* extra ref (pages are shared) */
 	skb_release_data(skb);
 	off = (data+nhead) - skb->head;
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
+	skb_shinfo(skb)->zccd = zccd;
+	skb_shinfo(skb)->zccd2 = zccd2;
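
Taken together, these hooks keep a simple invariant: the descriptor's reference count is one (held by the originator) plus one per shared-info block that stashes the pointer. A sketch of a typical lifetime, using the hypothetical my_tx_desc from above:

	zccd_init (&desc->zccd, my_tx_done);	/* count = 1: originator */

	/* do_tcp_sendpages() stashes the pointer in an skb's shared
	 * info and takes a reference:		count = 2 */

	/* pskb_copy() shares the pages with a second skb, which takes
	 * its own reference:			count = 3 */

	zccd_put (&desc->zccd);			/* originator done: count = 2 */

	/* skb_release_data() on each skb drops one reference; the
	 * final put fires my_tx_done(desc):	count 2 -> 1 -> 0 */
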
Index: linux-2.4.19-pre1/net/ipv4/tcp.c
===================================================================
--- linux-2.4.19-pre1.orig/net/ipv4/tcp.c	2001-12-21 20:42:05.000000000 +0300
+++ linux-2.4.19-pre1/net/ipv4/tcp.c	2004-01-14 01:15:13.000000000 +0300
-ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags);
+ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags, zccd_t *zccd);
 can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
-ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags)
+/* Extra parameter: user zero copy descriptor (or NULL for an ordinary copied send) */
+ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags, zccd_t *zccd)
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 		i = skb_shinfo(skb)->nr_frags;
+		if (zccd != NULL &&			/* this is a zcc I/O */
+		    skb_shinfo(skb)->zccd != NULL &&	/* skb is part of a zcc I/O */
+		    skb_shinfo(skb)->zccd2 != NULL &&
+		    skb_shinfo(skb)->zccd != zccd &&	/* not the same one */
+		    skb_shinfo(skb)->zccd2 != zccd)
+			tcp_mark_push (tp, skb);
 		if (can_coalesce(skb, i, page, offset)) {
 			skb_shinfo(skb)->frags[i-1].size += copy;
 		} else if (i < MAX_SKB_FRAGS) {
+			if (zccd != NULL &&		/* this is a zcc I/O */
+			    skb_shinfo(skb)->zccd != zccd &&	/* not already referencing this zccd */
+			    skb_shinfo(skb)->zccd2 != zccd)
+			{
+				zccd_get (zccd);	/* bump ref count */
+
+				BUG_TRAP (skb_shinfo(skb)->zccd2 == NULL);
+
+				if (skb_shinfo(skb)->zccd == NULL)	/* reference this zccd */
+					skb_shinfo(skb)->zccd = zccd;
+				else
+					skb_shinfo(skb)->zccd2 = zccd;
+			}
 		skb->data_len += copy;
 		skb->ip_summed = CHECKSUM_HW;
-	res = do_tcp_sendpages(sk, &page, offset, size, flags);
+	res = do_tcp_sendpages(sk, &page, offset, size, flags, NULL);
+	TCP_CHECK_TIMER(sk);
+ssize_t tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, size_t size,
+			  int flags, zccd_t *zccd)
+	struct sock *sk = sock->sk;
+
+#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
+
+	if (!(sk->route_caps & NETIF_F_SG) ||		/* caller shouldn't waste her time */
+	    !(sk->route_caps & TCP_ZC_CSUM_FLAGS))	/* on double mapping */
+
+#undef TCP_ZC_CSUM_FLAGS
+
+	TCP_CHECK_TIMER(sk);
+
+	res = do_tcp_sendpages(sk, &page, offset, size, flags, zccd);
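
The attach policy above can be restated compactly: an skb may carry at most two distinct descriptors, a send whose descriptor is already attached costs nothing extra, and a third distinct descriptor forces the current skb to be pushed so a fresh segment can be started. An illustrative helper (not in the patch) expressing the same test:

	/* nonzero if 'zccd' is already on, or can still be attached to, this skb */
	static int zccd_fits (struct skb_shared_info *info, zccd_t *zccd)
	{
		if (zccd == NULL)			/* not a zero-copy send */
			return 1;
		if (info->zccd == zccd || info->zccd2 == zccd)
			return 1;			/* already referenced */
		return (info->zccd == NULL ||		/* a free slot remains */
			info->zccd2 == NULL);
	}
	/* when this test fails, do_tcp_sendpages() marks push and starts a new skb */
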
@@ -1683,6 +1733,202 @@
+int tcp_recvpackets (struct sock *sk, struct sk_buff_head *packets,
+		     int len, int nonblock)
+	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+	BUG_TRAP (len > 0);
+	/*BUG_TRAP ((flags & (MSG_OOB | MSG_PEEK | MSG_TRUNC)) == 0);*/
+	TCP_CHECK_TIMER(sk);
+	copied = -ENOTCONN;
+	if (sk->state == TCP_LISTEN)
+	timeo = sock_rcvtimeo(sk, nonblock);
+		struct sk_buff * skb;
+		unsigned long used;
+		/* Are we at urgent data? Stop if we have read anything. */
+		if (copied && tp->urg_data && tp->urg_seq == tp->copied_seq)
+		/* We need to check signals first, to get correct SIGURG
+		 * handling. FIXME: Need to check this doesn't impact 1003.1g
+		 * and move it down to the bottom of the loop
+		if (signal_pending(current)) {
+			copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
+		/* Next get a buffer. */
+		skb = skb_peek(&sk->receive_queue);
+		if (skb == NULL)		/* nothing ready */
+			    sk->state == TCP_CLOSE ||
+			    (sk->shutdown & RCV_SHUTDOWN) ||
+			copied = sock_error(sk);
+			if (sk->shutdown & RCV_SHUTDOWN)
+			if (sk->state == TCP_CLOSE) {
+				/* This occurs when user tries to read
+				 * from never connected socket.
+				copied = -ENOTCONN;
+			cleanup_rbuf(sk, copied);
+			timeo = tcp_data_wait(sk, timeo);
+		BUG_TRAP (atomic_read (&skb->users) == 1);
+		exhausted = eaten = 0;
+		offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+		if (skb->h.th->syn)
+		used = skb->len - offset;
+		if (tp->urg_data) {
+			u32 urg_offset = tp->urg_seq - tp->copied_seq;
+			if (urg_offset < used) {
+				if (!urg_offset) {	/* at urgent data */
+					if (!sk->urginline) {
+						tp->copied_seq++;	/* discard the single byte of urgent data */
+				} else			/* truncate read */
+		BUG_TRAP (used >= 0);
+		if (skb_is_nonlinear (skb))
+			int rc = skb_linearize (skb, GFP_KERNEL);
+				printk ("tcp_recvpackets(): linearising: %d\n", rc);
+		if ((offset + used) == skb->len)	/* consuming the whole packet */
+			__skb_unlink (skb, &sk->receive_queue);
+			dst_release (skb->dst);
+			__skb_pull (skb, offset);
+			__skb_queue_tail (packets, skb);
+			exhausted = eaten = 1;
+		else				/* consuming only part of the packet */
+			struct sk_buff *skb2 = skb_clone (skb, GFP_KERNEL);
+			dst_release (skb2->dst);
+			__skb_pull (skb2, offset);
+			__skb_trim (skb2, used);
+			__skb_queue_tail (packets, skb2);
+		tp->copied_seq += used;
+		if (tp->urg_data && after(tp->copied_seq,tp->urg_seq)) {
+			tcp_fast_path_check(sk, tp);
+		if (skb->h.th->fin)
+			tcp_eat_skb (sk, skb);
+			tcp_eat_skb (sk, skb);
+	/* Clean up data we have read: This will do ACK frames. */
+	cleanup_rbuf(sk, copied);
+	TCP_CHECK_TIMER(sk);
 * State processing on a close. This implements the state shift for
 * sending our FIN frame. Note that we only send a FIN for some
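
On the receive side, the caller hands tcp_recvpackets() an initialized queue head and drains whole (or trimmed, linearized) packets from it afterwards. A minimal hypothetical consumer (my_process_payload is an illustrative name, not part of the patch):

	static int my_recv_packets (struct socket *sock, int want)
	{
		struct sk_buff_head packets;
		struct sk_buff *skb;
		int rc;

		skb_queue_head_init (&packets);

		rc = tcp_recvpackets (sock->sk, &packets, want, 0 /* block */);

		while ((skb = skb_dequeue (&packets)) != NULL) {
			/* skb->data/skb->len cover exactly the bytes this
			 * call consumed from the stream */
			my_process_payload (skb->data, skb->len);
			kfree_skb (skb);
		}
		return rc;
	}
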