Whamcloud - gitweb
b=5902
author phil <phil>
Thu, 5 May 2005 18:07:11 +0000 (18:07 +0000)
committer phil <phil>
Thu, 5 May 2005 18:07:11 +0000 (18:07 +0000)
Remove the tcp-zero-copy patch from the 2.4.21-suse-171 series.  We are
virtually certain that the corruption originates in the TCP stack, and we are
completely unable to reproduce it with zero-copy disabled.  Because this series
is of almost zero interest, just remove the patch and move on.
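
For reference, the patch being deleted below added a zero-copy interface to the SUSE 2.4.21 TCP stack: a reference-counted callback descriptor (zccd_t) attached to skb data, plus tcp_sendpage_zccd() for sends and tcp_recvpackets() for receives.  The following is a minimal caller-side sketch of the send path, assuming that patch is applied; struct my_tx, my_tx_done() and my_zerocopy_send() are hypothetical names used only for illustration, not code from the Lustre tree.

/*
 * Hypothetical caller of the zero-copy send interface added by the
 * (now removed) tcp-zero-copy patch.  The zccd_t is embedded as the
 * first member of a private completion structure so the destructor
 * callback can recover the enclosing object.
 */
#include <linux/socket.h>       /* MSG_DONTWAIT */
#include <linux/net.h>          /* struct socket */
#include <linux/mm.h>           /* struct page */
#include <linux/skbuff.h>       /* zccd_t, zccd_init(), zccd_put() (added by the patch) */
#include <net/tcp.h>            /* tcp_sendpage_zccd() (added by the patch) */

struct my_tx {                          /* per-send bookkeeping (illustrative) */
        zccd_t          tx_zccd;        /* must be first: callback casts back to my_tx */
        struct page     *tx_page;       /* payload page kept pinned for the send */
        /* ... completion flag / wait queue for the sender ... */
};

static void my_tx_done(zccd_t *zccd)
{
        struct my_tx *tx = (struct my_tx *)zccd;

        /* TCP has dropped its last reference to the descriptor, so no skb
         * still points at tx->tx_page; it is now safe to unpin or reuse it. */
        /* ... wake up whoever is waiting on tx ... */
}

static ssize_t my_zerocopy_send(struct socket *sock, struct my_tx *tx,
                                int offset, ssize_t len)
{
        ssize_t rc;

        zccd_init(&tx->tx_zccd, my_tx_done);    /* refcount = 1 (ours) */

        /* Each skb that ends up referencing tx_page takes an extra
         * reference on tx_zccd and drops it when the skb data is freed. */
        rc = tcp_sendpage_zccd(sock, tx->tx_page, offset, len,
                               MSG_DONTWAIT, &tx->tx_zccd);

        zccd_put(&tx->tx_zccd);                 /* drop our reference; my_tx_done()
                                                 * fires once TCP is finished too */
        return rc;
}

The destructor fires only after both the caller's initial reference and every skb reference taken by the TCP stack have been dropped, which is the point at which the payload page may safely be reused.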

lustre/ChangeLog
lustre/kernel_patches/patches/tcp-zero-copy-2.4.21-suse-171.patch [deleted file]
lustre/kernel_patches/series/suse-2.4.21-jvn

diff --git a/lustre/ChangeLog b/lustre/ChangeLog
index 529b618..bb2dc4a 100644
--- a/lustre/ChangeLog
+++ b/lustre/ChangeLog
@@ -52,6 +52,7 @@ tbd         Cluster File Systems, Inc. <info@clusterfs.com>
        - introduce a lookup cache for lconf to avoid repeated DB scans (6204)
        - Vanilla 2.4.29 support
        - increase maximum number of obd devices to 520 (6242)
+       - remove the tcp-zero-copy patch from the suse-2.4 series (5902)
 
 2005-03-22  Cluster File Systems, Inc. <info@clusterfs.com>
        * version 1.4.1
diff --git a/lustre/kernel_patches/patches/tcp-zero-copy-2.4.21-suse-171.patch b/lustre/kernel_patches/patches/tcp-zero-copy-2.4.21-suse-171.patch
deleted file mode 100644
index 8ef6cd2..0000000
--- a/lustre/kernel_patches/patches/tcp-zero-copy-2.4.21-suse-171.patch
+++ /dev/null
@@ -1,456 +0,0 @@
-Index: linux-2.4.21-171/include/linux/skbuff.h
-===================================================================
---- linux-2.4.21-171.orig/include/linux/skbuff.h       2004-03-31 14:58:38.000000000 -0500
-+++ linux-2.4.21-171/include/linux/skbuff.h    2004-04-03 16:43:15.000000000 -0500
-@@ -116,6 +116,30 @@
-       __u16 size;
- };
-+/* Support for callback when skb data has been released */
-+typedef struct zccd                           /* Zero Copy Callback Descriptor */
-+{                                             /* (embed as first member of custom struct) */
-+      atomic_t        zccd_count;             /* reference count */
-+      void           (*zccd_destructor)(struct zccd *); /* callback when refcount reaches zero */
-+} zccd_t;
-+
-+static inline void zccd_init (zccd_t *d, void (*callback)(zccd_t *))
-+{
-+      atomic_set (&d->zccd_count, 1);
-+      d->zccd_destructor = callback;
-+}
-+
-+static inline void zccd_get (zccd_t *d)               /* take a reference */
-+{
-+      atomic_inc (&d->zccd_count);
-+}
-+
-+static inline void zccd_put (zccd_t *d)               /* release a reference */
-+{
-+      if (atomic_dec_and_test (&d->zccd_count))
-+              (d->zccd_destructor)(d);
-+}
-+
- /* This data is invariant across clones and lives at
-  * the end of the header data, ie. at skb->end.
-  */
-@@ -123,6 +147,12 @@
-       atomic_t        dataref;
-       unsigned int    nr_frags;
-       struct sk_buff  *frag_list;
-+      zccd_t          *zccd;                  /* zero copy descriptor */
-+      zccd_t          *zccd2;                 /* 2nd zero copy descriptor */
-+      /* NB we expect zero-copy data to be at least 1 packet, so
-+       * having 2 zccds means we don't unneccessarily split the packet
-+       * where consecutive zero-copy sends abutt.
-+       */
-       skb_frag_t      frags[MAX_SKB_FRAGS];
- };
-Index: linux-2.4.21-171/include/net/tcp.h
-===================================================================
---- linux-2.4.21-171.orig/include/net/tcp.h    2004-03-31 15:07:31.000000000 -0500
-+++ linux-2.4.21-171/include/net/tcp.h 2004-04-03 16:46:15.000000000 -0500
-@@ -646,6 +646,8 @@
- extern int                    tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
- extern ssize_t                        tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
-+extern ssize_t                        tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, ssize_t size,
-+                                                int flags, zccd_t *zccd);
- extern int                    tcp_ioctl(struct sock *sk, 
-                                         int cmd, 
-@@ -742,6 +744,9 @@
-                                           struct msghdr *msg,
-                                           int len, int nonblock, 
-                                           int flags, int *addr_len);
-+extern int                    tcp_recvpackets(struct sock *sk,
-+                                              struct sk_buff_head *packets,
-+                                              int len, int nonblock);
- extern int                    tcp_listen_start(struct sock *sk);
-Index: linux-2.4.21-171/net/netsyms.c
-===================================================================
---- linux-2.4.21-171.orig/net/netsyms.c        2004-02-24 14:03:22.000000000 -0500
-+++ linux-2.4.21-171/net/netsyms.c     2004-04-03 16:13:53.000000000 -0500
-@@ -407,6 +407,8 @@
- #endif
-+EXPORT_SYMBOL(tcp_sendpage_zccd);
-+EXPORT_SYMBOL(tcp_recvpackets);
- EXPORT_SYMBOL(tcp_read_sock);
- EXPORT_SYMBOL(netlink_set_err);
-Index: linux-2.4.21-171/net/core/skbuff.c
-===================================================================
---- linux-2.4.21-171.orig/net/core/skbuff.c    2004-02-24 14:03:22.000000000 -0500
-+++ linux-2.4.21-171/net/core/skbuff.c 2004-04-03 16:13:53.000000000 -0500
-@@ -208,6 +208,8 @@
-       atomic_set(&(skb_shinfo(skb)->dataref), 1);
-       skb_shinfo(skb)->nr_frags = 0;
-       skb_shinfo(skb)->frag_list = NULL;
-+      skb_shinfo(skb)->zccd = NULL;           /* skbuffs kick off with NO user zero copy descriptors */
-+      skb_shinfo(skb)->zccd2 = NULL;
-       return skb;
- nodata:
-@@ -277,6 +279,10 @@
- {
-       if (!skb->cloned ||
-           atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
-+              if (skb_shinfo(skb)->zccd != NULL) /* zero copy callback descriptor? */
-+                      zccd_put (skb_shinfo(skb)->zccd); /* release hold */
-+              if (skb_shinfo(skb)->zccd2 != NULL) /* 2nd zero copy callback descriptor? */
-+                      zccd_put (skb_shinfo(skb)->zccd2); /* release hold */
-               if (skb_shinfo(skb)->nr_frags) {
-                       int i;
-                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-@@ -535,6 +541,8 @@
-       atomic_set(&(skb_shinfo(skb)->dataref), 1);
-       skb_shinfo(skb)->nr_frags = 0;
-       skb_shinfo(skb)->frag_list = NULL;
-+      skb_shinfo(skb)->zccd = NULL;           /* copied data => no user zero copy descriptor */
-+      skb_shinfo(skb)->zccd2 = NULL;
-       /* We are no longer a clone, even if we were. */
-       skb->cloned = 0;
-@@ -581,6 +589,14 @@
-       n->data_len = skb->data_len;
-       n->len = skb->len;
-+      if (skb_shinfo(skb)->zccd != NULL)      /* user zero copy descriptor? */
-+              zccd_get (skb_shinfo(skb)->zccd); /* 1 more ref (pages are shared) */
-+      skb_shinfo(n)->zccd = skb_shinfo(skb)->zccd;
-+
-+      if (skb_shinfo(skb)->zccd2 != NULL)     /* 2nd user zero copy descriptor? */
-+              zccd_get (skb_shinfo(skb)->zccd2); /* 1 more ref (pages are shared) */
-+      skb_shinfo(n)->zccd2 = skb_shinfo(skb)->zccd2;
-+
-       if (skb_shinfo(skb)->nr_frags) {
-               int i;
-@@ -623,6 +639,8 @@
-       u8 *data;
-       int size = nhead + (skb->end - skb->head) + ntail;
-       long off;
-+      zccd_t *zccd = skb_shinfo(skb)->zccd;   /* stash user zero copy descriptor */
-+      zccd_t *zccd2 = skb_shinfo(skb)->zccd2; /* stash 2nd user zero copy descriptor */
-       if (skb_shared(skb))
-               BUG();
-@@ -644,6 +662,11 @@
-       if (skb_shinfo(skb)->frag_list)
-               skb_clone_fraglist(skb);
-+      if (zccd != NULL)                       /* user zero copy descriptor? */
-+              zccd_get (zccd);                /* extra ref (pages are shared) */
-+      if (zccd2 != NULL)                      /* 2nd user zero copy descriptor? */
-+              zccd_get (zccd2);               /* extra ref (pages are shared) */
-+
-       skb_release_data(skb);
-       off = (data+nhead) - skb->head;
-@@ -658,6 +681,8 @@
-       skb->nh.raw += off;
-       skb->cloned = 0;
-       atomic_set(&skb_shinfo(skb)->dataref, 1);
-+      skb_shinfo(skb)->zccd = zccd;
-+      skb_shinfo(skb)->zccd2 = zccd2;
-       return 0;
- nodata:
-Index: linux-2.4.21-171/net/ipv4/tcp.c
-===================================================================
---- linux-2.4.21-171.orig/net/ipv4/tcp.c       2004-02-24 13:42:30.000000000 -0500
-+++ linux-2.4.21-171/net/ipv4/tcp.c    2004-04-03 16:43:05.000000000 -0500
-@@ -748,7 +748,7 @@
-       goto out;
- }
--ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags);
-+ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags, zccd_t *zccd);
- static inline int
- can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
-@@ -827,7 +827,7 @@
-       return err;
- }
--ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags)
-+ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags, zccd_t *zccd)
- {
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-       int mss_now;
-@@ -875,6 +875,17 @@
-                       copy = size;
-               i = skb_shinfo(skb)->nr_frags;
-+
-+              if (zccd != NULL &&             /* this is a zcc I/O */
-+                  skb_shinfo(skb)->zccd != NULL && /* skb is part of a zcc I/O */
-+                  skb_shinfo(skb)->zccd2 != NULL &&
-+                  skb_shinfo(skb)->zccd != zccd && /* not the same one */
-+                  skb_shinfo(skb)->zccd2 != zccd)
-+              {
-+                      tcp_mark_push (tp, skb);
-+                      goto new_segment;
-+              }
-+
-               if (can_coalesce(skb, i, page, offset)) {
-                       skb_shinfo(skb)->frags[i-1].size += copy;
-               } else if (i < MAX_SKB_FRAGS) {
-@@ -885,6 +896,20 @@
-                       goto new_segment;
-               }
-+              if (zccd != NULL &&     /* this is a zcc I/O */
-+                  skb_shinfo(skb)->zccd != zccd && /* not already referencing this zccd */
-+                  skb_shinfo(skb)->zccd2 != zccd)
-+              {
-+                      zccd_get (zccd);        /* bump ref count */
-+
-+                      BUG_TRAP (skb_shinfo(skb)->zccd2 == NULL);
-+
-+                      if (skb_shinfo(skb)->zccd == NULL) /* reference this zccd */
-+                              skb_shinfo(skb)->zccd = zccd;
-+                      else
-+                              skb_shinfo(skb)->zccd2 = zccd;
-+              }
-+
-               skb->len += copy;
-               skb->data_len += copy;
-               skb->ip_summed = CHECKSUM_HW;
-@@ -948,7 +973,28 @@
-       lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
--      res = do_tcp_sendpages(sk, &page, offset, size, flags);
-+      res = do_tcp_sendpages(sk, &page, offset, size, flags, NULL);
-+      TCP_CHECK_TIMER(sk);
-+      release_sock(sk);
-+      return res;
-+}
-+
-+ssize_t tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, ssize_t size,
-+                      int flags, zccd_t *zccd)
-+{
-+      ssize_t res;
-+      struct sock *sk = sock->sk;
-+
-+#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
-+
-+      if (!(sk->route_caps & NETIF_F_SG) ||   /* caller shouldn't waste her time */
-+          !(sk->route_caps & TCP_ZC_CSUM_FLAGS)) /* on double mapping */
-+              BUG ();
-+
-+      lock_sock(sk);
-+      TCP_CHECK_TIMER(sk);
-+
-+      res = do_tcp_sendpages(sk, &page, 0, size, flags, zccd);
-       TCP_CHECK_TIMER(sk);
-       release_sock(sk);
-       return res;
-@@ -1772,6 +1818,202 @@
-       goto out;
- }
-+int tcp_recvpackets (struct sock *sk, struct sk_buff_head *packets,
-+                   int len, int nonblock)
-+{
-+      struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-+      int copied;
-+      long timeo;
-+
-+      BUG_TRAP (len > 0);
-+      /*BUG_TRAP ((flags & (MSG_OOB | MSG_PEEK | MSG_TRUNC)) == 0);*/
-+
-+      lock_sock(sk);
-+
-+      TCP_CHECK_TIMER(sk);
-+
-+      copied = -ENOTCONN;
-+      if (sk->state == TCP_LISTEN)
-+              goto out;
-+
-+      copied = 0;
-+      timeo = sock_rcvtimeo(sk, nonblock);
-+
-+      do {
-+              struct sk_buff * skb;
-+              u32 offset;
-+              unsigned long used;
-+              int exhausted;
-+              int eaten;
-+
-+              /* Are we at urgent data? Stop if we have read anything. */
-+              if (copied && tp->urg_data && tp->urg_seq == tp->copied_seq)
-+                      break;
-+
-+              /* We need to check signals first, to get correct SIGURG
-+               * handling. FIXME: Need to check this doesnt impact 1003.1g
-+               * and move it down to the bottom of the loop
-+               */
-+              if (signal_pending(current)) {
-+                      if (copied)
-+                              break;
-+                      copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
-+                      break;
-+              }
-+
-+              /* Next get a buffer. */
-+
-+              skb = skb_peek(&sk->receive_queue);
-+
-+              if (skb == NULL)                /* nothing ready */
-+              {
-+                      if (copied) {
-+                              if (sk->err ||
-+                                  sk->state == TCP_CLOSE ||
-+                                  (sk->shutdown & RCV_SHUTDOWN) ||
-+                                  !timeo ||
-+                                  (0))
-+                                      break;
-+                      } else {
-+                              if (sk->done)
-+                                      break;
-+
-+                              if (sk->err) {
-+                                      copied = sock_error(sk);
-+                                      break;
-+                              }
-+
-+                              if (sk->shutdown & RCV_SHUTDOWN)
-+                                      break;
-+
-+                              if (sk->state == TCP_CLOSE) {
-+                                      if (!sk->done) {
-+                                              /* This occurs when user tries to read
-+                                               * from never connected socket.
-+                                               */
-+                                              copied = -ENOTCONN;
-+                                              break;
-+                                      }
-+                                      break;
-+                              }
-+
-+                              if (!timeo) {
-+                                      copied = -EAGAIN;
-+                                      break;
-+                              }
-+                      }
-+
-+                      cleanup_rbuf(sk, copied);
-+                      timeo = tcp_data_wait(sk, timeo);
-+                      continue;
-+              }
-+
-+              BUG_TRAP (atomic_read (&skb->users) == 1);
-+
-+              exhausted = eaten = 0;
-+
-+              offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
-+              if (skb->h.th->syn)
-+                      offset--;
-+
-+              used = skb->len - offset;
-+
-+              if (tp->urg_data) {
-+                      u32 urg_offset = tp->urg_seq - tp->copied_seq;
-+                      if (urg_offset < used) {
-+                              if (!urg_offset) { /* at urgent date */
-+                                      if (!sk->urginline) {
-+                                              tp->copied_seq++; /* discard the single byte of urgent data */
-+                                              offset++;
-+                                              used--;
-+                                      }
-+                              } else          /* truncate read */
-+                                      used = urg_offset;
-+                      }
-+              }
-+
-+              BUG_TRAP (used >= 0);
-+              if (len < used)
-+                      used = len;
-+
-+              if (used == 0)
-+                      exhausted = 1;
-+              else
-+              {
-+                      if (skb_is_nonlinear (skb))
-+                      {
-+                              int   rc = skb_linearize (skb, GFP_KERNEL);
-+
-+                              printk ("tcp_recvpackets(): linearising: %d\n", rc);
-+
-+                              if (rc)
-+                              {
-+                                      if (!copied)
-+                                              copied = rc;
-+                                      break;
-+                              }
-+                      }
-+
-+                      if ((offset + used) == skb->len) /* consuming the whole packet */
-+                      {
-+                              __skb_unlink (skb, &sk->receive_queue);
-+                              dst_release (skb->dst);
-+                              skb_orphan (skb);
-+                              __skb_pull (skb, offset);
-+                              __skb_queue_tail (packets, skb);
-+                              exhausted = eaten = 1;
-+                      }
-+                      else                    /* consuming only part of the packet */
-+                      {
-+                              struct sk_buff *skb2 = skb_clone (skb, GFP_KERNEL);
-+
-+                              if (skb2 == NULL)
-+                              {
-+                                      if (!copied)
-+                                              copied = -ENOMEM;
-+                                      break;
-+                              }
-+
-+                              dst_release (skb2->dst);
-+                              __skb_pull (skb2, offset);
-+                              __skb_trim (skb2, used);
-+                              __skb_queue_tail (packets, skb2);
-+                      }
-+
-+                      tp->copied_seq += used;
-+                      copied += used;
-+                      len -= used;
-+              }
-+
-+              if (tp->urg_data && after(tp->copied_seq,tp->urg_seq)) {
-+                      tp->urg_data = 0;
-+                      tcp_fast_path_check(sk, tp);
-+              }
-+
-+              if (!exhausted)
-+                      continue;
-+
-+              if (skb->h.th->fin)
-+              {
-+                      tp->copied_seq++;
-+                      if (!eaten)
-+                              tcp_eat_skb (sk, skb);
-+                      break;
-+              }
-+
-+              if (!eaten)
-+                      tcp_eat_skb (sk, skb);
-+
-+      } while (len > 0);
-+
-+ out:
-+      /* Clean up data we have read: This will do ACK frames. */
-+      cleanup_rbuf(sk, copied);
-+      TCP_CHECK_TIMER(sk);
-+      release_sock(sk);
-+      return copied;
-+}
-+
- /*
-  *    State processing on a close. This implements the state shift for
-  *    sending our FIN frame. Note that we only send a FIN for some
diff --git a/lustre/kernel_patches/series/suse-2.4.21-jvn b/lustre/kernel_patches/series/suse-2.4.21-jvn
index b444822..d6bd7ed 100644
--- a/lustre/kernel_patches/series/suse-2.4.21-jvn
+++ b/lustre/kernel_patches/series/suse-2.4.21-jvn
@@ -17,7 +17,6 @@ ext3-san-2.4.20.patch
 ext3-map_inode_page-2.4.21-suse2.patch 
 ext3-error-export.patch
 iopen-2.4.19-suse.patch
-tcp-zero-copy-2.4.21-suse-171.patch 
 jbd-dont-account-blocks-twice.patch
 jbd-commit-tricks.patch
 ext3-no-write-super-chaos.patch