update from b1_4 again.
Severity : enhancement
+Bugzilla : 9461
+Description: Implement 'lfs df' to report actual free space on per-OST basis
+Details : Add sub-command 'df' on 'lfs' to report the disk space usage of
+	      MDS/OSTs. Usage: lfs df [-i][-h]. Command Options: '-i' to report
+	      usage of objects; '-h' to report in human-readable format.
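(Usage note, editor's example: with the implementation in this update, an
invocation such as "lfs df /mnt/lustre" prints one row per MDS/OST UUID
under the header "UUID  1K-blocks  Used  Available  Use%  Mounted on",
followed by an aggregated "filesystem summary:" row; the mount point here
is illustrative.)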
+
+Severity : enhancement
Bugzilla : 7981/8208
Description: Introduced Lustre Networking (LNET)
Details    : LNET is the new networking infrastructure for Lustre; it includes
__u64 mds_last_transno;
__u64 mds_mount_count;
__u64 mds_io_epoch;
+ unsigned long mds_atime_diff;
struct semaphore mds_epoch_sem;
struct ll_fid mds_rootfid;
struct mds_server_data *mds_server_data;
--- /dev/null
+--- linux.orig/include/linux/skbuff.h 2004-11-10 17:02:53.000000000 +0000
++++ linux/include/linux/skbuff.h 2005-02-02 12:09:43.000000000 +0000
+@@ -134,6 +134,30 @@
+ __u16 size;
+ };
+
++/* Support for callback when skb data has been released */
++typedef struct zccd /* Zero Copy Callback Descriptor */
++{ /* (embed as first member of custom struct) */
++ atomic_t zccd_count; /* reference count */
++ void (*zccd_destructor)(struct zccd *); /* callback when refcount reaches zero */
++} zccd_t;
++
++static inline void zccd_init (zccd_t *d, void (*callback)(zccd_t *))
++{
++ atomic_set (&d->zccd_count, 1);
++ d->zccd_destructor = callback;
++}
++
++static inline void zccd_get (zccd_t *d) /* take a reference */
++{
++ atomic_inc (&d->zccd_count);
++}
++
++static inline void zccd_put (zccd_t *d) /* release a reference */
++{
++ if (atomic_dec_and_test (&d->zccd_count))
++ (d->zccd_destructor)(d);
++}
++
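/* Example (editor's sketch, not part of the patch): a hypothetical
 * caller embeds the zccd as the FIRST member of its own state so the
 * destructor can recover the enclosing struct.  The initial reference
 * taken by zccd_init() is dropped once the send has been issued; the
 * callback then fires only after every skb fragment referencing the
 * descriptor has been released.  Note tcp_sendpage_zccd() (added below)
 * BUG()s unless the route supports SG and hardware checksumming.
 * my_tx, my_tx_done and my_send are illustrative names. */

struct my_tx {                          /* hypothetical per-send state */
        zccd_t       tx_zccd;           /* must be the first member */
        struct page *tx_page;           /* payload page being sent */
};

static void my_tx_done(zccd_t *zccd)
{
        struct my_tx *tx = (struct my_tx *)zccd;    /* first member */

        /* the stack no longer references tx_page here; it is now
         * safe to recycle the page and free tx */
}

static ssize_t my_send(struct socket *sock, struct my_tx *tx, size_t len)
{
        ssize_t rc;

        zccd_init(&tx->tx_zccd, my_tx_done);        /* refcount = 1 */
        rc = tcp_sendpage_zccd(sock, tx->tx_page, 0, len,
                               MSG_DONTWAIT, &tx->tx_zccd);
        zccd_put(&tx->tx_zccd);         /* drop initial ref; my_tx_done()
                                         * runs when the last skb lets go */
        return rc;
}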
+ /* This data is invariant across clones and lives at
+ * the end of the header data, ie. at skb->end.
+ */
+@@ -143,6 +167,12 @@
+ unsigned short tso_size;
+ unsigned short tso_segs;
+ struct sk_buff *frag_list;
++ zccd_t *zccd; /* zero copy descriptor */
++ zccd_t *zccd2; /* 2nd zero copy descriptor */
++ /* NB we expect zero-copy data to be at least 1 packet, so
++ * having 2 zccds means we don't unnecessarily split the packet
++ * where consecutive zero-copy sends abut.
++ */
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ };
+
+--- linux.orig/include/net/tcp.h 2004-11-10 17:02:53.000000000 +0000
++++ linux/include/net/tcp.h 2005-02-02 10:12:14.000000000 +0000
+@@ -785,6 +785,8 @@
+ extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t size);
+ extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
++extern ssize_t tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, size_t size,
++ int flags, zccd_t *zccd);
+
+ extern int tcp_ioctl(struct sock *sk,
+ int cmd,
+@@ -881,6 +883,9 @@
+ struct msghdr *msg,
+ size_t len, int nonblock,
+ int flags, int *addr_len);
++extern int tcp_recvpackets(struct sock *sk,
++ struct sk_buff_head *packets,
++ int len, int nonblock);
+
+ extern int tcp_listen_start(struct sock *sk);
+
+--- linux.orig/net/core/skbuff.c 2004-11-10 17:02:53.000000000 +0000
++++ linux/net/core/skbuff.c 2005-02-02 10:12:14.000000000 +0000
+@@ -155,6 +155,8 @@
+ skb_shinfo(skb)->tso_size = 0;
+ skb_shinfo(skb)->tso_segs = 0;
+ skb_shinfo(skb)->frag_list = NULL;
++ skb_shinfo(skb)->zccd = NULL; /* skbuffs kick off with NO user zero copy descriptors */
++ skb_shinfo(skb)->zccd2 = NULL;
+ out:
+ return skb;
+ nodata:
+@@ -189,6 +191,10 @@
+ {
+ if (!skb->cloned ||
+ atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
++ if (skb_shinfo(skb)->zccd != NULL) /* zero copy callback descriptor? */
++ zccd_put (skb_shinfo(skb)->zccd); /* release hold */
++ if (skb_shinfo(skb)->zccd2 != NULL) /* 2nd zero copy callback descriptor? */
++ zccd_put (skb_shinfo(skb)->zccd2); /* release hold */
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+@@ -476,6 +482,14 @@
+ n->data_len = skb->data_len;
+ n->len = skb->len;
+
++ if (skb_shinfo(skb)->zccd != NULL) /* user zero copy descriptor? */
++ zccd_get (skb_shinfo(skb)->zccd); /* 1 more ref (pages are shared) */
++ skb_shinfo(n)->zccd = skb_shinfo(skb)->zccd;
++
++ if (skb_shinfo(skb)->zccd2 != NULL) /* 2nd user zero copy descriptor? */
++ zccd_get (skb_shinfo(skb)->zccd2); /* 1 more ref (pages are shared) */
++ skb_shinfo(n)->zccd2 = skb_shinfo(skb)->zccd2;
++
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+
+@@ -518,6 +532,8 @@
+ u8 *data;
+ int size = nhead + (skb->end - skb->head) + ntail;
+ long off;
++ zccd_t *zccd = skb_shinfo(skb)->zccd; /* stash user zero copy descriptor */
++ zccd_t *zccd2 = skb_shinfo(skb)->zccd2; /* stash 2nd user zero copy descriptor */
+
+ if (skb_shared(skb))
+ BUG();
+@@ -539,6 +555,11 @@
+ if (skb_shinfo(skb)->frag_list)
+ skb_clone_fraglist(skb);
+
++ if (zccd != NULL) /* user zero copy descriptor? */
++ zccd_get (zccd); /* extra ref (pages are shared) */
++ if (zccd2 != NULL) /* 2nd user zero copy descriptor? */
++ zccd_get (zccd2); /* extra ref (pages are shared) */
++
+ skb_release_data(skb);
+
+ off = (data + nhead) - skb->head;
+@@ -552,6 +573,8 @@
+ skb->nh.raw += off;
+ skb->cloned = 0;
+ atomic_set(&skb_shinfo(skb)->dataref, 1);
++ skb_shinfo(skb)->zccd = zccd;
++ skb_shinfo(skb)->zccd2 = zccd2;
+ return 0;
+
+ nodata:
+--- linux.orig/net/core/dev.c 2004-10-18 22:54:08.000000000 +0100
++++ linux/net/core/dev.c 2005-02-02 10:12:14.000000000 +0000
+@@ -1196,6 +1196,8 @@
+ ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
+ ninfo->nr_frags = 0;
+ ninfo->frag_list = NULL;
++ ninfo->zccd = NULL; /* copied data => no user zero copy descriptor */
++ ninfo->zccd2 = NULL;
+
+ /* Offset between the two in bytes */
+ offset = data - skb->head;
+--- linux-2.6.9-org/net/ipv4/tcp.c 2005-05-20 10:09:34.000000000 +0100
++++ ./linux-2.6.9/net/ipv4/tcp.c 2005-05-20 10:22:14.000000000 +0100
+@@ -628,8 +628,9 @@
+ }
+ }
+
++/* Extra parameter: user zero copy descriptor (or NULL if not doing that) */
+ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
+- size_t psize, int flags)
++size_t psize, int flags, zccd_t *zccd)
+ {
+ struct tcp_opt *tp = tcp_sk(sk);
+ int mss_now;
+@@ -676,6 +677,17 @@
+ copy = size;
+
+ i = skb_shinfo(skb)->nr_frags;
++
++ if (zccd != NULL && /* this is a zcc I/O */
++ skb_shinfo(skb)->zccd != NULL && /* skb is part of a zcc I/O */
++ skb_shinfo(skb)->zccd2 != NULL &&
++ skb_shinfo(skb)->zccd != zccd && /* not the same one */
++ skb_shinfo(skb)->zccd2 != zccd)
++ {
++ tcp_mark_push (tp, skb);
++ goto new_segment;
++ }
++
+ can_coalesce = skb_can_coalesce(skb, i, page, offset);
+ if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+ tcp_mark_push(tp, skb);
+@@ -692,6 +704,20 @@
+ skb_fill_page_desc(skb, i, page, offset, copy);
+ }
+
++ if (zccd != NULL && /* this is a zcc I/O */
++ skb_shinfo(skb)->zccd != zccd && /* not already referencing this zccd */
++ skb_shinfo(skb)->zccd2 != zccd)
++ {
++ zccd_get (zccd); /* bump ref count */
++
++ BUG_TRAP (skb_shinfo(skb)->zccd2 == NULL);
++
++ if (skb_shinfo(skb)->zccd == NULL) /* reference this zccd */
++ skb_shinfo(skb)->zccd = zccd;
++ else
++ skb_shinfo(skb)->zccd2 = zccd;
++ }
++
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+@@ -760,7 +786,31 @@
+
+ lock_sock(sk);
+ TCP_CHECK_TIMER(sk);
+- res = do_tcp_sendpages(sk, &page, offset, size, flags);
++ res = do_tcp_sendpages(sk, &page, offset, size, flags, NULL);
++ TCP_CHECK_TIMER(sk);
++ release_sock(sk);
++ return res;
++}
++
++ssize_t tcp_sendpage_zccd(struct socket *sock, struct page *page, int offset, size_t size,
++ int flags, zccd_t *zccd)
++{
++ ssize_t res;
++ struct sock *sk = sock->sk;
++
++#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
++
++ if (!(sk->sk_route_caps & NETIF_F_SG) || /* caller shouldn't waste her time */
++ !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS)) /* on double mapping */
++ BUG ();
++
++#undef TCP_ZC_CSUM_FLAGS
++
++ lock_sock(sk);
++ TCP_CHECK_TIMER(sk);
++
++ res = do_tcp_sendpages(sk, &page, offset, size, flags, zccd);
++
+ TCP_CHECK_TIMER(sk);
+ release_sock(sk);
+ return res;
+@@ -1528,6 +1578,202 @@
+ goto out;
+ }
+
++int tcp_recvpackets (struct sock *sk, struct sk_buff_head *packets,
++ int len, int nonblock)
++{
++ struct tcp_opt *tp = tcp_sk(sk);
++ int copied;
++ long timeo;
++
++ BUG_TRAP (len > 0);
++ /*BUG_TRAP ((flags & (MSG_OOB | MSG_PEEK | MSG_TRUNC)) == 0);*/
++
++ lock_sock(sk);
++
++ TCP_CHECK_TIMER(sk);
++
++ copied = -ENOTCONN;
++ if (sk->sk_state == TCP_LISTEN)
++ goto out;
++
++ copied = 0;
++ timeo = sock_rcvtimeo(sk, nonblock);
++
++ do {
++ struct sk_buff * skb;
++ u32 offset;
++ unsigned long used;
++ int exhausted;
++ int eaten;
++
++ /* Are we at urgent data? Stop if we have read anything. */
++ if (copied && tp->urg_data && tp->urg_seq == tp->copied_seq)
++ break;
++
++ /* We need to check signals first, to get correct SIGURG
++ * handling. FIXME: Need to check this doesn't impact 1003.1g
++ * and move it down to the bottom of the loop
++ */
++ if (signal_pending(current)) {
++ if (copied)
++ break;
++ copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
++ break;
++ }
++
++ /* Next get a buffer. */
++
++ skb = skb_peek(&sk->sk_receive_queue);
++
++ if (skb == NULL) /* nothing ready */
++ {
++ if (copied) {
++ if (sk->sk_err ||
++ sk->sk_state == TCP_CLOSE ||
++ (sk->sk_shutdown & RCV_SHUTDOWN) ||
++ !timeo ||
++ (0))
++ break;
++ } else {
++ if (sock_flag(sk, SOCK_DONE))
++ break;
++
++ if (sk->sk_err) {
++ copied = sock_error(sk);
++ break;
++ }
++
++ if (sk->sk_shutdown & RCV_SHUTDOWN)
++ break;
++
++ if (sk->sk_state == TCP_CLOSE) {
++ if (!(sock_flag(sk, SOCK_DONE))) {
++ /* This occurs when user tries to read
++ * from never connected socket.
++ */
++ copied = -ENOTCONN;
++ break;
++ }
++ break;
++ }
++
++ if (!timeo) {
++ copied = -EAGAIN;
++ break;
++ }
++ }
++
++ cleanup_rbuf(sk, copied);
++ sk_wait_data(sk, &timeo);
++ continue;
++ }
++
++ BUG_TRAP (atomic_read (&skb->users) == 1);
++
++ exhausted = eaten = 0;
++
++ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
++ if (skb->h.th->syn)
++ offset--;
++
++ used = skb->len - offset;
++
++ if (tp->urg_data) {
++ u32 urg_offset = tp->urg_seq - tp->copied_seq;
++ if (urg_offset < used) {
++ if (!urg_offset) { /* at urgent data */
++ if (!(sock_flag(sk, SOCK_URGINLINE))) {
++ tp->copied_seq++; /* discard the single byte of urgent data */
++ offset++;
++ used--;
++ }
++ } else /* truncate read */
++ used = urg_offset;
++ }
++ }
++
++ BUG_TRAP (used >= 0);
++ if (len < used)
++ used = len;
++
++ if (used == 0)
++ exhausted = 1;
++ else
++ {
++ if (skb_is_nonlinear (skb))
++ {
++ int rc = skb_linearize (skb, GFP_KERNEL);
++
++ printk ("tcp_recvpackets(): linearising: %d\n", rc);
++
++ if (rc)
++ {
++ if (!copied)
++ copied = rc;
++ break;
++ }
++ }
++
++ if ((offset + used) == skb->len) /* consuming the whole packet */
++ {
++ __skb_unlink (skb, &sk->sk_receive_queue);
++ dst_release (skb->dst);
++ skb_orphan (skb);
++ __skb_pull (skb, offset);
++ __skb_queue_tail (packets, skb);
++ exhausted = eaten = 1;
++ }
++ else /* consuming only part of the packet */
++ {
++ struct sk_buff *skb2 = skb_clone (skb, GFP_KERNEL);
++
++ if (skb2 == NULL)
++ {
++ if (!copied)
++ copied = -ENOMEM;
++ break;
++ }
++
++ dst_release (skb2->dst);
++ __skb_pull (skb2, offset);
++ __skb_trim (skb2, used);
++ __skb_queue_tail (packets, skb2);
++ }
++
++ tp->copied_seq += used;
++ copied += used;
++ len -= used;
++ }
++
++ if (tp->urg_data && after(tp->copied_seq,tp->urg_seq)) {
++ tp->urg_data = 0;
++ tcp_fast_path_check(sk, tp);
++ }
++
++ if (!exhausted)
++ continue;
++
++ if (skb->h.th->fin)
++ {
++ tp->copied_seq++;
++ if (!eaten)
++ sk_eat_skb (sk, skb);
++ break;
++ }
++
++ if (!eaten)
++ sk_eat_skb (sk, skb);
++
++ } while (len > 0);
++
++ out:
++ /* Clean up data we have read: This will do ACK frames. */
++ cleanup_rbuf(sk, copied);
++ TCP_CHECK_TIMER(sk);
++ release_sock(sk);
++ return copied;
++}
++
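/* Example (editor's sketch, not part of the patch): a hypothetical
 * consumer of tcp_recvpackets().  Whole in-order payload skbs, already
 * pulled past the TCP header and trimmed to the readable length, are
 * queued onto 'packets'; the caller dequeues and frees them.
 * my_recv and my_consume are illustrative names. */

extern void my_consume(unsigned char *data, unsigned int len); /* hypothetical sink */

static int my_recv(struct sock *sk, int want)
{
        struct sk_buff_head packets;
        struct sk_buff *skb;
        int rc;

        skb_queue_head_init(&packets);
        rc = tcp_recvpackets(sk, &packets, want, 0);    /* 0: may block */
        if (rc <= 0)
                return rc;              /* error, EOF or signal */

        while ((skb = __skb_dequeue(&packets)) != NULL) {
                /* skb->data..skb->data + skb->len is stream payload
                 * (linearised by tcp_recvpackets() where needed) */
                my_consume(skb->data, skb->len);
                kfree_skb(skb);
        }
        return rc;                      /* bytes queued and consumed */
}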
+ /*
+ * State processing on a close. This implements the state shift for
+ * sending our FIN frame. Note that we only send a FIN for some
+@@ -2326,6 +2572,8 @@
+ EXPORT_SYMBOL(tcp_recvmsg);
+ EXPORT_SYMBOL(tcp_sendmsg);
+ EXPORT_SYMBOL(tcp_sendpage);
++EXPORT_SYMBOL(tcp_sendpage_zccd);
++EXPORT_SYMBOL(tcp_recvpackets);
+ EXPORT_SYMBOL(tcp_setsockopt);
+ EXPORT_SYMBOL(tcp_shutdown);
+ EXPORT_SYMBOL(tcp_statistics);
}
RETURN(rc);
}
+
int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
if (err)
GOTO(out_mdc, err);
- /* async connect is surely finished by now */
+ /* MDC connect is surely finished by now */
*data = class_exp2cliimp(sbi->ll_mdc_exp)->imp_connect_data;
LASSERT(osfs.os_bsize);
rc = mdc_setattr(sbi->ll_mdc_exp, &op_data,
&attr, NULL, 0, NULL, 0, &req);
- if (rc) {
+ if (rc || lsm == NULL) {
ptlrpc_req_finished(req);
- if (rc != -EPERM && rc != -EACCES)
- CERROR("mdc_setattr fails: rc = %d\n", rc);
obdo_free(oa);
RETURN(rc);
}
spin_lock_init(&mds->mds_transno_lock);
mds->mds_max_mdsize = sizeof(struct lov_mds_md);
mds->mds_max_cookiesize = sizeof(struct llog_cookie);
+ mds->mds_atime_diff = MAX_ATIME_DIFF;
sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
}
#endif
+static int lprocfs_wr_atime_diff(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct obd_device *obd = data;
+ struct mds_obd *mds = &obd->u.mds;
+ char kernbuf[20], *end;
+ unsigned long diff = 0;
+
+ if (count > (sizeof(kernbuf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+
+ kernbuf[count] = '\0';
+
+ diff = simple_strtoul(kernbuf, &end, 0);
+ if (kernbuf == end)
+ return -EINVAL;
+
+ mds->mds_atime_diff = diff;
+ return count;
+}
+
+static int lprocfs_rd_atime_diff(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct obd_device *obd = data;
+ struct mds_obd *mds = &obd->u.mds;
+
+ *eof = 1;
+	return snprintf(page, count, "%lu\n", mds->mds_atime_diff);
+}
+
struct lprocfs_vars lprocfs_mds_obd_vars[] = {
{ "uuid", lprocfs_rd_uuid, 0, 0 },
{ "blocksize", lprocfs_rd_blksize, 0, 0 },
lprocfs_wr_group_upcall, 0},
{ "group_flush", 0, lprocfs_wr_group_flush, 0},
{ "group_info", 0, lprocfs_wr_group_info, 0 },
+ { "atime_diff", lprocfs_rd_atime_diff, lprocfs_wr_atime_diff, 0 },
{ 0 }
};
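(Usage note, editor's example: once registered above, the tunable
presumably appears as /proc/fs/lustre/mds/<mdsname>/atime_diff, so the
atime window can be read with cat or adjusted at runtime, e.g.
"echo 60 > /proc/fs/lustre/mds/mds1/atime_diff"; the path and value here
are illustrative.)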
rc = mds_get_parents_children_locked(obd, mds, &join_rec->jr_fid,
&de_tailparent, &head_fid,
- &de_head, LCK_PW, rec->ur_name,
+ &de_head, LCK_EX, rec->ur_name,
rec->ur_namelen, &de_tail,
NULL, 0, NULL, dlm_handles,
LCK_EX);
if (dlm_handles[0].cookie != 0) {
if (rc)
- ldlm_lock_decref(&dlm_handles[0], LCK_PW);
+ ldlm_lock_decref(&dlm_handles[0], LCK_EX);
else
- ptlrpc_save_lock(req, &dlm_handles[0], LCK_PW);
+ ptlrpc_save_lock(req, &dlm_handles[0], LCK_EX);
}
if (de_tail)
l_dput(de_tail);
* */
LTIME_S(iattr.ia_atime) = request_body->atime;
if ((LTIME_S(iattr.ia_atime) >
- LTIME_S(inode->i_atime) + MAX_ATIME_DIFF) ||
+ LTIME_S(inode->i_atime) + mds->mds_atime_diff) ||
(iattr.ia_valid != 0 &&
LTIME_S(iattr.ia_atime) > LTIME_S(inode->i_atime)))
iattr.ia_valid |= ATTR_ATIME;
struct filter_obd *filter = &exp->exp_obd->u.filter;
struct filter_server_data *fsd = filter->fo_fsd;
int index = le32_to_cpu(fsd->fsd_ost_index);
-
+
if (!(fsd->fsd_feature_compat &
cpu_to_le32(OBD_COMPAT_OST))) {
/* this will only happen on the first connect */
- fsd->fsd_ost_index = le32_to_cpu(data->ocd_index);
+ fsd->fsd_ost_index = cpu_to_le32(data->ocd_index);
fsd->fsd_feature_compat |= cpu_to_le32(OBD_COMPAT_OST);
- filter_update_server_data(exp->exp_obd,
+ filter_update_server_data(exp->exp_obd,
filter->fo_rcvd_filp, fsd, 1);
} else if (index != data->ocd_index) {
LCONSOLE_ERROR("Connection from %s to index "
"%u doesn't match actual OST "
"index %u, bad configuration?\n",
- obd_export_nid2str(exp), index,
+ obd_export_nid2str(exp), index,
data->ocd_index);
RETURN(-EBADF);
}
/* Data stored per server at the head of the last_rcvd file. In le32 order.
* Try to keep this the same as mds_server_data so we might one day merge. */
struct filter_server_data {
- __u8 fsd_uuid[40]; /* server UUID */
- __u64 fsd_last_transno_new;/* future last completed transaction ID */
- __u64 fsd_last_transno; /* last completed transaction ID */
+/* 00*/ __u8 fsd_uuid[40]; /* server UUID */
+/* 28*/ __u64 fsd_last_transno_new;/* future last completed transaction ID */
+/* 30*/ __u64 fsd_last_transno; /* last completed transaction ID */
__u64 fsd_mount_count; /* FILTER incarnation number */
- __u32 fsd_feature_compat; /* compatible feature flags */
+/* 40*/ __u32 fsd_feature_compat; /* compatible feature flags */
__u32 fsd_feature_rocompat;/* read-only compatible feature flags */
__u32 fsd_feature_incompat;/* incompatible feature flags */
__u32 fsd_server_size; /* size of server data area */
- __u32 fsd_client_start; /* start of per-client data area */
+/* 50*/ __u32 fsd_client_start; /* start of per-client data area */
__u16 fsd_client_size; /* size of per-client data area */
__u16 fsd_subdir_count; /* number of subdirectories for objects */
__u64 fsd_catalog_oid; /* recovery catalog object id */
- __u32 fsd_catalog_ogen; /* recovery catalog inode generation */
+/* 60*/ __u32 fsd_catalog_ogen; /* recovery catalog inode generation */
__u8 fsd_peeruuid[40]; /* UUID of MDS associated with this OST */
- __u32 fsd_ost_index; /* index number of OST in LOV */
+/* 8c*/ __u32 fsd_ost_index; /* index number of OST in LOV */
__u32 fsd_mds_index; /* index number of MDS in LMV */
- __u8 fsd_padding[LR_SERVER_SIZE - 148];
+/* 94*/ __u8 fsd_padding[LR_SERVER_SIZE - 148];
};
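/* Example (editor's sketch, not part of the patch): the hex offsets in
 * the comments above can be cross-checked at init time with something
 * like the following; the helper name is illustrative, and LASSERT and
 * offsetof are assumed available as elsewhere in this tree. */
static void filter_fsd_offset_checks(void)
{
        LASSERT(offsetof(struct filter_server_data, fsd_last_transno_new) == 0x28);
        LASSERT(offsetof(struct filter_server_data, fsd_last_transno)     == 0x30);
        LASSERT(offsetof(struct filter_server_data, fsd_feature_compat)   == 0x40);
        LASSERT(offsetof(struct filter_server_data, fsd_client_start)     == 0x50);
        LASSERT(offsetof(struct filter_server_data, fsd_catalog_ogen)     == 0x60);
        LASSERT(offsetof(struct filter_server_data, fsd_ost_index)        == 0x8c);
        LASSERT(offsetof(struct filter_server_data, fsd_padding)          == 0x94);
        LASSERT(sizeof(struct filter_server_data) == LR_SERVER_SIZE);
}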
/* Data stored per client in the last_rcvd file. In le32 order. */
| sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
FOUNDMDS2UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '2p' \
| sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
+ [ -z "$FOUNDMDS1UUID" ] && echo "MDS1 UUID empty" && return 1
+ [ -z "$FOUNDMDS2UUID" ] && echo "MDS2 UUID empty" && return 1
if ([ $EXPECTEDMDS1UUID = $FOUNDMDS1UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS2UUID ]) || \
([ $EXPECTEDMDS1UUID = $FOUNDMDS2UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS1UUID ]); then
echo "Success:long uuid truncated successfully and being unique."
# Bug 113, check that readdir lost recv timeout works.
test_13() {
- mkdir /mnt/lustre/readdir || return 1
- touch /mnt/lustre/readdir/newentry || return
+ mkdir $MOUNT/readdir || return 1
+ touch $MOUNT/readdir/newentry || return
# OBD_FAIL_MDS_READPAGE_NET|OBD_FAIL_ONCE
do_facet mds "sysctl -w lustre.fail_loc=0x80000104"
- ls /mnt/lustre/readdir || return 3
+ ls $MOUNT/readdir || return 3
do_facet mds "sysctl -w lustre.fail_loc=0"
- rm -rf /mnt/lustre/readdir || return 4
+ rm -rf $MOUNT/readdir || return 4
}
run_test 13 "mdc_readpage restart test (bug 1138)"
# Bug 113, check that readdir lost send timeout works.
test_14() {
- mkdir /mnt/lustre/readdir
- touch /mnt/lustre/readdir/newentry
+ mkdir $MOUNT/readdir
+ touch $MOUNT/readdir/newentry
# OBD_FAIL_MDS_SENDPAGE|OBD_FAIL_ONCE
do_facet mds "sysctl -w lustre.fail_loc=0x80000106"
- ls /mnt/lustre/readdir || return 1
+ ls $MOUNT/readdir || return 1
do_facet mds "sysctl -w lustre.fail_loc=0"
}
run_test 14 "mdc_readpage resend test (bug 1138)"
#define CSF "%9s"
#define CDF "%9llu"
#define HSF "%8s"
-#define HDF "%8llu"
+#define HDF "%6.1f"
#define RSF "%5s"
#define RDF "%5d"
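/* Note (editor's sketch): COOK() is defined elsewhere in lfs.c and is
 * not shown in this hunk.  Consistent with the call sites below -- it
 * repeatedly divides its argument by 1024 in place and returns the
 * number of divisions, which indexes the "KMGTPEZY" suffix table -- a
 * plausible definition is: */
#define COOK(value)                                     \
({                                                      \
        int radix = 0;                                  \
        while (value > 1024) {                          \
                value /= 1024;                          \
                radix++;                                \
        }                                               \
        radix;                                          \
})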
static int path2mnt(char *path, FILE *fp, char *mntdir, int dir_len)
{
char rpath[PATH_MAX] = {'\0'};
- struct mntent *mnt, out_mnt = {0};
+ struct mntent *mnt;
int rc, len, out_len = 0;
if (!realpath(path, rpath)) {
if (len > out_len &&
!strncmp(rpath, mnt->mnt_dir, len)) {
out_len = len;
- memcpy(&out_mnt, mnt, sizeof(out_mnt));
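+			/* copy the string now: the mntent strings live in
+			 * getmntent()'s static buffer and are clobbered by
+			 * the next call */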
+ memset(mntdir, 0, dir_len);
+ strncpy(mntdir, mnt->mnt_dir, dir_len);
}
}
mnt = getmntent(fp);
}
- if (out_len > 0) {
- strncpy(mntdir, out_mnt.mnt_dir, dir_len);
+ if (out_len > 0)
return 0;
- }
-
+
+	fprintf(stderr, "error: lfs df: %s is not mounted on a Lustre filesystem\n", path);
return -EINVAL;
}
if (cooked) {
int i;
- i = COOK(total);
+ double total_d, used_d, avail_d;
+
+ total_d = (double)total;
+ i = COOK(total_d);
if (i > 0)
- sprintf(tbuf, HDF"%c", total, suffix[i - 1]);
+ sprintf(tbuf, HDF"%c", total_d, suffix[i - 1]);
else
sprintf(tbuf, CDF, total);
- i = COOK(used);
+ used_d = (double)used;
+ i = COOK(used_d);
if (i > 0)
- sprintf(ubuf, HDF"%c", used, suffix[i - 1]);
+ sprintf(ubuf, HDF"%c", used_d, suffix[i - 1]);
else
sprintf(ubuf, CDF, used);
- i = COOK(avail);
+ avail_d = (double)avail;
+ i = COOK(avail_d);
if (i > 0)
- sprintf(abuf, HDF"%c", avail, suffix[i - 1]);
+ sprintf(abuf, HDF"%c", avail_d, suffix[i - 1]);
else
sprintf(abuf, CDF, avail);
} else {
struct obd_statfs stat_buf;
struct obd_uuid uuid_buf;
__u32 index;
+ __u64 avail_sum, used_sum, total_sum;
+ char tbuf[10], ubuf[10], abuf[10], rbuf[10];
+ double ratio_sum = 0;
int rc;
if (ishow)
"UUID", "1K-blocks", "Used", "Available",
"Use%", "Mounted on");
+ avail_sum = total_sum = 0;
for (index = 0; ; index++) {
memset(&stat_buf, 0, sizeof(struct obd_statfs));
memset(&uuid_buf, 0, sizeof(struct obd_uuid));
uuid_buf.uuid, strerror(-rc), rc);
return rc;
}
+ if (!rc && ishow) {
+ avail_sum += stat_buf.os_ffree;
+ total_sum += stat_buf.os_files;
+ }
}
for (index = 0;;index++) {
strerror(-rc), rc);
return rc;
}
+ if (!rc && !ishow) {
+ __u64 avail, total;
+ avail = stat_buf.os_bavail * stat_buf.os_bsize;
+ avail /= 1024;
+ total = stat_buf.os_blocks * stat_buf.os_bsize;
+ total /= 1024;
+
+ avail_sum += avail;
+ total_sum += total;
+ }
}
+
+ used_sum = total_sum - avail_sum;
+ if (total_sum > 0)
+ ratio_sum = (double)(total_sum - avail_sum) / (double)total_sum;
+ sprintf(rbuf, RDF, (int)(ratio_sum * 100));
+ if (cooked) {
+ int i;
+ char *suffix = "KMGTPEZY";
+ double total_sum_d, used_sum_d, avail_sum_d;
+
+ total_sum_d = (double)total_sum;
+ i = COOK(total_sum_d);
+ if (i > 0)
+ sprintf(tbuf, HDF"%c", total_sum_d, suffix[i - 1]);
+ else
+ sprintf(tbuf, CDF, total_sum);
+
+ used_sum_d = (double)used_sum;
+ i = COOK(used_sum_d);
+ if (i > 0)
+ sprintf(ubuf, HDF"%c", used_sum_d, suffix[i - 1]);
+ else
+ sprintf(ubuf, CDF, used_sum);
+
+ avail_sum_d = (double)avail_sum;
+ i = COOK(avail_sum_d);
+ if (i > 0)
+ sprintf(abuf, HDF"%c", avail_sum_d, suffix[i - 1]);
+ else
+ sprintf(abuf, CDF, avail_sum);
+ } else {
+ sprintf(tbuf, CDF, total_sum);
+ sprintf(ubuf, CDF, used_sum);
+ sprintf(abuf, CDF, avail_sum);
+ }
+
+ printf("\n"UUF" "CSF" "CSF" "CSF" "RSF" %-s\n",
+ "filesystem summary:", tbuf, ubuf, abuf, rbuf, mntdir);
+
return 0;
}
if (check_type)
check_type--;
+ else /* do quotacheck for both user & group quota by default */
+ check_type = 0x02;
if (argc == optind)
return CMD_HELP;