From: pschwan
Date: Tue, 1 Oct 2002 17:50:22 +0000 (+0000)
Subject: - ChangeLog update for 0.5.13
X-Git-Tag: v1_7_100~4645
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=9c6be6f3554365ceab4065467ad4d565141d0cb0;p=fs%2Flustre-release.git

- ChangeLog update for 0.5.13
- Small formatting and wrapping changes that litter my tree
- Fixed unbalanced ENTRY/EXIT in my last symlink checkin
- Add a little more debugging info, to help track down an MDS LBUG
---

diff --git a/lustre/ChangeLog b/lustre/ChangeLog
index 34e194c..e79307d 100644
--- a/lustre/ChangeLog
+++ b/lustre/ChangeLog
@@ -1,3 +1,23 @@
+2002-10-01 Phil Schwan
+       * version 0_5_13
+       * bug fixes:
+         - locks would be cancelled without throwing away data pages,
+           resulting in inconsistent data (605627)
+         - inode attributes were not always being refreshed (605627, 612449)
+         - lconf now continues to cleanup after lctl reports an error
+         - MDS now enforces user permissions (602707)
+         - lprocfs cleanup fixed, but not yet enabled (614157)
+         - fixed infinite server hang, should a client not respond to an AST
+         - avoid going into recovery if user calls readlink() with a buffer
+           that's too small (613941)
+         - AST RPCs no longer require replies (614867) -- this may be changed
+         - don't crash server if client sends an IOV that's too big (611336)
+         - fixed lock conversion deadlock (611892)
+         - fixed the following of symlinks (614622)
+       * recovery: the server can remove locks from a client that dies, other
+         clients can make progress
+       * more extN patch fixes
+
 2002-09-20 Andreas Dilger
        * version v0_5_12
        * bug fix
diff --git a/lustre/llite/symlink.c b/lustre/llite/symlink.c
index afccf2d..8571d39 100644
--- a/lustre/llite/symlink.c
+++ b/lustre/llite/symlink.c
@@ -29,6 +29,7 @@ static int ll_readlink_internal(struct inode *inode,
         struct ll_inode_info *lli = ll_i2info(inode);
         struct ll_sb_info *sbi = ll_i2sbi(inode);
         int rc, len = inode->i_size + 1;
+        ENTRY;
         *request = NULL;
diff --git a/lustre/mds/handler.c b/lustre/mds/handler.c
index 052b6ea..b08bc26 100644
--- a/lustre/mds/handler.c
+++ b/lustre/mds/handler.c
@@ -1020,7 +1020,8 @@ int mds_handle(struct ptlrpc_request *req)
         }
 out:
         if (rc) {
-                CERROR("mds: processing error %d\n", rc);
+                CERROR("mds: processing error (opcode %d): %d\n",
+                       req->rq_reqmsg->opc, rc);
                 ptlrpc_error(req->rq_svc, req);
         } else {
                 CDEBUG(D_NET, "sending reply\n");
diff --git a/lustre/obdclass/lprocfs.c b/lustre/obdclass/lprocfs.c
index 5b0d91c..f5486f3 100644
--- a/lustre/obdclass/lprocfs.c
+++ b/lustre/obdclass/lprocfs.c
@@ -387,9 +387,9 @@ int lprocfs_reg_dev(struct obd_device* device, lprocfs_group_t* namespace,
         DEV_PROF_START(mds, device, gen, open);
-        for(i = 0; i < 50; i++){
+        for (i = 0; i < 50; i++) {
                 DEV_PROF_START(mds, device, gen, close);
-                for(j=0; j<2000; j++)
+                for (j = 0; j < 2000; j++)
                         continue;
                 DEV_PROF_END(mds, device, gen, close);
         }
@@ -398,9 +398,9 @@ int lprocfs_reg_dev(struct obd_device* device, lprocfs_group_t* namespace,
         if (!(strcmp(device->obd_type->typ_name, "ldlm"))) {
                 DEV_PROF_START(ldlm, device, ldlm, mgmt_connect);
-                for(i = 0; i < 200; i++) {
+                for (i = 0; i < 200; i++) {
                         DEV_PROF_START(ldlm, device, ldlm, mgmt_disconnect);
-                        for (j = 0; j< 2000; j++)
+                        for (j = 0; j < 2000; j++)
                                 continue;
                         DEV_PROF_END(ldlm, device, ldlm, mgmt_disconnect);
                 }
@@ -830,13 +830,13 @@ void lprocfs_remove_all(struct proc_dir_entry* root)
 }
-int lprocfs_ll_rd(char* page, char **start, off_t off,
-                  int count, int *eof, void *data)
+int lprocfs_ll_rd(char *page, char **start, off_t off,
+                  int count, int *eof, void *data)
 {
         int len;
-        __u64 *temp = (__u64*)data;
+        __u64 *temp = (__u64 *)data;
-        len = sprintf(page, "%lld\n", *temp);
+        len = snprintf(page, count, "%Lu\n", *temp);
         return len;
 }
diff --git a/lustre/ptlrpc/niobuf.c b/lustre/ptlrpc/niobuf.c
index 0668c2b..ada350b 100644
--- a/lustre/ptlrpc/niobuf.c
+++ b/lustre/ptlrpc/niobuf.c
@@ -158,7 +158,7 @@ int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
         rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                        &desc->bd_md_h);
-        ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */
+        ptlrpc_put_bulk_iov (desc, iov); /*move down to reduce latency to send*/
         if (rc != PTL_OK) {
                 CERROR("PtlMDBind failed: %d\n", rc);
@@ -169,8 +169,8 @@ int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
         remote_id.nid = desc->bd_connection->c_peer.peer_nid;
         remote_id.pid = 0;
-        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid "LPX64" pid %d xid %d\n",
-               desc->bd_md.niov, desc->bd_md.length,
+        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid "LPX64" pid "
+               "%d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
                desc->bd_portal, remote_id.nid, remote_id.pid, xid);
         rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
@@ -195,8 +195,9 @@ int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
         ptl_process_id_t source_id;
         ENTRY;
-        if (desc->bd_page_count > PTL_MD_MAX_IOV) {
-                CERROR("iov longer than %d not supported\n", PTL_MD_MAX_IOV);
+        if (desc->bd_page_count > PTL_MD_MAX_IOV) {
+                CERROR("iov longer than %d pages not supported (count=%d)\n",
+                       PTL_MD_MAX_IOV, desc->bd_page_count);
                 RETURN(-EINVAL);
         }
@@ -336,8 +337,9 @@ int ptl_send_rpc(struct ptlrpc_request *request)
         /* request->rq_repmsg is set only when the reply comes in, in
          * client_packet_callback() */
         if (request->rq_reply_md.start)
-                OBD_FREE(request->rq_reply_md.start, request->rq_replen);
-
+                OBD_FREE(request->rq_reply_md.start,
+                         request->rq_replen);
+
         OBD_ALLOC(repbuf, request->rq_replen);
         if (!repbuf) {
                 LBUG();
@@ -360,7 +362,7 @@ int ptl_send_rpc(struct ptlrpc_request *request)
         request->rq_reply_md.options = PTL_MD_OP_PUT;
         request->rq_reply_md.user_ptr = request;
         request->rq_reply_md.eventq = reply_in_eq;
-
+
         rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                          PTL_UNLINK, &request->rq_reply_md_h);
         if (rc != PTL_OK) {
@@ -368,7 +370,7 @@ int ptl_send_rpc(struct ptlrpc_request *request)
                 LBUG();
                 GOTO(cleanup2, rc);
         }
-
+
         CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                ", portal %u\n", request->rq_replen, request->rq_xid,
@@ -397,7 +399,7 @@ void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
         ptl_handle_md_t md_h;
         LASSERT (atomic_read (&rqbd->rqbd_refcount) == 0);
-
+
         /* Attach the leading ME on which we build the ring */
         rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                          match_id, 0, ~0,
@@ -407,17 +409,17 @@ void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
                 LBUG();
         }
-        dummy.start = rqbd->rqbd_buffer;
-        dummy.length = service->srv_buf_size;
-        dummy.max_size = service->srv_max_req_size;
-        dummy.threshold = PTL_MD_THRESH_INF;
-        dummy.options = PTL_MD_OP_PUT | PTL_MD_MAX_SIZE | PTL_MD_AUTO_UNLINK;
-        dummy.user_ptr = rqbd;
-        dummy.eventq = service->srv_eq_h;
+        dummy.start = rqbd->rqbd_buffer;
+        dummy.length = service->srv_buf_size;
+        dummy.max_size = service->srv_max_req_size;
+        dummy.threshold = PTL_MD_THRESH_INF;
+        dummy.options = PTL_MD_OP_PUT | PTL_MD_MAX_SIZE | PTL_MD_AUTO_UNLINK;
+        dummy.user_ptr = rqbd;
+        dummy.eventq = service->srv_eq_h;
         atomic_inc (&service->srv_nrqbds_receiving);
         atomic_set (&rqbd->rqbd_refcount, 1); /* 1 ref for portals */
-
+
         rc = PtlMDAttach(rqbd->rqbd_me_h, dummy, PTL_UNLINK, &md_h);
         if (rc != PTL_OK) {
                 CERROR("PtlMDAttach failed: %d\n", rc);