X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fost%2Fost_handler.c;h=e72d6d1610e940f6aa7fa3d8ebc877591d92eb71;hp=f81eede545dd66cf7b96be69a550c5f3babf6134;hb=313f87df046b0dfe31d56b902bb66d9bf98d8709;hpb=9ac6ef3e18cb7977224d411d11ba03b7dcc6d40b diff --git a/lustre/ost/ost_handler.c b/lustre/ost/ost_handler.c index f81eede..e72d6d1 100644 --- a/lustre/ost/ost_handler.c +++ b/lustre/ost/ost_handler.c @@ -1,24 +1,27 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * Copyright (C) 2001, 2002 Cluster File Systems, Inc. + * Copyright (C) 2001-2003 Cluster File Systems, Inc. * Author: Peter J. Braam * Author: Phil Schwan * - * This file is part of Lustre, http://www.lustre.org. + * This file is part of the Lustre file system, http://www.lustre.org + * Lustre is a trademark of Cluster File Systems, Inc. * - * Lustre is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. + * You may have signed or agreed to another license before downloading + * this software. If so, you are bound by the terms and conditions + * of that agreement, and the following does not apply to you. See the + * LICENSE file included with this distribution for more information. * - * Lustre is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * If you did not agree to a different license, then this copy of Lustre + * is open source software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. * - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * In either case, Lustre is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * license text for more details. * * Storage Target Handling functions * Lustre Object Server Module (OST) @@ -30,427 +33,1448 @@ * modules do not have a full method table.) 
*/ -#define EXPORT_SYMTAB +#ifndef EXPORT_SYMTAB +# define EXPORT_SYMTAB +#endif #define DEBUG_SUBSYSTEM S_OST #include -#include -#include -#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include "ost_internal.h" -static int ost_destroy(struct ptlrpc_request *req) +static int oss_num_threads; +CFS_MODULE_PARM(oss_num_threads, "i", int, 0444, + "number of OSS service threads to start"); + +static int ost_num_threads; +CFS_MODULE_PARM(ost_num_threads, "i", int, 0444, + "number of OST service threads to start (deprecated)"); + +static int oss_num_create_threads; +CFS_MODULE_PARM(oss_num_create_threads, "i", int, 0444, + "number of OSS create threads to start"); + +void oti_to_request(struct obd_trans_info *oti, struct ptlrpc_request *req) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; - struct ost_body *body; - int rc, size = sizeof(*body); - ENTRY; + struct oti_req_ack_lock *ack_lock; + int i; + + if (oti == NULL) + return; - body = lustre_msg_buf(req->rq_reqmsg, 0); + if (req->rq_repmsg) + lustre_msg_set_transno(req->rq_repmsg, oti->oti_transno); + req->rq_transno = oti->oti_transno; - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + /* XXX 4 == entries in oti_ack_locks??? */ + for (ack_lock = oti->oti_ack_locks, i = 0; i < 4; i++, ack_lock++) { + if (!ack_lock->mode) + break; + /* XXX not even calling target_send_reply in some cases... */ + ptlrpc_save_lock (req, &ack_lock->lock, ack_lock->mode); + } +} + +static int ost_destroy(struct obd_export *exp, struct ptlrpc_request *req, + struct obd_trans_info *oti) +{ + struct ost_body *body, *repbody; + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) }; + ENTRY; + + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) + RETURN(-EFAULT); + + if (lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1)) { + struct ldlm_request *dlm; + dlm = lustre_swab_reqbuf(req, REQ_REC_OFF + 1, sizeof(*dlm), + lustre_swab_ldlm_request); + if (dlm == NULL) + RETURN (-EFAULT); + ldlm_request_cancel(req, dlm, 0); + } + + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - req->rq_status = obd_destroy(conn, &body->oa, NULL); + if (body->oa.o_valid & OBD_MD_FLCOOKIE) + oti->oti_logcookies = obdo_logcookie(&body->oa); + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); + memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); + req->rq_status = obd_destroy(exp, &body->oa, NULL, oti, NULL); RETURN(0); } -static int ost_getattr(struct ptlrpc_request *req) +static int ost_getattr(struct obd_export *exp, struct ptlrpc_request *req) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct ost_body *body, *repbody; - int rc, size = sizeof(*body); + struct obd_info oinfo = { { { 0 } } }; + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) }; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) + RETURN(-EFAULT); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - repbody = lustre_msg_buf(req->rq_repmsg, 0); - memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_getattr(conn, &repbody->oa, NULL); + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); + repbody->oa = body->oa; + + oinfo.oi_oa = 
&repbody->oa; + if (oinfo.oi_oa->o_valid & OBD_MD_FLOSSCAPA) + oinfo.oi_capa = lustre_unpack_capa(req->rq_reqmsg, + REQ_REC_OFF + 1); + req->rq_status = obd_getattr(exp, &oinfo); RETURN(0); } static int ost_statfs(struct ptlrpc_request *req) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct obd_statfs *osfs; - int rc, size = sizeof(*osfs); + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*osfs) }; ENTRY; - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - osfs = lustre_msg_buf(req->rq_repmsg, 0); - memset(osfs, 0, size); + osfs = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*osfs)); - rc = obd_statfs(conn, osfs); - if (rc) { - CERROR("ost: statfs failed: rc %d\n", rc); - req->rq_status = rc; - RETURN(rc); - } - obd_statfs_pack(osfs, osfs); + req->rq_status = obd_statfs(req->rq_export->exp_obd, osfs, + cfs_time_current_64() - HZ); + if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_ENOSPC)) + osfs->os_bfree = osfs->os_bavail = 64; + if (req->rq_status != 0) + CERROR("ost: statfs failed: rc %d\n", req->rq_status); RETURN(0); } -static int ost_open(struct ptlrpc_request *req) +static int ost_create(struct obd_export *exp, struct ptlrpc_request *req, + struct obd_trans_info *oti) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct ost_body *body, *repbody; - int rc, size = sizeof(*body); + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) }; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) + RETURN(-EFAULT); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - repbody = lustre_msg_buf(req->rq_repmsg, 0); + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_open(conn, &repbody->oa, NULL); + oti->oti_logcookies = obdo_logcookie(&repbody->oa); + req->rq_status = obd_create(exp, &repbody->oa, NULL, oti); + //obd_log_cancel(conn, NULL, 1, oti->oti_logcookies, 0); RETURN(0); } -static int ost_close(struct ptlrpc_request *req) +/* + * Helper function for ost_punch(): if asked by client, acquire [size, EOF] + * lock on the file being truncated. + */ +static int ost_punch_lock_get(struct obd_export *exp, struct obdo *oa, + struct lustre_handle *lh) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; - struct ost_body *body, *repbody; - int rc, size = sizeof(*body); + int flags; + struct ldlm_res_id res_id = { .name = { oa->o_id, 0, oa->o_gr, 0} }; + ldlm_policy_data_t policy; + __u64 start; + __u64 finis; + ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); + LASSERT(!lustre_handle_is_used(lh)); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); - if (rc) - RETURN(rc); + if (!(oa->o_valid & OBD_MD_FLFLAGS) || + !(oa->o_flags & OBD_FL_TRUNCLOCK)) + RETURN(0); - repbody = lustre_msg_buf(req->rq_repmsg, 0); - memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_close(conn, &repbody->oa, NULL); - RETURN(0); + CDEBUG(D_INODE, "OST-side truncate lock.\n"); + + start = oa->o_size; + finis = start + oa->o_blocks; + + /* + * standard truncate optimization: if file body is completely + * destroyed, don't send data back to the server. + */ + flags = (start == 0) ? 
LDLM_AST_DISCARD_DATA : 0; + + policy.l_extent.start = start & CFS_PAGE_MASK; + + /* + * If ->o_blocks is EOF it means "lock till the end of the + * file". Otherwise, it's size of a hole being punched (in bytes) + */ + if (oa->o_blocks == OBD_OBJECT_EOF || finis < start) + policy.l_extent.end = OBD_OBJECT_EOF; + else + policy.l_extent.end = finis | ~CFS_PAGE_MASK; + + RETURN(ldlm_cli_enqueue_local(exp->exp_obd->obd_namespace, &res_id, + LDLM_EXTENT, &policy, LCK_PW, &flags, + ldlm_blocking_ast, ldlm_completion_ast, + ldlm_glimpse_ast, NULL, 0, NULL, lh)); } -static int ost_create(struct ptlrpc_request *req) +/* + * Helper function for ost_punch(): release lock acquired by + * ost_punch_lock_get(), if any. + */ +static void ost_punch_lock_put(struct obd_export *exp, struct obdo *oa, + struct lustre_handle *lh) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; + ENTRY; + if (lustre_handle_is_used(lh)) + ldlm_lock_decref(lh, LCK_PW); + EXIT; +} + +static int ost_punch(struct obd_export *exp, struct ptlrpc_request *req, + struct obd_trans_info *oti) +{ + struct obd_info oinfo = { { { 0 } } }; struct ost_body *body, *repbody; - int rc, size = sizeof(*body); + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) }; + struct lustre_handle lh = {0,}; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); + /* check that we do support OBD_CONNECT_TRUNCLOCK. */ + CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK); + + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) + RETURN(-EFAULT); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + oinfo.oi_oa = &body->oa; + oinfo.oi_policy.l_extent.start = oinfo.oi_oa->o_size; + oinfo.oi_policy.l_extent.end = oinfo.oi_oa->o_blocks; + + if ((oinfo.oi_oa->o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) != + (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) + RETURN(-EINVAL); + + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - repbody = lustre_msg_buf(req->rq_repmsg, 0); - memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_create(conn, &repbody->oa, NULL); - RETURN(0); + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); + rc = ost_punch_lock_get(exp, oinfo.oi_oa, &lh); + if (rc == 0) { + if (oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS && + oinfo.oi_oa->o_flags == OBD_FL_TRUNCLOCK) + /* + * If OBD_FL_TRUNCLOCK is the only bit set in + * ->o_flags, clear OBD_MD_FLFLAGS to avoid falling + * through filter_setattr() to filter_iocontrol(). 
+ */ + oinfo.oi_oa->o_valid &= ~OBD_MD_FLFLAGS; + + if (oinfo.oi_oa->o_valid & OBD_MD_FLOSSCAPA) + oinfo.oi_capa = lustre_unpack_capa(req->rq_reqmsg, + REQ_REC_OFF + 1); + req->rq_status = obd_punch(exp, &oinfo, oti, NULL); + ost_punch_lock_put(exp, oinfo.oi_oa, &lh); + } + repbody->oa = *oinfo.oi_oa; + RETURN(rc); } -static int ost_punch(struct ptlrpc_request *req) +static int ost_sync(struct obd_export *exp, struct ptlrpc_request *req) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct ost_body *body, *repbody; - int rc, size = sizeof(*body); + struct lustre_capa *capa = NULL; + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) }; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) + RETURN(-EFAULT); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + if (body->oa.o_valid & OBD_MD_FLOSSCAPA) + capa = lustre_unpack_capa(req->rq_reqmsg, REQ_REC_OFF + 1); + + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - repbody = lustre_msg_buf(req->rq_repmsg, 0); + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_punch(conn, &repbody->oa, NULL, - repbody->oa.o_blocks, repbody->oa.o_size); + req->rq_status = obd_sync(exp, &repbody->oa, NULL, repbody->oa.o_size, + repbody->oa.o_blocks, capa); RETURN(0); } -static int ost_setattr(struct ptlrpc_request *req) +static int ost_setattr(struct obd_export *exp, struct ptlrpc_request *req, + struct obd_trans_info *oti) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct ost_body *body, *repbody; - int rc, size = sizeof(*body); + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) }; + struct obd_info oinfo = { { { 0 } } }; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) + RETURN(-EFAULT); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + rc = lustre_pack_reply(req, 2, size, NULL); if (rc) RETURN(rc); - repbody = lustre_msg_buf(req->rq_repmsg, 0); - memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_setattr(conn, &repbody->oa, NULL); + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); + repbody->oa = body->oa; + + oinfo.oi_oa = &repbody->oa; + if (oinfo.oi_oa->o_valid & OBD_MD_FLOSSCAPA) + oinfo.oi_capa = lustre_unpack_capa(req->rq_reqmsg, + REQ_REC_OFF + 1); + req->rq_status = obd_setattr(exp, &oinfo, oti); RETURN(0); } static int ost_bulk_timeout(void *data) { - struct ptlrpc_bulk_desc *desc = data; - ENTRY; - CERROR("(not yet) starting recovery of client %p\n", desc->bd_client); + /* We don't fail the connection here, because having the export + * killed makes the (vital) call to commitrw very sad. 
+ */ RETURN(1); } -static int ost_brw_read(struct ptlrpc_request *req) +static int get_per_page_niobufs(struct obd_ioobj *ioo, int nioo, + struct niobuf_remote *rnb, int nrnb, + struct niobuf_remote **pp_rnbp) +{ + /* Copy a remote niobuf, splitting it into page-sized chunks + * and setting ioo[i].ioo_bufcnt accordingly */ + struct niobuf_remote *pp_rnb; + int i; + int j; + int page; + int rnbidx = 0; + int npages = 0; + + /* + * array of sufficient size already preallocated by caller + */ + LASSERT(pp_rnbp != NULL); + LASSERT(*pp_rnbp != NULL); + + /* first count and check the number of pages required */ + for (i = 0; i < nioo; i++) + for (j = 0; j < ioo->ioo_bufcnt; j++, rnbidx++) { + obd_off offset = rnb[rnbidx].offset; + obd_off p0 = offset >> CFS_PAGE_SHIFT; + obd_off pn = (offset + rnb[rnbidx].len - 1) >> + CFS_PAGE_SHIFT; + + LASSERT(rnbidx < nrnb); + + npages += (pn + 1 - p0); + + if (rnb[rnbidx].len == 0) { + CERROR("zero len BRW: obj %d objid "LPX64 + " buf %u\n", i, ioo[i].ioo_id, j); + return -EINVAL; + } + if (j > 0 && + rnb[rnbidx].offset <= rnb[rnbidx-1].offset) { + CERROR("unordered BRW: obj %d objid "LPX64 + " buf %u offset "LPX64" <= "LPX64"\n", + i, ioo[i].ioo_id, j, rnb[rnbidx].offset, + rnb[rnbidx].offset); + return -EINVAL; + } + } + + LASSERT(rnbidx == nrnb); + + if (npages == nrnb) { /* all niobufs are for single pages */ + *pp_rnbp = rnb; + return npages; + } + + pp_rnb = *pp_rnbp; + + /* now do the actual split */ + page = rnbidx = 0; + for (i = 0; i < nioo; i++) { + int obj_pages = 0; + + for (j = 0; j < ioo[i].ioo_bufcnt; j++, rnbidx++) { + obd_off off = rnb[rnbidx].offset; + int nob = rnb[rnbidx].len; + + LASSERT(rnbidx < nrnb); + do { + obd_off poff = off & ~CFS_PAGE_MASK; + int pnob = (poff + nob > CFS_PAGE_SIZE) ? + PAGE_SIZE - poff : nob; + + LASSERT(page < npages); + pp_rnb[page].len = pnob; + pp_rnb[page].offset = off; + pp_rnb[page].flags = rnb[rnbidx].flags; + + CDEBUG(0, " obj %d id "LPX64 + "page %d(%d) "LPX64" for %d, flg %x\n", + i, ioo[i].ioo_id, obj_pages, page, + pp_rnb[page].offset, pp_rnb[page].len, + pp_rnb[page].flags); + page++; + obj_pages++; + + off += pnob; + nob -= pnob; + } while (nob > 0); + LASSERT(nob == 0); + } + ioo[i].ioo_bufcnt = obj_pages; + } + LASSERT(page == npages); + + return npages; +} + +static __u32 ost_checksum_bulk(struct ptlrpc_bulk_desc *desc) +{ + __u32 cksum = ~0; + int i; + + for (i = 0; i < desc->bd_iov_count; i++) { + struct page *page = desc->bd_iov[i].kiov_page; + int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK; + char *ptr = kmap(page) + off; + int len = desc->bd_iov[i].kiov_len; + + /* corrupt the data before we compute the checksum, to + * simulate a client->OST data error */ + if (i == 0 && + OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_CHECKSUM_RECEIVE)) + memcpy(ptr, "bad3", min(4, len)); + cksum = crc32_le(cksum, ptr, len); + /* corrupt the data after we compute the checksum, to + * simulate an OST->client data error */ + if (i == 0 && OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_CHECKSUM_SEND)) + memcpy(ptr, "bad4", min(4, len)); + kunmap(page); + } + + return cksum; +} + +/* + * populate @nio by @nrpages pages from per-thread page pool + */ +static void ost_nio_pages_get(struct ptlrpc_request *req, + struct niobuf_local *nio, int nrpages) +{ + int i; + struct ost_thread_local_cache *tls; + + ENTRY; + + LASSERT(nrpages <= OST_THREAD_POOL_SIZE); + LASSERT(req != NULL); + LASSERT(req->rq_svc_thread != NULL); + + tls = ost_tls(req); + LASSERT(tls != NULL); + + memset(nio, 0, nrpages * sizeof *nio); + for (i = 0; i < 
nrpages; ++ i) { + struct page *page; + + page = tls->page[i]; + LASSERT(page != NULL); + POISON_PAGE(page, 0xf1); + nio[i].page = page; + LL_CDEBUG_PAGE(D_INFO, page, "%d\n", i); + } + EXIT; +} + +/* + * Dual for ost_nio_pages_get(). Poison pages in pool for debugging + */ +static void ost_nio_pages_put(struct ptlrpc_request *req, + struct niobuf_local *nio, int nrpages) +{ + int i; + + ENTRY; + + LASSERT(nrpages <= OST_THREAD_POOL_SIZE); + + for (i = 0; i < nrpages; ++ i) + POISON_PAGE(nio[i].page, 0xf2); + EXIT; +} + +static int ost_brw_lock_get(int mode, struct obd_export *exp, + struct obd_ioobj *obj, struct niobuf_remote *nb, + struct lustre_handle *lh) +{ + int flags = 0; + int nrbufs = obj->ioo_bufcnt; + struct ldlm_res_id res_id = { .name = { obj->ioo_id, 0, + obj->ioo_gr, 0} }; + ldlm_policy_data_t policy; + int i; + + ENTRY; + + LASSERT(mode == LCK_PR || mode == LCK_PW); + LASSERT(!lustre_handle_is_used(lh)); + + if (nrbufs == 0 || !(nb[0].flags & OBD_BRW_SRVLOCK)) + RETURN(0); + + /* EXPENSIVE ASSERTION */ + for (i = 1; i < nrbufs; i ++) + LASSERT((nb[0].flags & OBD_BRW_SRVLOCK) == + (nb[i].flags & OBD_BRW_SRVLOCK)); + + policy.l_extent.start = nb[0].offset & CFS_PAGE_MASK; + policy.l_extent.end = (nb[nrbufs - 1].offset + + nb[nrbufs - 1].len - 1) | ~CFS_PAGE_MASK; + + RETURN(ldlm_cli_enqueue_local(exp->exp_obd->obd_namespace, &res_id, + LDLM_EXTENT, &policy, mode, &flags, + ldlm_blocking_ast, ldlm_completion_ast, + ldlm_glimpse_ast, NULL, 0, NULL, lh)); +} + +static void ost_brw_lock_put(int mode, + struct obd_ioobj *obj, struct niobuf_remote *niob, + struct lustre_handle *lh) +{ + ENTRY; + LASSERT(mode == LCK_PR || mode == LCK_PW); + LASSERT((obj->ioo_bufcnt > 0 && (niob[0].flags & OBD_BRW_SRVLOCK)) == + lustre_handle_is_used(lh)); + if (lustre_handle_is_used(lh)) + ldlm_lock_decref(lh, mode); + EXIT; +} + +struct ost_prolong_data { + struct obd_export *opd_exp; + ldlm_policy_data_t opd_policy; + ldlm_mode_t opd_mode; +}; + +static int ost_prolong_locks_iter(struct ldlm_lock *lock, void *data) +{ + struct ost_prolong_data *opd = data; + + LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); + + if (lock->l_req_mode != lock->l_granted_mode) { + /* scan granted locks only */ + return LDLM_ITER_STOP; + } + + if (lock->l_export != opd->opd_exp) { + /* prolong locks only for given client */ + return LDLM_ITER_CONTINUE; + } + + if (!(lock->l_granted_mode & opd->opd_mode)) { + /* we aren't interesting in all type of locks */ + return LDLM_ITER_CONTINUE; + } + + if (lock->l_policy_data.l_extent.end < opd->opd_policy.l_extent.start || + lock->l_policy_data.l_extent.start > opd->opd_policy.l_extent.end) { + /* the request doesn't cross the lock, skip it */ + return LDLM_ITER_CONTINUE; + } + + if (!(lock->l_flags & LDLM_FL_AST_SENT)) { + /* ignore locks not being cancelled */ + return LDLM_ITER_CONTINUE; + } + + /* OK. 
this is a possible lock the user holds doing I/O + * let's refresh eviction timer for it */ + ldlm_refresh_waiting_lock(lock); + + return LDLM_ITER_CONTINUE; +} + +static void ost_prolong_locks(struct obd_export *exp, struct obd_ioobj *obj, + struct niobuf_remote *nb, ldlm_mode_t mode) +{ + struct ldlm_res_id res_id = { .name = { obj->ioo_id, 0, + obj->ioo_gr, 0} }; + int nrbufs = obj->ioo_bufcnt; + struct ost_prolong_data opd; + + ENTRY; + + opd.opd_mode = mode; + opd.opd_exp = exp; + opd.opd_policy.l_extent.start = nb[0].offset & CFS_PAGE_MASK; + opd.opd_policy.l_extent.end = (nb[nrbufs - 1].offset + + nb[nrbufs - 1].len - 1) | ~CFS_PAGE_MASK; + + CDEBUG(D_DLMTRACE,"refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n", + res_id.name[0], res_id.name[1], opd.opd_policy.l_extent.start, + opd.opd_policy.l_extent.end); + ldlm_resource_iterate(exp->exp_obd->obd_namespace, &res_id, + ost_prolong_locks_iter, &opd); +} + +static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct ptlrpc_bulk_desc *desc; - void *tmp1, *tmp2, *end2; struct niobuf_remote *remote_nb; - struct niobuf_local *local_nb = NULL; + struct niobuf_remote *pp_rnb = NULL; + struct niobuf_local *local_nb; struct obd_ioobj *ioo; - struct ost_body *body; + struct ost_body *body, *repbody; + struct lustre_capa *capa = NULL; struct l_wait_info lwi; - void *desc_priv = NULL; - int rc, cmd, i, j, objcount, niocount, size = sizeof(*body); + struct lustre_handle lockh = { 0 }; + int size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) }; + int comms_error = 0, niocount, npages, nob = 0, rc, i; + int no_reply = 0; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); - tmp1 = lustre_msg_buf(req->rq_reqmsg, 1); - tmp2 = lustre_msg_buf(req->rq_reqmsg, 2); - end2 = (char *)tmp2 + req->rq_reqmsg->buflens[2]; - objcount = req->rq_reqmsg->buflens[1] / sizeof(*ioo); - niocount = req->rq_reqmsg->buflens[2] / sizeof(*remote_nb); - cmd = OBD_BRW_READ; + req->rq_bulk_read = 1; if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_READ_BULK)) - GOTO(out, rc = 0); + GOTO(out, rc = -EIO); - for (i = 0; i < objcount; i++) { - ost_unpack_ioo(&tmp1, &ioo); - if (tmp2 + ioo->ioo_bufcnt > end2) { - LBUG(); - GOTO(out, rc = -EFAULT); - } - for (j = 0; j < ioo->ioo_bufcnt; j++) - ost_unpack_niobuf(&tmp2, &remote_nb); + OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK | OBD_FAIL_ONCE, + (obd_timeout + 1) / 4); + + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) { + CERROR("Missing/short ost_body\n"); + GOTO(out, rc = -EFAULT); } - OBD_ALLOC(local_nb, sizeof(*local_nb) * niocount); - if (local_nb == NULL) - GOTO(out, rc = -ENOMEM); + ioo = lustre_swab_reqbuf(req, REQ_REC_OFF + 1, sizeof(*ioo), + lustre_swab_obd_ioobj); + if (ioo == NULL) { + CERROR("Missing/short ioobj\n"); + GOTO(out, rc = -EFAULT); + } - /* The unpackers move tmp1 and tmp2, so reset them before using */ - ioo = lustre_msg_buf(req->rq_reqmsg, 1); - remote_nb = lustre_msg_buf(req->rq_reqmsg, 2); - req->rq_status = obd_preprw(cmd, conn, objcount, ioo, niocount, - remote_nb, local_nb, &desc_priv); + niocount = ioo->ioo_bufcnt; + if (niocount > PTLRPC_MAX_BRW_PAGES) { + DEBUG_REQ(D_ERROR, req, "bulk has too many pages (%d)", + niocount); + GOTO(out, rc = -EFAULT); + } - if (req->rq_status) - GOTO(out, rc = 0); + remote_nb = lustre_swab_reqbuf(req, REQ_REC_OFF + 2, + niocount * sizeof(*remote_nb), + lustre_swab_niobuf_remote); + if (remote_nb == NULL) { + 
CERROR("Missing/short niobuf\n"); + GOTO(out, rc = -EFAULT); + } + if (lustre_msg_swabbed(req->rq_reqmsg)) { /* swab remaining niobufs */ + for (i = 1; i < niocount; i++) + lustre_swab_niobuf_remote (&remote_nb[i]); + } + + if (body->oa.o_valid & OBD_MD_FLOSSCAPA) + capa = lustre_unpack_capa(req->rq_reqmsg, REQ_REC_OFF + 3); + + rc = lustre_pack_reply(req, 2, size, NULL); + if (rc) + GOTO(out, rc); + + /* + * Per-thread array of struct niobuf_{local,remote}'s was allocated by + * ost_thread_init(). + */ + local_nb = ost_tls(req)->local; + pp_rnb = ost_tls(req)->remote; + + /* FIXME all niobuf splitting should be done in obdfilter if needed */ + /* CAVEAT EMPTOR this sets ioo->ioo_bufcnt to # pages */ + npages = get_per_page_niobufs(ioo, 1, remote_nb, niocount, &pp_rnb); + if (npages < 0) + GOTO(out, rc = npages); - desc = ptlrpc_prep_bulk(req->rq_connection); + LASSERT(npages <= OST_THREAD_POOL_SIZE); + + ost_nio_pages_get(req, local_nb, npages); + + desc = ptlrpc_prep_bulk_exp(req, npages, + BULK_PUT_SOURCE, OST_BULK_PORTAL); if (desc == NULL) - GOTO(out_local, rc = -ENOMEM); - desc->bd_portal = OST_BULK_PORTAL; + GOTO(out, rc = -ENOMEM); - for (i = 0; i < niocount; i++) { - struct ptlrpc_bulk_page *bulk = ptlrpc_prep_bulk_page(desc); + rc = ost_brw_lock_get(LCK_PR, req->rq_export, ioo, pp_rnb, &lockh); + if (rc != 0) + GOTO(out_bulk, rc); - if (bulk == NULL) - GOTO(out_bulk, rc = -ENOMEM); - bulk->bp_xid = remote_nb[i].xid; - bulk->bp_buf = local_nb[i].addr; - bulk->bp_buflen = remote_nb[i].len; + /* + * If getting the lock took more time than + * client was willing to wait, drop it. b=11330 + */ + if (cfs_time_current_sec() > req->rq_arrival_time.tv_sec + obd_timeout || + OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) { + no_reply = 1; + CERROR("Dropping timed-out read from %s because locking" + "object "LPX64" took %ld seconds.\n", + libcfs_id2str(req->rq_peer), ioo->ioo_id, + cfs_time_current_sec() - req->rq_arrival_time.tv_sec); + goto out_lock; } - rc = ptlrpc_send_bulk(desc); - if (rc) - GOTO(out_bulk, rc); + rc = obd_preprw(OBD_BRW_READ, req->rq_export, &body->oa, 1, + ioo, npages, pp_rnb, local_nb, oti, capa); + if (rc != 0) + GOTO(out_lock, rc); - lwi = LWI_TIMEOUT(obd_timeout * HZ, ost_bulk_timeout, desc); - rc = l_wait_event(desc->bd_waitq, desc->bd_flags &PTL_BULK_FL_SENT, &lwi); - if (rc) { - LASSERT(rc == -ETIMEDOUT); - GOTO(out_bulk, rc); + ost_prolong_locks(req->rq_export, ioo, pp_rnb, LCK_PW | LCK_PR); + + nob = 0; + for (i = 0; i < npages; i++) { + int page_rc = local_nb[i].rc; + + if (page_rc < 0) { /* error */ + rc = page_rc; + break; + } + + LASSERTF(page_rc <= pp_rnb[i].len, "page_rc (%d) > " + "pp_rnb[%d].len (%d)\n", page_rc, i, pp_rnb[i].len); + nob += page_rc; + if (page_rc != 0) { /* some data! 
*/ + LASSERT (local_nb[i].page != NULL); + ptlrpc_prep_bulk_page(desc, local_nb[i].page, + pp_rnb[i].offset & ~CFS_PAGE_MASK, + page_rc); + } + + if (page_rc != pp_rnb[i].len) { /* short read */ + /* All subsequent pages should be 0 */ + while(++i < npages) + LASSERT(local_nb[i].rc == 0); + break; + } } - req->rq_status = obd_commitrw(cmd, conn, objcount, ioo, niocount, - local_nb, desc_priv); + if (unlikely(body->oa.o_valid & OBD_MD_FLCKSUM)) { + body->oa.o_cksum = ost_checksum_bulk(desc); + body->oa.o_valid = OBD_MD_FLCKSUM; + CDEBUG(D_PAGE,"checksum at read origin: %x\n",body->oa.o_cksum); + } else { + body->oa.o_valid = 0; + } + /* We're finishing using body->oa as an input variable */ + + /* Check if client was evicted while we were doing i/o before touching + network */ + if (rc == 0) { + if (desc->bd_export->exp_failed) + rc = -ENOTCONN; + else { + sptlrpc_svc_wrap_bulk(req, desc); + + rc = ptlrpc_start_bulk_transfer(desc); + } + + if (rc == 0) { + lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ, + ost_bulk_timeout, desc); + rc = l_wait_event(desc->bd_waitq, + !ptlrpc_bulk_active(desc) || + desc->bd_export->exp_failed, &lwi); + LASSERT(rc == 0 || rc == -ETIMEDOUT); + if (rc == -ETIMEDOUT) { + DEBUG_REQ(D_ERROR, req, "timeout on bulk PUT"); + ptlrpc_abort_bulk(desc); + } else if (desc->bd_export->exp_failed) { + DEBUG_REQ(D_ERROR, req, "Eviction on bulk PUT"); + rc = -ENOTCONN; + ptlrpc_abort_bulk(desc); + } else if (!desc->bd_success || + desc->bd_nob_transferred != desc->bd_nob) { + DEBUG_REQ(D_ERROR, req, "%s bulk PUT %d(%d)", + desc->bd_success ? + "truncated" : "network error on", + desc->bd_nob_transferred, + desc->bd_nob); + /* XXX should this be a different errno? */ + rc = -ETIMEDOUT; + } + } else { + DEBUG_REQ(D_ERROR, req, "bulk PUT failed: rc %d", rc); + } + comms_error = rc != 0; + } + + /* Must commit after prep above in all cases */ + rc = obd_commitrw(OBD_BRW_READ, req->rq_export, &body->oa, 1, + ioo, npages, local_nb, oti, rc); - rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg); + ost_nio_pages_put(req, local_nb, npages); + + if (rc == 0) { + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); + memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa)); + } +out_lock: + ost_brw_lock_put(LCK_PR, ioo, pp_rnb, &lockh); out_bulk: ptlrpc_free_bulk(desc); -out_local: - OBD_FREE(local_nb, sizeof(*local_nb) * niocount); + if (no_reply) + RETURN(rc); out: - if (rc) - ptlrpc_error(req->rq_svc, req); - else - ptlrpc_reply(req->rq_svc, req); + LASSERT(rc <= 0); + if (rc == 0) { + req->rq_status = nob; + target_committed_to_req(req); + ptlrpc_reply(req); + } else if (!comms_error) { + /* Only reply if there was no comms problem with bulk */ + target_committed_to_req(req); + req->rq_status = rc; + ptlrpc_error(req); + } else { + if (req->rq_reply_state != NULL) { + /* reply out callback would free */ + ptlrpc_rs_decref(req->rq_reply_state); + req->rq_reply_state = NULL; + } + CWARN("%s: ignoring bulk IO comm error with %s@%s id %s - " + "client will retry\n", + req->rq_export->exp_obd->obd_name, + req->rq_export->exp_client_uuid.uuid, + req->rq_export->exp_connection->c_remote_uuid.uuid, + libcfs_id2str(req->rq_peer)); + } + RETURN(rc); } -static int ost_brw_write(struct ptlrpc_request *req) +static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) { - struct lustre_handle *conn = (struct lustre_handle *)req->rq_reqmsg; struct ptlrpc_bulk_desc *desc; - struct niobuf_remote *remote_nb; - struct niobuf_local 
*local_nb, *lnb; - struct obd_ioobj *ioo; - struct ost_body *body; - int cmd, rc, i, j, objcount, niocount, size[2] = {sizeof(*body)}; - void *tmp1, *tmp2, *end2; - void *desc_priv = NULL; - int reply_sent = 0; - struct ptlrpc_service *srv; - struct l_wait_info lwi; - __u32 xid; + struct niobuf_remote *remote_nb; + struct niobuf_remote *pp_rnb; + struct niobuf_local *local_nb; + struct obd_ioobj *ioo; + struct ost_body *body, *repbody; + struct l_wait_info lwi; + struct lustre_handle lockh = {0}; + struct lustre_capa *capa = NULL; + __u32 *rcs; + int size[3] = { sizeof(struct ptlrpc_body), sizeof(*body) }; + int objcount, niocount, npages, comms_error = 0; + int rc, swab, i, j; + obd_count client_cksum, server_cksum = 0; + int no_reply = 0; ENTRY; - body = lustre_msg_buf(req->rq_reqmsg, 0); - tmp1 = lustre_msg_buf(req->rq_reqmsg, 1); - tmp2 = lustre_msg_buf(req->rq_reqmsg, 2); - end2 = (char *)tmp2 + req->rq_reqmsg->buflens[2]; - objcount = req->rq_reqmsg->buflens[1] / sizeof(*ioo); - niocount = req->rq_reqmsg->buflens[2] / sizeof(*remote_nb); - cmd = OBD_BRW_WRITE; - - for (i = 0; i < objcount; i++) { - ost_unpack_ioo((void *)&tmp1, &ioo); - if (tmp2 + ioo->ioo_bufcnt > end2) { - rc = -EFAULT; - break; + req->rq_bulk_write = 1; + + if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK)) + GOTO(out, rc = -EIO); + + /* pause before transaction has been started */ + OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK | OBD_FAIL_ONCE, + (obd_timeout + 1) / 4); + + swab = lustre_msg_swabbed(req->rq_reqmsg); + body = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*body), + lustre_swab_ost_body); + if (body == NULL) { + CERROR("Missing/short ost_body\n"); + GOTO(out, rc = -EFAULT); + } + + LASSERT_REQSWAB(req, REQ_REC_OFF + 1); + objcount = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1) / + sizeof(*ioo); + if (objcount == 0) { + CERROR("Missing/short ioobj\n"); + GOTO(out, rc = -EFAULT); + } + if (objcount > 1) { + CERROR("too many ioobjs (%d)\n", objcount); + GOTO(out, rc = -EFAULT); + } + + ioo = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, + objcount * sizeof(*ioo)); + LASSERT (ioo != NULL); + for (niocount = i = 0; i < objcount; i++) { + if (swab) + lustre_swab_obd_ioobj(&ioo[i]); + if (ioo[i].ioo_bufcnt == 0) { + CERROR("ioo[%d] has zero bufcnt\n", i); + GOTO(out, rc = -EFAULT); } - for (j = 0; j < ioo->ioo_bufcnt; j++) - ost_unpack_niobuf((void *)&tmp2, &remote_nb); + niocount += ioo[i].ioo_bufcnt; } - size[1] = niocount * sizeof(*remote_nb); - rc = lustre_pack_msg(2, size, NULL, &req->rq_replen, &req->rq_repmsg); - if (rc) + if (niocount > PTLRPC_MAX_BRW_PAGES) { + DEBUG_REQ(D_ERROR, req, "bulk has too many pages (%d)", + niocount); + GOTO(out, rc = -EFAULT); + } + + remote_nb = lustre_swab_reqbuf(req, REQ_REC_OFF + 2, + niocount * sizeof(*remote_nb), + lustre_swab_niobuf_remote); + if (remote_nb == NULL) { + CERROR("Missing/short niobuf\n"); + GOTO(out, rc = -EFAULT); + } + if (swab) { /* swab the remaining niobufs */ + for (i = 1; i < niocount; i++) + lustre_swab_niobuf_remote (&remote_nb[i]); + } + + if (body->oa.o_valid & OBD_MD_FLOSSCAPA) + capa = lustre_unpack_capa(req->rq_reqmsg, REQ_REC_OFF + 3); + + size[REPLY_REC_OFF + 1] = niocount * sizeof(*rcs); + rc = lustre_pack_reply(req, 3, size, NULL); + if (rc != 0) GOTO(out, rc); - remote_nb = lustre_msg_buf(req->rq_repmsg, 1); + rcs = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF + 1, + niocount * sizeof(*rcs)); - OBD_ALLOC(local_nb, niocount * sizeof(*local_nb)); - if (local_nb == NULL) - GOTO(out, rc = -ENOMEM); + /* + * Per-thread array of 
struct niobuf_{local,remote}'s was allocated by + * ost_thread_init(). + */ + local_nb = ost_tls(req)->local; + pp_rnb = ost_tls(req)->remote; - /* The unpackers move tmp1 and tmp2, so reset them before using */ - tmp1 = lustre_msg_buf(req->rq_reqmsg, 1); - tmp2 = lustre_msg_buf(req->rq_reqmsg, 2); - req->rq_status = obd_preprw(cmd, conn, objcount, tmp1, niocount, tmp2, - local_nb, &desc_priv); - if (req->rq_status) - GOTO(out_free, rc = 0); /* XXX is this correct? */ + /* FIXME all niobuf splitting should be done in obdfilter if needed */ + /* CAVEAT EMPTOR this sets ioo->ioo_bufcnt to # pages */ + npages = get_per_page_niobufs(ioo, objcount,remote_nb,niocount,&pp_rnb); + if (npages < 0) + GOTO(out, rc = npages); - if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK)) - GOTO(fail_preprw, rc = 0); + LASSERT(npages <= OST_THREAD_POOL_SIZE); + + ost_nio_pages_get(req, local_nb, npages); - desc = ptlrpc_prep_bulk(req->rq_connection); + desc = ptlrpc_prep_bulk_exp(req, npages, + BULK_GET_SINK, OST_BULK_PORTAL); if (desc == NULL) - GOTO(fail_preprw, rc = -ENOMEM); - desc->bd_cb = NULL; - desc->bd_portal = OSC_BULK_PORTAL; - desc->bd_desc_private = desc_priv; - memcpy(&(desc->bd_conn), &conn, sizeof(conn)); + GOTO(out, rc = -ENOMEM); - srv = req->rq_obd->u.ost.ost_service; - spin_lock(&srv->srv_lock); - xid = srv->srv_xid++; /* single xid for all pages */ - spin_unlock(&srv->srv_lock); + rc = ost_brw_lock_get(LCK_PW, req->rq_export, ioo, pp_rnb, &lockh); + if (rc != 0) + GOTO(out_bulk, rc); + + /* + * If getting the lock took more time than + * client was willing to wait, drop it. b=11330 + */ + if (cfs_time_current_sec() > req->rq_arrival_time.tv_sec + obd_timeout || + OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) { + no_reply = 1; + CERROR("Dropping timed-out write from %s because locking" + "object "LPX64" took %ld seconds.\n", + libcfs_id2str(req->rq_peer), ioo->ioo_id, + cfs_time_current_sec() - req->rq_arrival_time.tv_sec); + goto out_lock; + } - for (i = 0, lnb = local_nb; i < niocount; i++, lnb++) { - struct ptlrpc_bulk_page *bulk; + ost_prolong_locks(req->rq_export, ioo, pp_rnb, LCK_PW); + + /* obd_preprw clobbers oa->valid, so save what we need */ + client_cksum = body->oa.o_valid & OBD_MD_FLCKSUM ? body->oa.o_cksum : 0; + + /* Because we already sync grant info with client when reconnect, + * grant info will be cleared for resent req, then fed_grant and + * total_grant will not be modified in following preprw_write */ + if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) { + DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info\n"); + body->oa.o_valid &= ~OBD_MD_FLGRANT; + } - bulk = ptlrpc_prep_bulk_page(desc); - if (bulk == NULL) - GOTO(fail_bulk, rc = -ENOMEM); + rc = obd_preprw(OBD_BRW_WRITE, req->rq_export, &body->oa, objcount, + ioo, npages, pp_rnb, local_nb, oti, capa); + if (rc != 0) + GOTO(out_lock, rc); - bulk->bp_xid = xid; /* single xid for all pages */ + /* NB Having prepped, we must commit... 
*/ - bulk->bp_buf = lnb->addr; - bulk->bp_page = lnb->page; - bulk->bp_flags = lnb->flags; - bulk->bp_dentry = lnb->dentry; - bulk->bp_buflen = lnb->len; - bulk->bp_cb = NULL; + for (i = 0; i < npages; i++) + ptlrpc_prep_bulk_page(desc, local_nb[i].page, + pp_rnb[i].offset & ~CFS_PAGE_MASK, + pp_rnb[i].len); - /* this advances remote_nb */ - ost_pack_niobuf((void **)&remote_nb, lnb->offset, lnb->len, 0, - bulk->bp_xid); + /* Check if client was evicted while we were doing i/o before touching + network */ + if (desc->bd_export->exp_failed) + rc = -ENOTCONN; + else + rc = ptlrpc_start_bulk_transfer (desc); + if (rc == 0) { + lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 2, HZ, + ost_bulk_timeout, desc); + rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) || + desc->bd_export->exp_failed, &lwi); + LASSERT(rc == 0 || rc == -ETIMEDOUT); + if (rc == -ETIMEDOUT) { + DEBUG_REQ(D_ERROR, req, "timeout on bulk GET"); + ptlrpc_abort_bulk(desc); + } else if (desc->bd_export->exp_failed) { + DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET"); + rc = -ENOTCONN; + ptlrpc_abort_bulk(desc); + } else if (!desc->bd_success || + desc->bd_nob_transferred != desc->bd_nob) { + DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)", + desc->bd_success ? + "truncated" : "network error on", + desc->bd_nob_transferred, desc->bd_nob); + /* XXX should this be a different errno? */ + rc = -ETIMEDOUT; + } + } else { + DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc); + } + comms_error = rc != 0; + + repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, + sizeof(*repbody)); + memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa)); + + if (unlikely(client_cksum != 0 && rc == 0)) { + static int cksum_counter; + server_cksum = ost_checksum_bulk(desc); + repbody->oa.o_valid |= OBD_MD_FLCKSUM; + repbody->oa.o_cksum = server_cksum; + cksum_counter++; + if (unlikely(client_cksum != server_cksum)) { + CERROR("client csum %x, server csum %x\n", + client_cksum, server_cksum); + cksum_counter = 0; + } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){ + CDEBUG(D_INFO, "Checksum %u from %s OK: %x\n", + cksum_counter, libcfs_id2str(req->rq_peer), + server_cksum); + } } - rc = ptlrpc_register_bulk(desc); - if (rc) - GOTO(fail_bulk, rc); + sptlrpc_svc_unwrap_bulk(req, desc); - reply_sent = 1; - ptlrpc_reply(req->rq_svc, req); + /* Must commit after prep above in all cases */ + rc = obd_commitrw(OBD_BRW_WRITE, req->rq_export, &repbody->oa, + objcount, ioo, npages, local_nb, oti, rc); - lwi = LWI_TIMEOUT(obd_timeout * HZ, ost_bulk_timeout, desc); - rc = l_wait_event(desc->bd_waitq, desc->bd_flags & PTL_BULK_FL_RCVD, - &lwi); - if (rc) { - if (rc != -ETIMEDOUT) - LBUG(); - GOTO(fail_bulk, rc); + if (unlikely(client_cksum != server_cksum && rc == 0)) { + int new_cksum = ost_checksum_bulk(desc); + char *msg; + char *via; + char *router; + + if (new_cksum == server_cksum) + msg = "changed in transit before arrival at OST"; + else if (new_cksum == client_cksum) + msg = "initial checksum before message complete"; + else + msg = "changed in transit AND after initial checksum"; + + if (req->rq_peer.nid == desc->bd_sender) { + via = router = ""; + } else { + via = " via "; + router = libcfs_nid2str(desc->bd_sender); + } + + LCONSOLE_ERROR_MSG(0x168, "%s: BAD WRITE CHECKSUM: %s from " + "%s%s%s inum "LPU64"/"LPU64" object " + LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n", + req->rq_export->exp_obd->obd_name, msg, + libcfs_id2str(req->rq_peer), + via, router, + body->oa.o_valid & OBD_MD_FLFID ? 
+ body->oa.o_fid : (__u64)0, + body->oa.o_valid & OBD_MD_FLFID ? + body->oa.o_generation :(__u64)0, + body->oa.o_id, + body->oa.o_valid & OBD_MD_FLGROUP ? + body->oa.o_gr : (__u64)0, + pp_rnb[0].offset, + pp_rnb[npages-1].offset+pp_rnb[npages-1].len + - 1 ); + CERROR("client csum %x, original server csum %x, " + "server csum now %x\n", + client_cksum, server_cksum, new_cksum); + } + + ost_nio_pages_put(req, local_nb, npages); + + if (rc == 0) { + /* set per-requested niobuf return codes */ + for (i = j = 0; i < niocount; i++) { + int nob = remote_nb[i].len; + + rcs[i] = 0; + do { + LASSERT(j < npages); + if (local_nb[j].rc < 0) + rcs[i] = local_nb[j].rc; + nob -= pp_rnb[j].len; + j++; + } while (nob > 0); + LASSERT(nob == 0); + } + LASSERT(j == npages); } - rc = obd_commitrw(cmd, conn, objcount, tmp1, niocount, local_nb, - desc->bd_desc_private); +out_lock: + ost_brw_lock_put(LCK_PW, ioo, pp_rnb, &lockh); +out_bulk: ptlrpc_free_bulk(desc); - EXIT; -out_free: - OBD_FREE(local_nb, niocount * sizeof(*local_nb)); + if (no_reply) + RETURN(rc); out: - if (!reply_sent) { - if (rc) - ptlrpc_error(req->rq_svc, req); - else - ptlrpc_reply(req->rq_svc, req); + if (rc == 0) { + oti_to_request(oti, req); + target_committed_to_req(req); + rc = ptlrpc_reply(req); + } else if (!comms_error) { + /* Only reply if there was no comms problem with bulk */ + target_committed_to_req(req); + req->rq_status = rc; + ptlrpc_error(req); + } else { + if (req->rq_reply_state != NULL) { + /* reply out callback would free */ + ptlrpc_rs_decref(req->rq_reply_state); + req->rq_reply_state = NULL; + } + CWARN("%s: ignoring bulk IO comm error with %s@%s id %s - " + "client will retry\n", + req->rq_export->exp_obd->obd_name, + req->rq_export->exp_client_uuid.uuid, + req->rq_export->exp_connection->c_remote_uuid.uuid, + libcfs_id2str(req->rq_peer)); } - return rc; + RETURN(rc); +} -fail_bulk: - ptlrpc_free_bulk(desc); -fail_preprw: - /* FIXME: how do we undo the preprw? 
*/ - goto out_free; +static int ost_set_info(struct obd_export *exp, struct ptlrpc_request *req) +{ + char *key, *val = NULL; + int keylen, vallen, rc = 0; + ENTRY; + + key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1); + if (key == NULL) { + DEBUG_REQ(D_HA, req, "no set_info key"); + RETURN(-EFAULT); + } + keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF); + + rc = lustre_pack_reply(req, 1, NULL, NULL); + if (rc) + RETURN(rc); + + vallen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1); + if (vallen) + val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0); + + if (KEY_IS("evict_by_nid")) { + if (val && vallen) + obd_export_evict_by_nid(exp->exp_obd, val); + + GOTO(out, rc = 0); + } + + rc = obd_set_info_async(exp, keylen, key, vallen, val, NULL); +out: + lustre_msg_set_status(req->rq_repmsg, 0); + RETURN(rc); } -static int ost_handle(struct ptlrpc_request *req) +static int ost_get_info(struct obd_export *exp, struct ptlrpc_request *req) { - int rc; + char *key; + int keylen, rc = 0; + int size[2] = { sizeof(struct ptlrpc_body), sizeof(obd_id) }; + obd_id *reply; + ENTRY; + + key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1); + if (key == NULL) { + DEBUG_REQ(D_HA, req, "no get_info key"); + RETURN(-EFAULT); + } + keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF); + + if (keylen < strlen("last_id") || memcmp(key, "last_id", 7) != 0) + RETURN(-EPROTO); + + rc = lustre_pack_reply(req, 2, size, NULL); + if (rc) + RETURN(rc); + + reply = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*reply)); + rc = obd_get_info(exp, keylen, key, size, reply); + lustre_msg_set_status(req->rq_repmsg, 0); + RETURN(rc); +} + +static int ost_handle_quotactl(struct ptlrpc_request *req) +{ + struct obd_quotactl *oqctl, *repoqc; + int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) }; ENTRY; - rc = lustre_unpack_msg(req->rq_reqmsg, req->rq_reqlen); - if (rc || OBD_FAIL_CHECK(OBD_FAIL_OST_HANDLE_UNPACK)) { - CERROR("lustre_ost: Invalid request\n"); + oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl), + lustre_swab_obd_quotactl); + if (oqctl == NULL) + GOTO(out, rc = -EPROTO); + + rc = lustre_pack_reply(req, 2, size, NULL); + if (rc) GOTO(out, rc); + + repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc)); + + req->rq_status = obd_quotactl(req->rq_export, oqctl); + *repoqc = *oqctl; +out: + RETURN(rc); +} + +static int ost_handle_quotacheck(struct ptlrpc_request *req) +{ + struct obd_quotactl *oqctl; + int rc; + ENTRY; + + oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl), + lustre_swab_obd_quotactl); + if (oqctl == NULL) + RETURN(-EPROTO); + + rc = lustre_pack_reply(req, 1, NULL, NULL); + if (rc) { + CERROR("ost: out of memory while packing quotacheck reply\n"); + RETURN(-ENOMEM); } - if (req->rq_reqmsg->opc != OST_CONNECT && - req->rq_export == NULL) { - CERROR("lustre_ost: operation %d on unconnected OST\n", - req->rq_reqmsg->opc); - GOTO(out, rc = -ENOTCONN); + req->rq_status = obd_quotacheck(req->rq_export, oqctl); + RETURN(0); +} + +static int ost_llog_handle_connect(struct obd_export *exp, + struct ptlrpc_request *req) +{ + struct llogd_conn_body *body; + int rc; + ENTRY; + + body = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*body)); + rc = obd_llog_connect(exp, body); + RETURN(rc); +} + + +static int ost_filter_recovery_request(struct ptlrpc_request *req, + struct obd_device *obd, int *process) +{ + switch (lustre_msg_get_opc(req->rq_reqmsg)) { + case OST_CONNECT: /* This will never get here, but for completeness. 
*/ + case OST_DISCONNECT: + *process = 1; + RETURN(0); + + case OBD_PING: + case OST_CREATE: + case OST_DESTROY: + case OST_PUNCH: + case OST_SETATTR: + case OST_SYNC: + case OST_WRITE: + case OBD_LOG_CANCEL: + case LDLM_ENQUEUE: + *process = target_queue_recovery_request(req, obd); + RETURN(0); + + default: + DEBUG_REQ(D_ERROR, req, "not permitted during recovery"); + *process = -EAGAIN; + RETURN(0); } +} - if (strcmp(req->rq_obd->obd_type->typ_name, "ost") != 0) - GOTO(out, rc = -EINVAL); +int ost_msg_check_version(struct lustre_msg *msg) +{ + int rc; - switch (req->rq_reqmsg->opc) { + switch(lustre_msg_get_opc(msg)) { case OST_CONNECT: + case OST_DISCONNECT: + case OBD_PING: + case SEC_CTX_INIT: + case SEC_CTX_INIT_CONT: + case SEC_CTX_FINI: + rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_OBD_VERSION); + break; + case OST_CREATE: + case OST_DESTROY: + case OST_GETATTR: + case OST_SETATTR: + case OST_WRITE: + case OST_READ: + case OST_PUNCH: + case OST_STATFS: + case OST_SYNC: + case OST_SET_INFO: + case OST_GET_INFO: + case OST_QUOTACHECK: + case OST_QUOTACTL: + rc = lustre_msg_check_version(msg, LUSTRE_OST_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_OST_VERSION); + break; + case LDLM_ENQUEUE: + case LDLM_CONVERT: + case LDLM_CANCEL: + case LDLM_BL_CALLBACK: + case LDLM_CP_CALLBACK: + rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_DLM_VERSION); + break; + case LLOG_ORIGIN_CONNECT: + case OBD_LOG_CANCEL: + rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_LOG_VERSION); + break; + default: + CERROR("Unexpected opcode %d\n", lustre_msg_get_opc(msg)); + rc = -ENOTSUPP; + } + return rc; +} + +int ost_handle(struct ptlrpc_request *req) +{ + struct obd_trans_info trans_info = { 0, }; + struct obd_trans_info *oti = &trans_info; + int should_process, fail = OBD_FAIL_OST_ALL_REPLY_NET, rc = 0; + struct obd_device *obd = NULL; + ENTRY; + + LASSERT(current->journal_info == NULL); + + /* primordial rpcs don't affect server recovery */ + switch (lustre_msg_get_opc(req->rq_reqmsg)) { + case SEC_CTX_INIT: + case SEC_CTX_INIT_CONT: + case SEC_CTX_FINI: + GOTO(out, rc = 0); + } + + /* XXX identical to MDS */ + if (lustre_msg_get_opc(req->rq_reqmsg) != OST_CONNECT) { + int recovering; + + if (req->rq_export == NULL) { + CDEBUG(D_HA,"operation %d on unconnected OST from %s\n", + lustre_msg_get_opc(req->rq_reqmsg), + libcfs_id2str(req->rq_peer)); + req->rq_status = -ENOTCONN; + GOTO(out, rc = -ENOTCONN); + } + + obd = req->rq_export->exp_obd; + + /* Check for aborted recovery. 
*/ + spin_lock_bh(&obd->obd_processing_task_lock); + recovering = obd->obd_recovering; + spin_unlock_bh(&obd->obd_processing_task_lock); + if (recovering) { + rc = ost_filter_recovery_request(req, obd, + &should_process); + if (rc || !should_process) + RETURN(rc); + else if (should_process < 0) { + req->rq_status = should_process; + rc = ptlrpc_error(req); + RETURN(rc); + } + } + } + + oti_init(oti, req); + + rc = ost_msg_check_version(req->rq_reqmsg); + if (rc) + RETURN(rc); + + switch (lustre_msg_get_opc(req->rq_reqmsg)) { + case OST_CONNECT: { CDEBUG(D_INODE, "connect\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_CONNECT_NET, 0); rc = target_handle_connect(req); + if (!rc) + obd = req->rq_export->exp_obd; break; + } case OST_DISCONNECT: CDEBUG(D_INODE, "disconnect\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_DISCONNECT_NET, 0); @@ -459,211 +1483,448 @@ static int ost_handle(struct ptlrpc_request *req) case OST_CREATE: CDEBUG(D_INODE, "create\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_CREATE_NET, 0); - rc = ost_create(req); + if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_ENOSPC)) + GOTO(out, rc = -ENOSPC); + if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_EROFS)) + GOTO(out, rc = -EROFS); + rc = ost_create(req->rq_export, req, oti); break; case OST_DESTROY: CDEBUG(D_INODE, "destroy\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_DESTROY_NET, 0); - rc = ost_destroy(req); + if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_EROFS)) + GOTO(out, rc = -EROFS); + rc = ost_destroy(req->rq_export, req, oti); break; case OST_GETATTR: CDEBUG(D_INODE, "getattr\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_GETATTR_NET, 0); - rc = ost_getattr(req); + rc = ost_getattr(req->rq_export, req); break; case OST_SETATTR: CDEBUG(D_INODE, "setattr\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_SETATTR_NET, 0); - rc = ost_setattr(req); - break; - case OST_OPEN: - CDEBUG(D_INODE, "open\n"); - OBD_FAIL_RETURN(OBD_FAIL_OST_OPEN_NET, 0); - rc = ost_open(req); - break; - case OST_CLOSE: - CDEBUG(D_INODE, "close\n"); - OBD_FAIL_RETURN(OBD_FAIL_OST_CLOSE_NET, 0); - rc = ost_close(req); + rc = ost_setattr(req->rq_export, req, oti); break; case OST_WRITE: CDEBUG(D_INODE, "write\n"); + /* req->rq_request_portal would be nice, if it was set */ + if (req->rq_rqbd->rqbd_service->srv_req_portal !=OST_IO_PORTAL){ + CERROR("%s: deny write request from %s to portal %u\n", + req->rq_export->exp_obd->obd_name, + obd_export_nid2str(req->rq_export), + req->rq_rqbd->rqbd_service->srv_req_portal); + GOTO(out, rc = -EPROTO); + } OBD_FAIL_RETURN(OBD_FAIL_OST_BRW_NET, 0); - rc = ost_brw_write(req); - /* ost_brw sends its own replies */ + if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_ENOSPC)) + GOTO(out, rc = -ENOSPC); + if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_EROFS)) + GOTO(out, rc = -EROFS); + rc = ost_brw_write(req, oti); + LASSERT(current->journal_info == NULL); + /* ost_brw_write sends its own replies */ RETURN(rc); case OST_READ: CDEBUG(D_INODE, "read\n"); + /* req->rq_request_portal would be nice, if it was set */ + if (req->rq_rqbd->rqbd_service->srv_req_portal !=OST_IO_PORTAL){ + CERROR("%s: deny read request from %s to portal %u\n", + req->rq_export->exp_obd->obd_name, + obd_export_nid2str(req->rq_export), + req->rq_rqbd->rqbd_service->srv_req_portal); + GOTO(out, rc = -EPROTO); + } OBD_FAIL_RETURN(OBD_FAIL_OST_BRW_NET, 0); - rc = ost_brw_read(req); - /* ost_brw sends its own replies */ + rc = ost_brw_read(req, oti); + LASSERT(current->journal_info == NULL); + /* ost_brw_read sends its own replies */ RETURN(rc); case OST_PUNCH: CDEBUG(D_INODE, "punch\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_PUNCH_NET, 0); - rc = ost_punch(req); + if 
(OBD_FAIL_CHECK_ONCE(OBD_FAIL_OST_EROFS)) + GOTO(out, rc = -EROFS); + rc = ost_punch(req->rq_export, req, oti); break; case OST_STATFS: CDEBUG(D_INODE, "statfs\n"); OBD_FAIL_RETURN(OBD_FAIL_OST_STATFS_NET, 0); rc = ost_statfs(req); break; + case OST_SYNC: + CDEBUG(D_INODE, "sync\n"); + OBD_FAIL_RETURN(OBD_FAIL_OST_SYNC_NET, 0); + rc = ost_sync(req->rq_export, req); + break; + case OST_SET_INFO: + DEBUG_REQ(D_INODE, req, "set_info"); + rc = ost_set_info(req->rq_export, req); + break; + case OST_GET_INFO: + DEBUG_REQ(D_INODE, req, "get_info"); + rc = ost_get_info(req->rq_export, req); + break; + case OST_QUOTACHECK: + CDEBUG(D_INODE, "quotacheck\n"); + OBD_FAIL_RETURN(OBD_FAIL_OST_QUOTACHECK_NET, 0); + rc = ost_handle_quotacheck(req); + break; + case OST_QUOTACTL: + CDEBUG(D_INODE, "quotactl\n"); + OBD_FAIL_RETURN(OBD_FAIL_OST_QUOTACTL_NET, 0); + rc = ost_handle_quotactl(req); + break; + case OBD_PING: + DEBUG_REQ(D_INODE, req, "ping"); + rc = target_handle_ping(req); + break; + /* FIXME - just reply status */ + case LLOG_ORIGIN_CONNECT: + DEBUG_REQ(D_INODE, req, "log connect"); + rc = ost_llog_handle_connect(req->rq_export, req); + req->rq_status = rc; + rc = lustre_pack_reply(req, 1, NULL, NULL); + if (rc) + RETURN(rc); + RETURN(ptlrpc_reply(req)); + case OBD_LOG_CANCEL: + CDEBUG(D_INODE, "log cancel\n"); + OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0); + rc = llog_origin_handle_cancel(req); + req->rq_status = rc; + rc = lustre_pack_reply(req, 1, NULL, NULL); + if (rc) + RETURN(rc); + RETURN(ptlrpc_reply(req)); case LDLM_ENQUEUE: CDEBUG(D_INODE, "enqueue\n"); OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0); - rc = ldlm_handle_enqueue(req); - if (rc) - break; - RETURN(0); + rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast, + ldlm_server_blocking_ast, + ldlm_server_glimpse_ast); + fail = OBD_FAIL_OST_LDLM_REPLY_NET; + break; case LDLM_CONVERT: CDEBUG(D_INODE, "convert\n"); OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0); rc = ldlm_handle_convert(req); - if (rc) - break; - RETURN(0); + break; case LDLM_CANCEL: CDEBUG(D_INODE, "cancel\n"); OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0); rc = ldlm_handle_cancel(req); - if (rc) - break; - RETURN(0); + break; case LDLM_BL_CALLBACK: case LDLM_CP_CALLBACK: CDEBUG(D_INODE, "callback\n"); CERROR("callbacks should not happen on OST\n"); - LBUG(); - OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0); - break; + /* fall through */ default: + CERROR("Unexpected opcode %d\n", + lustre_msg_get_opc(req->rq_reqmsg)); req->rq_status = -ENOTSUPP; - rc = ptlrpc_error(req->rq_svc, req); + rc = ptlrpc_error(req); RETURN(rc); } + LASSERT(current->journal_info == NULL); + EXIT; + /* If we're DISCONNECTing, the export_data is already freed */ + if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != OST_DISCONNECT) + target_committed_to_req(req); + out: - //req->rq_status = rc; - if (rc) { - CERROR("ost: processing error (opcode=%d): %d\n", - req->rq_reqmsg->opc, rc); - ptlrpc_error(req->rq_svc, req); - } else { - CDEBUG(D_INODE, "sending reply\n"); - if (req->rq_repmsg == NULL) - CERROR("handler for opcode %d returned rc=0 without " - "creating rq_repmsg; needs to return rc != " - "0!\n", req->rq_reqmsg->opc); - ptlrpc_reply(req->rq_svc, req); - } + if (!rc) + oti_to_request(oti, req); + target_send_reply(req, rc, fail); return 0; } +EXPORT_SYMBOL(ost_handle); +/* + * free per-thread pool created by ost_thread_init(). 
+ */ +static void ost_thread_done(struct ptlrpc_thread *thread) +{ + int i; + struct ost_thread_local_cache *tls; /* TLS stands for Thread-Local + * Storage */ + + ENTRY; -#define OST_NUM_THREADS 6 + LASSERT(thread != NULL); + + /* + * be prepared to handle partially-initialized pools (because this is + * called from ost_thread_init() for cleanup. + */ + tls = thread->t_data; + if (tls != NULL) { + for (i = 0; i < OST_THREAD_POOL_SIZE; ++ i) { + if (tls->page[i] != NULL) + __cfs_free_page(tls->page[i]); + } + OBD_FREE_PTR(tls); + thread->t_data = NULL; + } + EXIT; +} -/* mount the file system (secretly) */ -static int ost_setup(struct obd_device *obddev, obd_count len, void *buf) +/* + * initialize per-thread page pool (bug 5137). + */ +static int ost_thread_init(struct ptlrpc_thread *thread) { - struct obd_ioctl_data* data = buf; - struct ost_obd *ost = &obddev->u.ost; - struct obd_device *tgt; - int err; + int result; int i; + struct ost_thread_local_cache *tls; + ENTRY; - if (data->ioc_inllen1 < 1) { - CERROR("requires a TARGET OBD UUID\n"); - RETURN(-EINVAL); - } - if (data->ioc_inllen1 > 37) { - CERROR("OBD UUID must be less than 38 characters\n"); - RETURN(-EINVAL); + LASSERT(thread != NULL); + LASSERT(thread->t_data == NULL); + LASSERTF(thread->t_id <= OSS_THREADS_MAX, "%u\n", thread->t_id); + + OBD_ALLOC_PTR(tls); + if (tls != NULL) { + result = 0; + thread->t_data = tls; + /* + * populate pool + */ + for (i = 0; i < OST_THREAD_POOL_SIZE; ++ i) { + tls->page[i] = cfs_alloc_page(OST_THREAD_POOL_GFP); + if (tls->page[i] == NULL) { + ost_thread_done(thread); + result = -ENOMEM; + break; + } + } + } else + result = -ENOMEM; + RETURN(result); +} + +#define OST_WATCHDOG_TIMEOUT (obd_timeout * 1000) + +/* Sigh - really, this is an OSS, the _server_, not the _target_ */ +static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg) +{ + struct ost_obd *ost = &obd->u.ost; + struct lprocfs_static_vars lvars; + int oss_min_threads; + int oss_max_threads; + int oss_min_create_threads; + int oss_max_create_threads; + int rc; + ENTRY; + + rc = cleanup_group_info(); + if (rc) + RETURN(rc); + + rc = llog_start_commit_thread(); + if (rc < 0) + RETURN(rc); + + lprocfs_init_vars(ost, &lvars); + lprocfs_obd_setup(obd, lvars.obd_vars); + + sema_init(&ost->ost_health_sem, 1); + + if (oss_num_threads) { + /* If oss_num_threads is set, it is the min and the max. 
+#define OST_WATCHDOG_TIMEOUT (obd_timeout * 1000)
+
+/* Sigh - really, this is an OSS, the _server_, not the _target_ */
+static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
+{
+        struct ost_obd *ost = &obd->u.ost;
+        struct lprocfs_static_vars lvars;
+        int oss_min_threads;
+        int oss_max_threads;
+        int oss_min_create_threads;
+        int oss_max_create_threads;
+        int rc;
+        ENTRY;
+
+        rc = cleanup_group_info();
+        if (rc)
+                RETURN(rc);
+
+        rc = llog_start_commit_thread();
+        if (rc < 0)
+                RETURN(rc);
+
+        lprocfs_init_vars(ost, &lvars);
+        lprocfs_obd_setup(obd, lvars.obd_vars);
+
+        sema_init(&ost->ost_health_sem, 1);
+
+        if (oss_num_threads) {
+                /* If oss_num_threads is set, it is the min and the max. */
+                if (oss_num_threads > OSS_THREADS_MAX)
+                        oss_num_threads = OSS_THREADS_MAX;
+                if (oss_num_threads < OSS_THREADS_MIN)
+                        oss_num_threads = OSS_THREADS_MIN;
+                oss_max_threads = oss_min_threads = oss_num_threads;
+        } else {
+                /* Base min threads on memory and cpus */
+                oss_min_threads = smp_num_cpus * num_physpages >>
+                        (27 - CFS_PAGE_SHIFT);
+                if (oss_min_threads < OSS_THREADS_MIN)
+                        oss_min_threads = OSS_THREADS_MIN;
+                /* Insure a 4x range for dynamic threads */
+                if (oss_min_threads > OSS_THREADS_MAX / 4)
+                        oss_min_threads = OSS_THREADS_MAX / 4;
+                oss_max_threads = min(OSS_THREADS_MAX, oss_min_threads * 4);
         }
-        MOD_INC_USE_COUNT;
-        tgt = class_uuid2obd(data->ioc_inlbuf1);
-        if (!tgt || !(tgt->obd_flags & OBD_ATTACHED) ||
-            !(tgt->obd_flags & OBD_SET_UP)) {
-                CERROR("device not attached or not set up (%d)\n",
-                       data->ioc_dev);
-                GOTO(error_dec, err = -EINVAL);
+        ost->ost_service =
+                ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
+                                OST_MAXREPSIZE, OST_REQUEST_PORTAL,
+                                OSC_REPLY_PORTAL,
+                                OST_WATCHDOG_TIMEOUT, ost_handle,
+                                LUSTRE_OSS_NAME, obd->obd_proc_entry,
+                                ost_print_req, oss_min_threads,
+                                oss_max_threads, "ll_ost",
+                                LCT_DT_THREAD);
+        if (ost->ost_service == NULL) {
+                CERROR("failed to start service\n");
+                GOTO(out_lprocfs, rc = -ENOMEM);
         }
-        err = obd_connect(&ost->ost_conn, tgt, NULL);
-        if (err) {
-                CERROR("fail to connect to device %d\n", data->ioc_dev);
-                GOTO(error_dec, err = -EINVAL);
+        rc = ptlrpc_start_threads(obd, ost->ost_service);
+        if (rc)
+                GOTO(out_service, rc = -EINVAL);
+
+        if (oss_num_create_threads) {
+                if (oss_num_create_threads > OSS_MAX_CREATE_THREADS)
+                        oss_num_create_threads = OSS_MAX_CREATE_THREADS;
+                if (oss_num_create_threads < OSS_DEF_CREATE_THREADS)
+                        oss_num_create_threads = OSS_DEF_CREATE_THREADS;
+                oss_min_create_threads = oss_max_create_threads =
+                        oss_num_create_threads;
+        } else {
+                oss_min_create_threads = OSS_DEF_CREATE_THREADS;
+                oss_max_create_threads = OSS_MAX_CREATE_THREADS;
         }
-        ost->ost_service = ptlrpc_init_svc(1024, 640, OST_REQUEST_PORTAL,
-                                        OSC_REPLY_PORTAL, "self", ost_handle,
-                                        "ost");
-        if (!ost->ost_service) {
-                CERROR("failed to start service\n");
-                GOTO(error_disc, err = -EINVAL);
+        ost->ost_create_service =
+                ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
+                                OST_MAXREPSIZE, OST_CREATE_PORTAL,
+                                OSC_REPLY_PORTAL,
+                                OST_WATCHDOG_TIMEOUT, ost_handle, "ost_create",
+                                obd->obd_proc_entry, ost_print_req,
+                                oss_min_create_threads,
+                                oss_max_create_threads,
+                                "ll_ost_creat", LCT_DT_THREAD);
+        if (ost->ost_create_service == NULL) {
+                CERROR("failed to start OST create service\n");
+                GOTO(out_service, rc = -ENOMEM);
         }
-        for (i = 0; i < OST_NUM_THREADS; i++) {
-                char name[32];
-                sprintf(name, "lustre_ost_%02d", i);
-                err = ptlrpc_start_thread(obddev, ost->ost_service, name);
-                if (err) {
-                        CERROR("error starting thread #%d: rc %d\n", i, err);
-                        GOTO(error_disc, err = -EINVAL);
-                }
+        rc = ptlrpc_start_threads(obd, ost->ost_create_service);
+        if (rc)
+                GOTO(out_create, rc = -EINVAL);
+
+        ost->ost_io_service =
+                ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
+                                OST_MAXREPSIZE, OST_IO_PORTAL,
+                                OSC_REPLY_PORTAL,
+                                OST_WATCHDOG_TIMEOUT, ost_handle, "ost_io",
+                                obd->obd_proc_entry, ost_print_req,
+                                oss_min_threads, oss_max_threads,
+                                "ll_ost_io", LCT_DT_THREAD);
+        if (ost->ost_io_service == NULL) {
+                CERROR("failed to start OST I/O service\n");
+                GOTO(out_create, rc = -ENOMEM);
         }
+        ost->ost_io_service->srv_init = ost_thread_init;
+        ost->ost_io_service->srv_done = ost_thread_done;
+        ost->ost_io_service->srv_cpu_affinity = 1;
+        rc = ptlrpc_start_threads(obd, ost->ost_io_service);
+        if (rc)
+                GOTO(out_io, rc = -EINVAL);
+
+        ping_evictor_start();
+
         RETURN(0);
-error_disc:
-        obd_disconnect(&ost->ost_conn);
-error_dec:
-        MOD_DEC_USE_COUNT;
-        RETURN(err);
+out_io:
+        ptlrpc_unregister_service(ost->ost_io_service);
+        ost->ost_io_service = NULL;
+out_create:
+        ptlrpc_unregister_service(ost->ost_create_service);
+        ost->ost_create_service = NULL;
+out_service:
+        ptlrpc_unregister_service(ost->ost_service);
+        ost->ost_service = NULL;
+out_lprocfs:
+        lprocfs_obd_cleanup(obd);
+        RETURN(rc);
 }
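
ost_setup() above replaces the old fixed OST_NUM_THREADS = 6 with a min/max thread range per service: if the oss_num_threads module parameter is given it pins both ends (clamped to the OSS_THREADS_MIN..OSS_THREADS_MAX window); otherwise the minimum scales with the CPU count times the RAM size in 128 MiB units (that is the >> (27 - CFS_PAGE_SHIFT) shift) and the maximum is held to at most four times the minimum and at most OSS_THREADS_MAX. The sketch below just reruns that arithmetic for one hypothetical machine; THREADS_MIN, THREADS_MAX, and the CPU/RAM figures are assumptions made for the example, since the real constants live outside this hunk.

    /* threads_sketch.c -- reproduces the sizing arithmetic from ost_setup(). */
    #include <stdio.h>

    #define THREADS_MIN 2
    #define THREADS_MAX 512
    #define PAGE_SHIFT  12                      /* 4 KiB pages */

    int main(void)
    {
            unsigned long ncpus  = 4;
            unsigned long npages = 262144;      /* 1 GiB of 4 KiB pages */
            unsigned long min_t, max_t;

            /* ncpus x (RAM / 128 MiB): the >> (27 - PAGE_SHIFT) shift */
            min_t = ncpus * npages >> (27 - PAGE_SHIFT);
            if (min_t < THREADS_MIN)
                    min_t = THREADS_MIN;
            if (min_t > THREADS_MAX / 4)        /* keep a 4x dynamic range */
                    min_t = THREADS_MAX / 4;
            max_t = THREADS_MAX < min_t * 4 ? THREADS_MAX : min_t * 4;

            /* 4 CPUs and 1 GiB of RAM give min=32, max=128 here */
            printf("min=%lu max=%lu\n", min_t, max_t);
            return 0;
    }
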
-static int ost_cleanup(struct obd_device * obddev)
+static int ost_cleanup(struct obd_device *obd)
 {
-        struct ost_obd *ost = &obddev->u.ost;
-        int err;
-
+        struct ost_obd *ost = &obd->u.ost;
+        int err = 0;
         ENTRY;
-        if ( !list_empty(&obddev->obd_exports) ) {
-                CERROR("still has clients!\n");
-                RETURN(-EBUSY);
+        ping_evictor_stop();
+
+        spin_lock_bh(&obd->obd_processing_task_lock);
+        if (obd->obd_recovering) {
+                target_cancel_recovery_timer(obd);
+                obd->obd_recovering = 0;
         }
+        spin_unlock_bh(&obd->obd_processing_task_lock);
-        ptlrpc_stop_all_threads(ost->ost_service);
+        down(&ost->ost_health_sem);
         ptlrpc_unregister_service(ost->ost_service);
+        ptlrpc_unregister_service(ost->ost_create_service);
+        ptlrpc_unregister_service(ost->ost_io_service);
+        ost->ost_service = NULL;
+        ost->ost_create_service = NULL;
+        up(&ost->ost_health_sem);
-        err = obd_disconnect(&ost->ost_conn);
-        if (err) {
-                CERROR("lustre ost: fail to disconnect device\n");
-                RETURN(-EINVAL);
-        }
+        lprocfs_obd_cleanup(obd);
-        MOD_DEC_USE_COUNT;
-        RETURN(0);
+        RETURN(err);
+}
+
+static int ost_health_check(struct obd_device *obd)
+{
+        struct ost_obd *ost = &obd->u.ost;
+        int rc = 0;
+
+        down(&ost->ost_health_sem);
+        rc |= ptlrpc_service_health_check(ost->ost_service);
+        rc |= ptlrpc_service_health_check(ost->ost_create_service);
+        rc |= ptlrpc_service_health_check(ost->ost_io_service);
+        up(&ost->ost_health_sem);
+
+        /*
+         * health_check to return 0 on healthy
+         * and 1 on unhealthy.
+         */
+        if( rc != 0)
+                rc = 1;
+
+        return rc;
+}
+
+struct ost_thread_local_cache *ost_tls(struct ptlrpc_request *r)
+{
+        return (struct ost_thread_local_cache *)(r->rq_svc_thread->t_data);
+}
 /* use obd ops to offer management infrastructure */
 static struct obd_ops ost_obd_ops = {
-        o_setup:       ost_setup,
-        o_cleanup:     ost_cleanup,
+        .o_owner        = THIS_MODULE,
+        .o_setup        = ost_setup,
+        .o_cleanup      = ost_cleanup,
+        .o_health_check = ost_health_check,
 };
+
 static int __init ost_init(void)
 {
-        class_register_type(&ost_obd_ops, LUSTRE_OST_NAME);
-        return 0;
+        struct lprocfs_static_vars lvars;
+        int rc;
+        ENTRY;
+
+        lprocfs_init_vars(ost, &lvars);
+        rc = class_register_type(&ost_obd_ops, NULL, lvars.module_vars,
+                                 LUSTRE_OSS_NAME, NULL);
+
+        if (ost_num_threads != 0 && oss_num_threads == 0) {
+                LCONSOLE_INFO("ost_num_threads module parameter is deprecated, "
+                              "use oss_num_threads instead or unset both for "
+                              "dynamic thread startup\n");
+                oss_num_threads = ost_num_threads;
+        }
+
+        RETURN(rc);
 }
-static void __exit ost_exit(void)
+static void /*__exit*/ ost_exit(void)
 {
-        class_unregister_type(LUSTRE_OST_NAME);
+        class_unregister_type(LUSTRE_OSS_NAME);
 }
 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
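
Two details in the hunk above are worth calling out: ost_cleanup() and ost_health_check() both take ost_health_sem, so a health poll never races with service teardown, and the health check deliberately collapses whatever the three per-service checks return into a plain 0 (healthy) or 1 (unhealthy) answer for its caller. The tiny sketch below shows only that OR-and-collapse pattern; the three check functions are stand-ins, not ptlrpc_service_health_check().

    /* health_sketch.c -- mirrors the shape of ost_health_check(). */
    #include <stdio.h>

    static int check_request_service(void) { return 0; }
    static int check_create_service(void)  { return 0; }
    static int check_io_service(void)      { return 2; } /* pretend unhealthy */

    int main(void)
    {
            int rc = 0;

            rc |= check_request_service();
            rc |= check_create_service();
            rc |= check_io_service();

            if (rc != 0)        /* report only healthy (0) / unhealthy (1) */
                    rc = 1;

            printf("health: %d\n", rc);
            return rc;
    }
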