4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ost/ost_handler.c
38 * Author: Peter J. Braam <braam@clusterfs.com>
39 * Author: Phil Schwan <phil@clusterfs.com>
42 #define DEBUG_SUBSYSTEM S_OST
44 #include <linux/module.h>
45 #include <obd_cksum.h>
47 #include <lustre_net.h>
48 #include <lustre_dlm.h>
49 #include <lustre_export.h>
50 #include <lustre_debug.h>
51 #include <linux/init.h>
52 #include <lprocfs_status.h>
53 #include <libcfs/list.h>
54 #include "ost_internal.h"
56 static int oss_num_threads;
57 CFS_MODULE_PARM(oss_num_threads, "i", int, 0444,
58 "number of OSS service threads to start");
60 static int ost_num_threads;
61 CFS_MODULE_PARM(ost_num_threads, "i", int, 0444,
62 "number of OST service threads to start (deprecated)");
64 static int oss_num_create_threads;
65 CFS_MODULE_PARM(oss_num_create_threads, "i", int, 0444,
66 "number of OSS create threads to start");
68 static char *oss_cpts;
69 CFS_MODULE_PARM(oss_cpts, "s", charp, 0444,
70 "CPU partitions OSS threads should run on");
72 static char *oss_io_cpts;
73 CFS_MODULE_PARM(oss_io_cpts, "s", charp, 0444,
74 "CPU partitions OSS IO threads should run on");
77 * This page is allocated statically when the module is initializing.
78 * It is used to simulate data corruption; see ost_checksum_bulk()
79 * for details. As the original pages provided by the layers below
80 * can remain in the internal cache, we do not want to modify them. */
83 static struct page *ost_page_to_corrupt = NULL;
86 * Do not return server-side uid/gid to remote client
88 static void ost_drop_id(struct obd_export *exp, struct obdo *oa)
90 if (exp_connect_rmtclient(exp)) {
93 oa->o_valid &= ~(OBD_MD_FLUID | OBD_MD_FLGID);
98 * Validate oa from client.
99 * If the request comes from 2.0 clients, currently only RSVD seq and IDIF are valid:
101 * a. for single MDS seq = FID_SEQ_OST_MDT0,
102 * b. for CMD, seq = FID_SEQ_OST_MDT0, FID_SEQ_OST_MDT1 - FID_SEQ_OST_MAX
104 static int ost_validate_obdo(struct obd_export *exp, struct obdo *oa,
105 struct obd_ioobj *ioobj)
107 if (oa != NULL && !(oa->o_valid & OBD_MD_FLGROUP)) {
108 oa->o_seq = FID_SEQ_OST_MDT0;
110 ioobj->ioo_seq = FID_SEQ_OST_MDT0;
111 /* remove fid_seq_is_rsvd() after FID-on-OST allows SEQ > 9 */
112 } else if (oa == NULL || !(fid_seq_is_rsvd(oa->o_seq) ||
113 fid_seq_is_mdt0(oa->o_seq))) {
114 CERROR("%s: client %s sent invalid object "POSTID"\n",
115 exp->exp_obd->obd_name, obd_export_nid2str(exp),
116 oa ? oa->o_id : -1, oa ? oa->o_seq : -1);
119 obdo_from_ostid(oa, &oa->o_oi);
121 ioobj_from_obdo(ioobj, oa);
125 void oti_to_request(struct obd_trans_info *oti, struct ptlrpc_request *req)
127 struct oti_req_ack_lock *ack_lock;
133 if (req->rq_repmsg) {
134 __u64 versions[PTLRPC_NUM_VERSIONS] = { 0 };
135 lustre_msg_set_transno(req->rq_repmsg, oti->oti_transno);
136 versions[0] = oti->oti_pre_version;
137 lustre_msg_set_versions(req->rq_repmsg, versions);
139 req->rq_transno = oti->oti_transno;
141 /* XXX 4 == entries in oti_ack_locks??? */
142 for (ack_lock = oti->oti_ack_locks, i = 0; i < 4; i++, ack_lock++) {
145 /* XXX not even calling target_send_reply in some cases... */
146 ptlrpc_save_lock (req, &ack_lock->lock, ack_lock->mode, 0);
150 static int ost_destroy(struct obd_export *exp, struct ptlrpc_request *req,
151 struct obd_trans_info *oti)
153 struct ost_body *body, *repbody;
154 struct lustre_capa *capa = NULL;
158 /* Get the request body */
159 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
163 if (body->oa.o_id == 0)
166 rc = ost_validate_obdo(exp, &body->oa, NULL);
170 /* If there's a DLM request, cancel the locks mentioned in it */
171 if (req_capsule_field_present(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT)) {
172 struct ldlm_request *dlm;
174 dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
177 ldlm_request_cancel(req, dlm, 0);
180 /* If there's a capability, get it */
181 if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
182 capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
184 CERROR("Missing capability for OST DESTROY");
189 /* Prepare the reply */
190 rc = req_capsule_server_pack(&req->rq_pill);
194 /* Get the log cancellation cookie */
195 if (body->oa.o_valid & OBD_MD_FLCOOKIE)
196 oti->oti_logcookies = &body->oa.o_lcookie;
198 /* Finish the reply */
199 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
200 memcpy(&repbody->oa, &body->oa, sizeof(body->oa));
202 /* Do the destroy and set the reply status accordingly */
203 req->rq_status = obd_destroy(req->rq_svc_thread->t_env, exp,
204 &repbody->oa, NULL, oti, NULL, capa);
209 * Helper function for getting server side [start, start+count] DLM lock
210 * if asked by client.
212 static int ost_lock_get(struct obd_export *exp, struct obdo *oa,
213 __u64 start, __u64 count, struct lustre_handle *lh,
214 int mode, __u64 flags)
216 struct ldlm_res_id res_id;
217 ldlm_policy_data_t policy;
218 __u64 end = start + count;
222 LASSERT(!lustre_handle_is_used(lh));
223 /* o_id and o_gr are used for localizing the resource; if the client failed
224 * to set them, do not trigger an assertion. */
225 if (unlikely((oa->o_valid & (OBD_MD_FLID | OBD_MD_FLGROUP)) !=
226 (OBD_MD_FLID | OBD_MD_FLGROUP)))
229 if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
230 !(oa->o_flags & OBD_FL_SRVLOCK))
233 osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
234 CDEBUG(D_INODE, "OST-side extent lock.\n");
236 policy.l_extent.start = start & CFS_PAGE_MASK;
238 /* If ->o_blocks is EOF it means "lock till the end of the
239 * file". Otherwise, it's the size of the hole being punched (in bytes). */
240 if (count == OBD_OBJECT_EOF || end < start)
241 policy.l_extent.end = OBD_OBJECT_EOF;
243 policy.l_extent.end = end | ~CFS_PAGE_MASK;
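/* Note: the requested byte range has been widened to page boundaries above
 * (start rounded down, end rounded up via CFS_PAGE_MASK), so the server-side
 * lock always covers whole pages of the object. */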
245 RETURN(ldlm_cli_enqueue_local(exp->exp_obd->obd_namespace, &res_id,
246 LDLM_EXTENT, &policy, mode, &flags,
247 ldlm_blocking_ast, ldlm_completion_ast,
248 ldlm_glimpse_ast, NULL, 0, NULL, lh));
251 /* Helper function: release lock, if any. */
252 static void ost_lock_put(struct obd_export *exp,
253 struct lustre_handle *lh, int mode)
256 if (lustre_handle_is_used(lh))
257 ldlm_lock_decref(lh, mode);
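/**
 * Handle OST_GETATTR: optionally take a server-side PR extent lock over the
 * whole object (when the client set OBD_FL_SRVLOCK), call obd_getattr() and
 * return the attributes in the reply body.
 */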
261 static int ost_getattr(struct obd_export *exp, struct ptlrpc_request *req)
263 struct ost_body *body, *repbody;
264 struct obd_info *oinfo;
265 struct lustre_handle lh = { 0 };
266 struct lustre_capa *capa = NULL;
270 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
274 rc = ost_validate_obdo(exp, &body->oa, NULL);
278 if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
279 capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
281 CERROR("Missing capability for OST GETATTR");
286 rc = req_capsule_server_pack(&req->rq_pill);
290 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
291 repbody->oa = body->oa;
293 rc = ost_lock_get(exp, &repbody->oa, 0, OBD_OBJECT_EOF, &lh, LCK_PR, 0);
297 OBD_ALLOC_PTR(oinfo);
299 GOTO(unlock, rc = -ENOMEM);
300 oinfo->oi_oa = &repbody->oa;
301 oinfo->oi_capa = capa;
303 req->rq_status = obd_getattr(req->rq_svc_thread->t_env, exp, oinfo);
307 ost_drop_id(exp, &repbody->oa);
310 ost_lock_put(exp, &lh, LCK_PR);
314 static int ost_statfs(struct ptlrpc_request *req)
316 struct obd_statfs *osfs;
320 rc = req_capsule_server_pack(&req->rq_pill);
324 osfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
326 req->rq_status = obd_statfs(req->rq_svc_thread->t_env, req->rq_export,
328 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
330 if (req->rq_status != 0)
331 CERROR("ost: statfs failed: rc %d\n", req->rq_status);
333 if (OBD_FAIL_CHECK(OBD_FAIL_OST_STATFS_EINPROGRESS))
334 req->rq_status = -EINPROGRESS;
339 static int ost_create(struct obd_export *exp, struct ptlrpc_request *req,
340 struct obd_trans_info *oti)
342 struct ost_body *body, *repbody;
346 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
350 rc = ost_validate_obdo(req->rq_export, &body->oa, NULL);
354 rc = req_capsule_server_pack(&req->rq_pill);
358 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
359 repbody->oa = body->oa;
360 oti->oti_logcookies = &body->oa.o_lcookie;
362 req->rq_status = obd_create(req->rq_svc_thread->t_env, exp,
363 &repbody->oa, NULL, oti);
364 //obd_log_cancel(conn, NULL, 1, oti->oti_logcookies, 0);
368 static int ost_punch(struct obd_export *exp, struct ptlrpc_request *req,
369 struct obd_trans_info *oti)
371 struct ost_body *body, *repbody;
373 struct lustre_handle lh = {0,};
377 /* check that we do support OBD_CONNECT_TRUNCLOCK. */
378 CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
380 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
384 rc = ost_validate_obdo(exp, &body->oa, NULL);
388 if ((body->oa.o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) !=
389 (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS))
392 rc = req_capsule_server_pack(&req->rq_pill);
396 /* standard truncate optimization: if file body is completely
397 * destroyed, don't send data back to the server. */
398 if (body->oa.o_size == 0)
399 flags |= LDLM_AST_DISCARD_DATA;
401 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
402 repbody->oa = body->oa;
404 rc = ost_lock_get(exp, &repbody->oa, repbody->oa.o_size,
405 repbody->oa.o_blocks, &lh, LCK_PW, flags);
407 struct obd_info *oinfo;
408 struct lustre_capa *capa = NULL;
410 if (repbody->oa.o_valid & OBD_MD_FLFLAGS &&
411 repbody->oa.o_flags == OBD_FL_SRVLOCK)
413 * If OBD_FL_SRVLOCK is the only bit set in
414 * ->o_flags, clear OBD_MD_FLFLAGS to avoid falling
415 * through filter_setattr() to filter_iocontrol().
417 repbody->oa.o_valid &= ~OBD_MD_FLFLAGS;
419 if (repbody->oa.o_valid & OBD_MD_FLOSSCAPA) {
420 capa = req_capsule_client_get(&req->rq_pill,
423 CERROR("Missing capability for OST PUNCH");
424 GOTO(unlock, rc = -EFAULT);
428 OBD_ALLOC_PTR(oinfo);
430 GOTO(unlock, rc = -ENOMEM);
431 oinfo->oi_oa = &repbody->oa;
432 oinfo->oi_policy.l_extent.start = oinfo->oi_oa->o_size;
433 oinfo->oi_policy.l_extent.end = oinfo->oi_oa->o_blocks;
434 oinfo->oi_capa = capa;
435 oinfo->oi_flags = OBD_FL_PUNCH;
437 req->rq_status = obd_punch(req->rq_svc_thread->t_env, exp,
441 ost_lock_put(exp, &lh, LCK_PW);
444 ost_drop_id(exp, &repbody->oa);
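/**
 * Handle OST_SYNC: flush the [o_size, o_blocks] byte range of the object to
 * stable storage via obd_sync() and return the updated attributes.
 */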
448 static int ost_sync(struct obd_export *exp, struct ptlrpc_request *req,
449 struct obd_trans_info *oti)
451 struct ost_body *body, *repbody;
452 struct obd_info *oinfo;
453 struct lustre_capa *capa = NULL;
457 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
461 rc = ost_validate_obdo(exp, &body->oa, NULL);
465 if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
466 capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
468 CERROR("Missing capability for OST SYNC");
473 rc = req_capsule_server_pack(&req->rq_pill);
477 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
478 repbody->oa = body->oa;
480 OBD_ALLOC_PTR(oinfo);
484 oinfo->oi_oa = &repbody->oa;
485 oinfo->oi_capa = capa;
486 oinfo->oi_jobid = oti->oti_jobid;
487 req->rq_status = obd_sync(req->rq_svc_thread->t_env, exp, oinfo,
488 repbody->oa.o_size, repbody->oa.o_blocks,
492 ost_drop_id(exp, &repbody->oa);
496 static int ost_setattr(struct obd_export *exp, struct ptlrpc_request *req,
497 struct obd_trans_info *oti)
499 struct ost_body *body, *repbody;
500 struct obd_info *oinfo;
501 struct lustre_capa *capa = NULL;
505 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
509 rc = ost_validate_obdo(req->rq_export, &body->oa, NULL);
513 rc = req_capsule_server_pack(&req->rq_pill);
517 if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
518 capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
520 CERROR("Missing capability for OST SETATTR");
525 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
526 repbody->oa = body->oa;
528 OBD_ALLOC_PTR(oinfo);
531 oinfo->oi_oa = &repbody->oa;
532 oinfo->oi_capa = capa;
534 req->rq_status = obd_setattr(req->rq_svc_thread->t_env, exp, oinfo,
539 ost_drop_id(exp, &repbody->oa);
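/**
 * Compute the bulk checksum over all pages of \a desc with the algorithm
 * selected by \a cksum_type. Under the OBD_FAIL_OST_CHECKSUM_RECEIVE and
 * OBD_FAIL_OST_CHECKSUM_SEND fault-injection points, the first page is
 * copied into the static ost_page_to_corrupt scratch page and deliberately
 * corrupted (before or after hashing, respectively) so checksum mismatches
 * can be exercised without modifying the original page.
 */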
543 static __u32 ost_checksum_bulk(struct ptlrpc_bulk_desc *desc, int opc,
544 cksum_type_t cksum_type)
546 struct cfs_crypto_hash_desc *hdesc;
547 unsigned int bufsize;
549 unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
552 hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
554 CERROR("Unable to initialize checksum hash %s\n",
555 cfs_crypto_hash_name(cfs_alg));
556 return PTR_ERR(hdesc);
558 CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
559 for (i = 0; i < desc->bd_iov_count; i++) {
561 /* corrupt the data before we compute the checksum, to
562 * simulate a client->OST data error */
563 if (i == 0 && opc == OST_WRITE &&
564 OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
565 int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
566 int len = desc->bd_iov[i].kiov_len;
567 struct page *np = ost_page_to_corrupt;
568 char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
571 char *ptr2 = kmap(np) + off;
573 memcpy(ptr2, ptr, len);
574 memcpy(ptr2, "bad3", min(4, len));
576 desc->bd_iov[i].kiov_page = np;
578 CERROR("can't alloc page for corruption\n");
581 cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
582 desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
583 desc->bd_iov[i].kiov_len);
585 /* corrupt the data after we compute the checksum, to
586 * simulate an OST->client data error */
587 if (i == 0 && opc == OST_READ &&
588 OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
589 int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
590 int len = desc->bd_iov[i].kiov_len;
591 struct page *np = ost_page_to_corrupt;
592 char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
595 char *ptr2 = kmap(np) + off;
597 memcpy(ptr2, ptr, len);
598 memcpy(ptr2, "bad4", min(4, len));
600 desc->bd_iov[i].kiov_page = np;
602 CERROR("can't alloc page for corruption\n");
608 err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
610 cfs_crypto_hash_final(hdesc, NULL, NULL);
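/**
 * Take a server-side extent lock spanning all the niobufs of a bulk request,
 * but only when the client asked for it by setting OBD_BRW_SRVLOCK on the
 * first niobuf; all niobufs must agree on that flag.
 */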
615 static int ost_brw_lock_get(int mode, struct obd_export *exp,
616 struct obd_ioobj *obj, struct niobuf_remote *nb,
617 struct lustre_handle *lh)
620 int nrbufs = obj->ioo_bufcnt;
621 struct ldlm_res_id res_id;
622 ldlm_policy_data_t policy;
626 osc_build_res_name(obj->ioo_id, obj->ioo_seq, &res_id);
627 LASSERT(mode == LCK_PR || mode == LCK_PW);
628 LASSERT(!lustre_handle_is_used(lh));
630 if (nrbufs == 0 || !(nb[0].flags & OBD_BRW_SRVLOCK))
633 for (i = 1; i < nrbufs; i ++)
634 if ((nb[0].flags & OBD_BRW_SRVLOCK) !=
635 (nb[i].flags & OBD_BRW_SRVLOCK))
638 policy.l_extent.start = nb[0].offset & CFS_PAGE_MASK;
639 policy.l_extent.end = (nb[nrbufs - 1].offset +
640 nb[nrbufs - 1].len - 1) | ~CFS_PAGE_MASK;
642 RETURN(ldlm_cli_enqueue_local(exp->exp_obd->obd_namespace, &res_id,
643 LDLM_EXTENT, &policy, mode, &flags,
644 ldlm_blocking_ast, ldlm_completion_ast,
645 ldlm_glimpse_ast, NULL, 0, NULL, lh));
648 static void ost_brw_lock_put(int mode,
649 struct obd_ioobj *obj, struct niobuf_remote *niob,
650 struct lustre_handle *lh)
653 LASSERT(mode == LCK_PR || mode == LCK_PW);
654 LASSERT((obj->ioo_bufcnt > 0 && (niob[0].flags & OBD_BRW_SRVLOCK)) ==
655 lustre_handle_is_used(lh));
656 if (lustre_handle_is_used(lh))
657 ldlm_lock_decref(lh, mode);
661 /* Allocate thread local buffers if needed */
662 static struct ost_thread_local_cache *ost_tls_get(struct ptlrpc_request *r)
664 struct ost_thread_local_cache *tls =
665 (struct ost_thread_local_cache *)(r->rq_svc_thread->t_data);
667 /* In normal operation an I/O request is serviced only by the
668 * ll_ost_io threads, each of which has its own TLS buffers allocated by
669 * ost_io_thread_init().
670 * During recovery, an I/O request may be queued until any of the OST
671 * service threads processes it, and that thread is not necessarily one of
672 * the ll_ost_io threads. In that case we dynamically allocate TLS
673 * buffers for the duration of the request. */
674 if (unlikely(tls == NULL)) {
675 LASSERT(r->rq_export->exp_in_recovery);
679 r->rq_svc_thread->t_data = tls;
685 /* Free thread local buffers if they were allocated only for servicing
686 * this one request */
687 static void ost_tls_put(struct ptlrpc_request *r)
689 struct ost_thread_local_cache *tls =
690 (struct ost_thread_local_cache *)(r->rq_svc_thread->t_data);
692 if (unlikely(tls->temporary)) {
694 r->rq_svc_thread->t_data = NULL;
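/**
 * Handle an OST_READ bulk request: take the server-side lock if the client
 * asked for one, prepare the pages with obd_preprw(), transfer them to the
 * client with a BULK_PUT_SOURCE bulk, attach a bulk checksum to the reply
 * when OBD_MD_FLCKSUM was requested, and finish with obd_commitrw().
 */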
698 static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti)
700 struct ptlrpc_bulk_desc *desc = NULL;
701 struct obd_export *exp = req->rq_export;
702 struct niobuf_remote *remote_nb;
703 struct niobuf_local *local_nb;
704 struct obd_ioobj *ioo;
705 struct ost_body *body, *repbody;
706 struct lustre_capa *capa = NULL;
707 struct l_wait_info lwi;
708 struct lustre_handle lockh = { 0 };
709 int niocount, npages, nob = 0, rc, i;
711 struct ost_thread_local_cache *tls;
714 req->rq_bulk_read = 1;
716 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_READ_BULK))
717 GOTO(out, rc = -EIO);
719 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, (obd_timeout + 1) / 4);
721 /* Check if there is eviction in progress, and if so, wait for it to finish. */
723 if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
724 lwi = LWI_INTR(NULL, NULL); // We do not care how long it takes
725 rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
726 !cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress),
730 GOTO(out, rc = -ENOTCONN);
732 /* ost_body, ioobj & noibuf_remote are verified and swabbed in
733 * ost_rw_hpreq_check(). */
734 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
736 GOTO(out, rc = -EFAULT);
739 * A req_capsule_X_get_array(pill, field, ptr_to_element_count) function
740 * would be useful here and wherever we get &RMF_OBD_IOOBJ and
741 * &RMF_NIOBUF_REMOTE.
743 ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
745 GOTO(out, rc = -EFAULT);
747 rc = ost_validate_obdo(exp, &body->oa, ioo);
751 niocount = ioo->ioo_bufcnt;
752 remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
753 if (remote_nb == NULL)
754 GOTO(out, rc = -EFAULT);
756 if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
757 capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
759 CERROR("Missing capability for OST BRW READ");
760 GOTO(out, rc = -EFAULT);
764 rc = req_capsule_server_pack(&req->rq_pill);
768 tls = ost_tls_get(req);
770 GOTO(out_bulk, rc = -ENOMEM);
771 local_nb = tls->local;
773 rc = ost_brw_lock_get(LCK_PR, exp, ioo, remote_nb, &lockh);
778 * If getting the lock took more time than the
779 * client was willing to wait, drop it. b=11330
781 if (cfs_time_current_sec() > req->rq_deadline ||
782 OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
784 CERROR("Dropping timed-out read from %s because locking"
785 "object "LPX64" took %ld seconds (limit was %ld).\n",
786 libcfs_id2str(req->rq_peer), ioo->ioo_id,
787 cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
788 req->rq_deadline - req->rq_arrival_time.tv_sec);
789 GOTO(out_lock, rc = -ETIMEDOUT);
792 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
793 memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa));
795 npages = OST_THREAD_POOL_SIZE;
796 rc = obd_preprw(req->rq_svc_thread->t_env, OBD_BRW_READ, exp,
797 &repbody->oa, 1, ioo, remote_nb, &npages, local_nb,
802 desc = ptlrpc_prep_bulk_exp(req, npages,
803 BULK_PUT_SOURCE, OST_BULK_PORTAL);
805 GOTO(out_commitrw, rc = -ENOMEM);
808 for (i = 0; i < npages; i++) {
809 int page_rc = local_nb[i].rc;
811 if (page_rc < 0) { /* error */
817 if (page_rc != 0) { /* some data! */
818 LASSERT (local_nb[i].page != NULL);
819 ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
820 local_nb[i].lnb_page_offset,
824 if (page_rc != local_nb[i].len) { /* short read */
825 /* All subsequent pages should be 0 */
827 LASSERT(local_nb[i].rc == 0);
832 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
833 cksum_type_t cksum_type =
834 cksum_type_unpack(repbody->oa.o_valid & OBD_MD_FLFLAGS ?
835 repbody->oa.o_flags : 0);
836 repbody->oa.o_flags = cksum_type_pack(cksum_type);
837 repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
838 repbody->oa.o_cksum = ost_checksum_bulk(desc, OST_READ,cksum_type);
839 CDEBUG(D_PAGE, "checksum at read origin: %x\n",
840 repbody->oa.o_cksum);
842 repbody->oa.o_valid = 0;
844 /* We're finishing using body->oa as an input variable */
846 /* Check if the client was evicted while we were doing I/O, before touching the network. */
849 if (likely(!CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)))
850 rc = target_bulk_io(exp, desc, &lwi);
855 /* Must commit after prep above in all cases */
856 rc = obd_commitrw(req->rq_svc_thread->t_env, OBD_BRW_READ, exp,
857 &repbody->oa, 1, ioo, remote_nb, npages, local_nb,
861 ost_drop_id(exp, &repbody->oa);
864 ost_brw_lock_put(LCK_PR, ioo, remote_nb, &lockh);
868 if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
869 ptlrpc_free_bulk_nopin(desc);
873 req->rq_status = nob;
874 ptlrpc_lprocfs_brw(req, nob);
875 target_committed_to_req(req);
877 } else if (!no_reply) {
878 /* Only reply if there was no comms problem with bulk */
879 target_committed_to_req(req);
883 /* reply out callback would free */
884 ptlrpc_req_drop_rs(req);
885 LCONSOLE_WARN("%s: Bulk IO read error with %s (at %s), "
886 "client will retry: rc %d\n",
887 exp->exp_obd->obd_name,
888 obd_uuid2str(&exp->exp_client_uuid),
889 obd_export_nid2str(exp), rc);
891 /* send a bulk after the reply to simulate a network delay or reordering */
893 if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
895 struct l_wait_info lwi1;
897 CDEBUG(D_INFO, "reorder BULK\n");
898 cfs_waitq_init(&waitq);
900 lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
901 l_wait_event(waitq, 0, &lwi1);
902 rc = target_bulk_io(exp, desc, &lwi);
903 ptlrpc_free_bulk_nopin(desc);
909 static void ost_warn_on_cksum(struct ptlrpc_request *req,
910 struct ptlrpc_bulk_desc *desc,
911 struct niobuf_local *local_nb, int npages,
912 obd_count client_cksum, obd_count server_cksum,
915 struct obd_export *exp = req->rq_export;
916 struct ost_body *body;
920 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
921 LASSERT (body != NULL);
923 if (req->rq_peer.nid == desc->bd_sender) {
927 router = libcfs_nid2str(desc->bd_sender);
931 CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
932 client_cksum, server_cksum);
936 LCONSOLE_ERROR_MSG(0x168, "BAD WRITE CHECKSUM: %s from %s%s%s inode "
937 DFID" object "LPU64"/"LPU64" extent ["LPU64"-"LPU64
938 "]: client csum %x, server csum %x\n",
939 exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
941 body->oa.o_valid & OBD_MD_FLFID ?
942 body->oa.o_parent_seq : (__u64)0,
943 body->oa.o_valid & OBD_MD_FLFID ?
944 body->oa.o_parent_oid : 0,
945 body->oa.o_valid & OBD_MD_FLFID ?
946 body->oa.o_parent_ver : 0,
948 body->oa.o_valid & OBD_MD_FLGROUP ?
949 body->oa.o_seq : (__u64)0,
950 local_nb[0].lnb_file_offset,
951 local_nb[npages-1].lnb_file_offset +
952 local_nb[npages-1].len - 1,
953 client_cksum, server_cksum);
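/**
 * Handle an OST_WRITE bulk request: fetch the client pages with a
 * BULK_GET_SINK transfer between obd_preprw() and obd_commitrw(), verify the
 * client-supplied bulk checksum when present, and fill in the per-niobuf
 * return codes (RMF_RCS) in the reply.
 */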
956 static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti)
958 struct ptlrpc_bulk_desc *desc = NULL;
959 struct obd_export *exp = req->rq_export;
960 struct niobuf_remote *remote_nb;
961 struct niobuf_local *local_nb;
962 struct obd_ioobj *ioo;
963 struct ost_body *body, *repbody;
964 struct l_wait_info lwi;
965 struct lustre_handle lockh = {0};
966 struct lustre_capa *capa = NULL;
968 int objcount, niocount, npages;
970 obd_count client_cksum = 0, server_cksum = 0;
971 cksum_type_t cksum_type = OBD_CKSUM_CRC32;
972 int no_reply = 0, mmap = 0;
973 __u32 o_uid = 0, o_gid = 0;
974 struct ost_thread_local_cache *tls;
977 req->rq_bulk_write = 1;
979 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK))
980 GOTO(out, rc = -EIO);
981 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK2))
982 GOTO(out, rc = -EFAULT);
984 /* pause before transaction has been started */
985 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, (obd_timeout + 1) / 4);
987 /* ost_body, ioobj & noibuf_remote are verified and swabbed in
988 * ost_rw_hpreq_check(). */
989 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
991 GOTO(out, rc = -EFAULT);
993 objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ,
994 RCL_CLIENT) / sizeof(*ioo);
995 ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
997 GOTO(out, rc = -EFAULT);
999 rc = ost_validate_obdo(exp, &body->oa, ioo);
1003 for (niocount = i = 0; i < objcount; i++)
1004 niocount += ioo[i].ioo_bufcnt;
1007 * It'd be nice to have a capsule function to indicate how many elements
1008 * there were in a buffer for an RMF that's declared to be an array.
1009 * It's easy enough to compute the number of elements here though.
1011 remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
1012 if (remote_nb == NULL || niocount != (req_capsule_get_size(&req->rq_pill,
1013 &RMF_NIOBUF_REMOTE, RCL_CLIENT) / sizeof(*remote_nb)))
1014 GOTO(out, rc = -EFAULT);
1016 if ((remote_nb[0].flags & OBD_BRW_MEMALLOC) &&
1017 (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
1018 cfs_memory_pressure_set();
1020 if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
1021 capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
1023 CERROR("Missing capability for OST BRW WRITE");
1024 GOTO(out, rc = -EFAULT);
1028 req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
1029 niocount * sizeof(*rcs));
1030 rc = req_capsule_server_pack(&req->rq_pill);
1033 CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_PACK, cfs_fail_val);
1034 rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS);
1036 tls = ost_tls_get(req);
1038 GOTO(out_bulk, rc = -ENOMEM);
1039 local_nb = tls->local;
1041 rc = ost_brw_lock_get(LCK_PW, exp, ioo, remote_nb, &lockh);
1046 * If getting the lock took more time than the
1047 * client was willing to wait, drop it. b=11330
1049 if (cfs_time_current_sec() > req->rq_deadline ||
1050 OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
1052 CERROR("Dropping timed-out write from %s because locking "
1053 "object "LPX64" took %ld seconds (limit was %ld).\n",
1054 libcfs_id2str(req->rq_peer), ioo->ioo_id,
1055 cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
1056 req->rq_deadline - req->rq_arrival_time.tv_sec);
1057 GOTO(out_lock, rc = -ETIMEDOUT);
1060 /* obd_preprw clobbers oa->valid, so save what we need */
1061 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1062 client_cksum = body->oa.o_cksum;
1063 if (body->oa.o_valid & OBD_MD_FLFLAGS)
1064 cksum_type = cksum_type_unpack(body->oa.o_flags);
1066 if (body->oa.o_valid & OBD_MD_FLFLAGS && body->oa.o_flags & OBD_FL_MMAP)
1069 /* Because we already sync the grant info with the client on reconnect,
1070 * the grant info is cleared for a resent req, so fed_grant and
1071 * total_grant will not be modified in the following preprw_write. */
1072 if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) {
1073 DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info");
1074 body->oa.o_valid &= ~OBD_MD_FLGRANT;
1077 if (exp_connect_rmtclient(exp)) {
1078 o_uid = body->oa.o_uid;
1079 o_gid = body->oa.o_gid;
1082 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1083 memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa));
1085 npages = OST_THREAD_POOL_SIZE;
1086 rc = obd_preprw(req->rq_svc_thread->t_env, OBD_BRW_WRITE, exp,
1087 &repbody->oa, objcount, ioo, remote_nb, &npages,
1088 local_nb, oti, capa);
1092 desc = ptlrpc_prep_bulk_exp(req, npages,
1093 BULK_GET_SINK, OST_BULK_PORTAL);
1095 GOTO(skip_transfer, rc = -ENOMEM);
1097 /* NB Having prepped, we must commit... */
1099 for (i = 0; i < npages; i++)
1100 ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
1101 local_nb[i].lnb_page_offset,
1104 rc = sptlrpc_svc_prep_bulk(req, desc);
1108 rc = target_bulk_io(exp, desc, &lwi);
1112 if (client_cksum != 0 && rc == 0) {
1113 static int cksum_counter;
1114 repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1115 repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL;
1116 repbody->oa.o_flags |= cksum_type_pack(cksum_type);
1117 server_cksum = ost_checksum_bulk(desc, OST_WRITE, cksum_type);
1118 repbody->oa.o_cksum = server_cksum;
1120 if (unlikely(client_cksum != server_cksum)) {
1121 ost_warn_on_cksum(req, desc, local_nb, npages,
1122 client_cksum, server_cksum, mmap);
1125 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
1126 CDEBUG(D_INFO, "Checksum %u from %s OK: %x\n",
1127 cksum_counter, libcfs_id2str(req->rq_peer),
1132 /* Must commit after prep above in all cases */
1133 rc = obd_commitrw(req->rq_svc_thread->t_env, OBD_BRW_WRITE, exp,
1134 &repbody->oa, objcount, ioo, remote_nb, npages,
1136 if (rc == -ENOTCONN)
1137 /* the quota acquire process has been given up because
1138 * either the client has been evicted or the client
1139 * has already timed out the request */
1142 if (exp_connect_rmtclient(exp)) {
1143 repbody->oa.o_uid = o_uid;
1144 repbody->oa.o_gid = o_gid;
1148 * Disable sending mtime back to the client. If the client locked the
1149 * whole object, then it has already updated the mtime on its side,
1150 * otherwise it will have to glimpse anyway (see bug 21489, comment 32)
1152 repbody->oa.o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLATIME);
1157 /* set per-requested niobuf return codes */
1158 for (i = j = 0; i < niocount; i++) {
1159 int len = remote_nb[i].len;
1164 LASSERT(j < npages);
1165 if (local_nb[j].rc < 0)
1166 rcs[i] = local_nb[j].rc;
1167 len -= local_nb[j].len;
1172 LASSERT(j == npages);
1173 ptlrpc_lprocfs_brw(req, nob);
1177 ost_brw_lock_put(LCK_PW, ioo, remote_nb, &lockh);
1182 ptlrpc_free_bulk_nopin(desc);
1185 oti_to_request(oti, req);
1186 target_committed_to_req(req);
1187 rc = ptlrpc_reply(req);
1188 } else if (!no_reply) {
1189 /* Only reply if there was no comms problem with bulk */
1190 target_committed_to_req(req);
1191 req->rq_status = rc;
1194 /* reply out callback would free */
1195 ptlrpc_req_drop_rs(req);
1196 LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s), "
1197 "client will retry: rc %d\n",
1198 exp->exp_obd->obd_name,
1199 obd_uuid2str(&exp->exp_client_uuid),
1200 obd_export_nid2str(exp), rc);
1202 cfs_memory_pressure_clr();
1207 * Implementation of OST_SET_INFO.
1209 * OST_SET_INFO is like ioctl(): heavily overloaded. Specifically, it takes a
1210 * "key" and a value RPC buffers as arguments, with the value's contents
1211 * interpreted according to the key.
1213 * Value types that need swabbing have swabbing done explicitly, either here or
1214 * in functions called from here. This should be corrected: all swabbing should
1215 * be done in the capsule abstraction, as that will then allow us to move
1216 * swabbing exclusively to the client without having to modify server code
1217 * outside the capsule abstraction's implementation itself. To correct this
1218 * will require minor changes to the capsule abstraction; see the comments for
1219 * req_capsule_extend() in layout.c.
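 *
 * Keys given special treatment here are KEY_GRANT_SHRINK (whose value is
 * really an ost_body, so the capsule is extended to RQF_OST_SET_GRANT_INFO),
 * KEY_EVICT_BY_NID and KEY_MDS_CONN; other keys are handed unchanged to
 * obd_set_info_async() for the OBD layer to interpret.
 */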
1221 static int ost_set_info(struct obd_export *exp, struct ptlrpc_request *req)
1223 struct ost_body *body = NULL, *repbody;
1224 char *key, *val = NULL;
1225 int keylen, vallen, rc = 0;
1226 int is_grant_shrink = 0;
1229 key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1231 DEBUG_REQ(D_HA, req, "no set_info key");
1234 keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
1237 vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
1240 if ((is_grant_shrink = KEY_IS(KEY_GRANT_SHRINK)))
1241 /* In this case the value is actually an RMF_OST_BODY, so we
1242 * transmutate the type of this PTLRPC */
1243 req_capsule_extend(&req->rq_pill, &RQF_OST_SET_GRANT_INFO);
1245 rc = req_capsule_server_pack(&req->rq_pill);
1250 if (is_grant_shrink) {
1251 body = req_capsule_client_get(&req->rq_pill,
1256 repbody = req_capsule_server_get(&req->rq_pill,
1258 memcpy(repbody, body, sizeof(*body));
1259 val = (char*)repbody;
1261 val = req_capsule_client_get(&req->rq_pill,
1266 if (KEY_IS(KEY_EVICT_BY_NID)) {
1268 obd_export_evict_by_nid(exp->exp_obd, val);
1270 } else if (KEY_IS(KEY_MDS_CONN) && ptlrpc_req_need_swab(req)) {
1271 if (vallen < sizeof(__u32))
1273 __swab32s((__u32 *)val);
1276 /* OBD will also check if KEY_IS(KEY_GRANT_SHRINK), and will cast val to
1277 * a struct ost_body * value */
1278 rc = obd_set_info_async(req->rq_svc_thread->t_env, exp, keylen,
1279 key, vallen, val, NULL);
1281 lustre_msg_set_status(req->rq_repmsg, 0);
1285 static int ost_get_info(struct obd_export *exp, struct ptlrpc_request *req)
1288 int keylen, replylen, rc = 0;
1289 struct req_capsule *pill = &req->rq_pill;
1292 /* this is the common part for the get_info RPC */
1293 key = req_capsule_client_get(pill, &RMF_SETINFO_KEY);
1295 DEBUG_REQ(D_HA, req, "no get_info key");
1298 keylen = req_capsule_get_size(pill, &RMF_SETINFO_KEY, RCL_CLIENT);
1300 if (KEY_IS(KEY_FIEMAP)) {
1301 struct ll_fiemap_info_key *fm_key = key;
1304 rc = ost_validate_obdo(exp, &fm_key->oa, NULL);
1309 rc = obd_get_info(req->rq_svc_thread->t_env, exp, keylen, key,
1310 &replylen, NULL, NULL);
1314 req_capsule_set_size(pill, &RMF_GENERIC_DATA,
1315 RCL_SERVER, replylen);
1317 rc = req_capsule_server_pack(pill);
1321 reply = req_capsule_server_get(pill, &RMF_GENERIC_DATA);
1325 /* call again to fill in the reply buffer */
1326 rc = obd_get_info(req->rq_svc_thread->t_env, exp, keylen, key,
1327 &replylen, reply, NULL);
1329 lustre_msg_set_status(req->rq_repmsg, 0);
1333 static int ost_handle_quotactl(struct ptlrpc_request *req)
1335 struct obd_quotactl *oqctl, *repoqc;
1339 oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
1341 GOTO(out, rc = -EPROTO);
1343 rc = req_capsule_server_pack(&req->rq_pill);
1347 repoqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
1348 req->rq_status = obd_quotactl(req->rq_export, oqctl);
1355 static int ost_handle_quotacheck(struct ptlrpc_request *req)
1357 struct obd_quotactl *oqctl;
1361 oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
1365 rc = req_capsule_server_pack(&req->rq_pill);
1369 /* deprecated, not used any more */
1370 req->rq_status = -EOPNOTSUPP;
1371 RETURN(-EOPNOTSUPP);
1374 static int ost_llog_handle_connect(struct obd_export *exp,
1375 struct ptlrpc_request *req)
1377 struct llogd_conn_body *body;
1381 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_CONN_BODY);
1382 rc = obd_llog_connect(exp, body);
1386 #define ost_init_sec_none(reply, exp) \
1388 reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT | \
1389 OBD_CONNECT_RMT_CLIENT_FORCE | \
1390 OBD_CONNECT_OSS_CAPA); \
1391 spin_lock(&exp->exp_lock); \
1392 exp->exp_connect_flags = reply->ocd_connect_flags; \
1393 spin_unlock(&exp->exp_lock); \
1396 static int ost_init_sec_level(struct ptlrpc_request *req)
1398 struct obd_export *exp = req->rq_export;
1399 struct req_capsule *pill = &req->rq_pill;
1400 struct obd_device *obd = exp->exp_obd;
1401 struct filter_obd *filter = &obd->u.filter;
1402 char *client = libcfs_nid2str(req->rq_peer.nid);
1403 struct obd_connect_data *data, *reply;
1407 data = req_capsule_client_get(pill, &RMF_CONNECT_DATA);
1408 reply = req_capsule_server_get(pill, &RMF_CONNECT_DATA);
1409 if (data == NULL || reply == NULL)
1412 /* connection from MDT is always trusted */
1413 if (req->rq_auth_usr_mdt) {
1414 ost_init_sec_none(reply, exp);
1418 /* no GSS support case */
1419 if (!req->rq_auth_gss) {
1420 if (filter->fo_sec_level > LUSTRE_SEC_NONE) {
1421 CWARN("client %s -> target %s does not user GSS, "
1422 "can not run under security level %d.\n",
1423 client, obd->obd_name, filter->fo_sec_level);
1426 ost_init_sec_none(reply, exp);
1431 /* old version case */
1432 if (unlikely(!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) ||
1433 !(data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA))) {
1434 if (filter->fo_sec_level > LUSTRE_SEC_NONE) {
1435 CWARN("client %s -> target %s uses old version, "
1436 "can not run under security level %d.\n",
1437 client, obd->obd_name, filter->fo_sec_level);
1440 CWARN("client %s -> target %s uses old version, "
1441 "run under security level %d.\n",
1442 client, obd->obd_name, filter->fo_sec_level);
1443 ost_init_sec_none(reply, exp);
1448 remote = data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT_FORCE;
1450 if (!req->rq_auth_remote)
1451 CDEBUG(D_SEC, "client (local realm) %s -> target %s "
1452 "asked to be remote.\n", client, obd->obd_name);
1453 } else if (req->rq_auth_remote) {
1455 CDEBUG(D_SEC, "client (remote realm) %s -> target %s is set "
1456 "as remote by default.\n", client, obd->obd_name);
1460 if (!filter->fo_fl_oss_capa) {
1461 CDEBUG(D_SEC, "client %s -> target %s is set as remote,"
1462 " but OSS capabilities are not enabled: %d.\n",
1463 client, obd->obd_name, filter->fo_fl_oss_capa);
1468 switch (filter->fo_sec_level) {
1469 case LUSTRE_SEC_NONE:
1471 ost_init_sec_none(reply, exp);
1474 CDEBUG(D_SEC, "client %s -> target %s is set as remote, "
1475 "can not run under security level %d.\n",
1476 client, obd->obd_name, filter->fo_sec_level);
1479 case LUSTRE_SEC_REMOTE:
1481 ost_init_sec_none(reply, exp);
1483 case LUSTRE_SEC_ALL:
1485 reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
1486 OBD_CONNECT_RMT_CLIENT_FORCE);
1487 if (!filter->fo_fl_oss_capa)
1488 reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
1490 spin_lock(&exp->exp_lock);
1491 exp->exp_connect_flags = reply->ocd_connect_flags;
1492 spin_unlock(&exp->exp_lock);
1504 * this should be done in filter_connect()/filter_reconnect(), but
1505 * we can't obtain information such as the NID, which is stored in the
1506 * incoming request, and thus can't decide what flavor to use, so we do it here.
1508 * This hack should be removed once the OST stack is rewritten, just
1509 * like what we are doing in mdt_obd_connect()/mdt_obd_reconnect().
1511 static int ost_connect_check_sptlrpc(struct ptlrpc_request *req)
1513 struct obd_export *exp = req->rq_export;
1514 struct filter_obd *filter = &exp->exp_obd->u.filter;
1515 struct sptlrpc_flavor flvr;
1518 if (unlikely(strcmp(exp->exp_obd->obd_type->typ_name,
1519 LUSTRE_ECHO_NAME) == 0)) {
1520 exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
1524 if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
1525 read_lock(&filter->fo_sptlrpc_lock);
1526 sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
1530 read_unlock(&filter->fo_sptlrpc_lock);
1532 spin_lock(&exp->exp_lock);
1534 exp->exp_sp_peer = req->rq_sp_from;
1535 exp->exp_flvr = flvr;
1537 if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
1538 exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
1539 CERROR("unauthorized rpc flavor %x from %s, "
1540 "expect %x\n", req->rq_flvr.sf_rpc,
1541 libcfs_nid2str(req->rq_peer.nid),
1542 exp->exp_flvr.sf_rpc);
1546 spin_unlock(&exp->exp_lock);
1548 if (exp->exp_sp_peer != req->rq_sp_from) {
1549 CERROR("RPC source %s doesn't match %s\n",
1550 sptlrpc_part2name(req->rq_sp_from),
1551 sptlrpc_part2name(exp->exp_sp_peer));
1554 rc = sptlrpc_target_export_check(exp, req);
1561 /* Ensure that data and metadata are synced to disk when the lock is cancelled (if requested). */
1563 int ost_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
1564 void *data, int flag)
1567 __u32 sync_lock_cancel = 0;
1568 __u32 len = sizeof(sync_lock_cancel);
1573 rc = lu_env_init(&env, LCT_DT_THREAD);
1574 if (unlikely(rc != 0))
1577 rc = obd_get_info(&env, lock->l_export, sizeof(KEY_SYNC_LOCK_CANCEL),
1578 KEY_SYNC_LOCK_CANCEL, &len, &sync_lock_cancel, NULL);
1579 if (rc == 0 && flag == LDLM_CB_CANCELING &&
1580 (lock->l_granted_mode & (LCK_PW|LCK_GROUP)) &&
1581 (sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
1582 (sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
1583 lock->l_flags & LDLM_FL_CBPENDING))) {
1584 struct obd_info *oinfo;
1588 OBD_ALLOC_PTR(oinfo);
1590 GOTO(out_env, rc = -ENOMEM);
1593 OBD_FREE_PTR(oinfo);
1594 GOTO(out_env, rc = -ENOMEM);
1596 oa->o_id = lock->l_resource->lr_name.name[0];
1597 oa->o_seq = lock->l_resource->lr_name.name[1];
1598 oa->o_valid = OBD_MD_FLID|OBD_MD_FLGROUP;
1600 oinfo->oi_capa = BYPASS_CAPA;
1602 rc = obd_sync(&env, lock->l_export, oinfo,
1603 lock->l_policy_data.l_extent.start,
1604 lock->l_policy_data.l_extent.end, NULL);
1606 CERROR("Error %d syncing data on lock cancel\n", rc);
1609 OBD_FREE_PTR(oinfo);
1612 rc = ldlm_server_blocking_ast(lock, desc, data, flag);
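/**
 * Decide whether a request that arrives while the OST is still recovering
 * should be processed immediately, queued for recovery via
 * target_queue_recovery_request(), or refused as "not permitted during
 * recovery", depending on its opcode.
 */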
1618 static int ost_filter_recovery_request(struct ptlrpc_request *req,
1619 struct obd_device *obd, int *process)
1621 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1622 case OST_CONNECT: /* This will never get here, but for completeness. */
1623 case OST_DISCONNECT:
1634 case OBD_LOG_CANCEL:
1636 *process = target_queue_recovery_request(req, obd);
1640 DEBUG_REQ(D_WARNING, req, "not permitted during recovery");
1646 int ost_msg_check_version(struct lustre_msg *msg)
1650 switch(lustre_msg_get_opc(msg)) {
1652 case OST_DISCONNECT:
1655 case SEC_CTX_INIT_CONT:
1657 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1659 CERROR("bad opc %u version %08x, expecting %08x\n",
1660 lustre_msg_get_opc(msg),
1661 lustre_msg_get_version(msg),
1662 LUSTRE_OBD_VERSION);
1675 case OST_QUOTACHECK:
1677 rc = lustre_msg_check_version(msg, LUSTRE_OST_VERSION);
1679 CERROR("bad opc %u version %08x, expecting %08x\n",
1680 lustre_msg_get_opc(msg),
1681 lustre_msg_get_version(msg),
1682 LUSTRE_OST_VERSION);
1687 case LDLM_BL_CALLBACK:
1688 case LDLM_CP_CALLBACK:
1689 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1691 CERROR("bad opc %u version %08x, expecting %08x\n",
1692 lustre_msg_get_opc(msg),
1693 lustre_msg_get_version(msg),
1694 LUSTRE_DLM_VERSION);
1696 case LLOG_ORIGIN_CONNECT:
1697 case OBD_LOG_CANCEL:
1698 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1700 CERROR("bad opc %u version %08x, expecting %08x\n",
1701 lustre_msg_get_opc(msg),
1702 lustre_msg_get_version(msg),
1703 LUSTRE_LOG_VERSION);
1705 case OST_QUOTA_ADJUST_QUNIT:
1707 CERROR("Quota adjust is deprecated as of 2.4.0\n");
1710 CERROR("Unexpected opcode %d\n", lustre_msg_get_opc(msg));
1716 struct ost_prolong_data {
1717 struct ptlrpc_request *opd_req;
1718 struct obd_export *opd_exp;
1719 struct obdo *opd_oa;
1720 struct ldlm_res_id opd_resid;
1721 struct ldlm_extent opd_extent;
1722 ldlm_mode_t opd_mode;
1723 unsigned int opd_locks;
1727 /* prolong locks for the current service time of the corresponding
1728 * portal (= OST_IO_PORTAL)
1730 static inline int prolong_timeout(struct ptlrpc_request *req)
1732 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
1735 return obd_timeout / 2;
1737 return max(at_est2timeout(at_get(&svcpt->scp_at_estimate)),
1741 static void ost_prolong_lock_one(struct ost_prolong_data *opd,
1742 struct ldlm_lock *lock)
1744 LASSERT(lock->l_export == opd->opd_exp);
1746 if (lock->l_destroyed) /* lock already cancelled */
1749 /* XXX: never try to grab resource lock here because we're inside
1750 * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take
1751 * res lock and then exp_bl_list_lock. */
1753 if (!(lock->l_flags & LDLM_FL_AST_SENT))
1754 /* ignore locks not being cancelled */
1758 "refreshed for req x"LPU64" ext("LPU64"->"LPU64") to %ds.\n",
1759 opd->opd_req->rq_xid, opd->opd_extent.start,
1760 opd->opd_extent.end, opd->opd_timeout);
1762 /* OK, this is possibly a lock the user holds while doing I/O;
1763 * let's refresh the eviction timer for it. */
1764 ldlm_refresh_waiting_lock(lock, opd->opd_timeout);
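/**
 * Walk the export's blocking-AST list (exp_bl_list) and refresh the
 * waiting-lock timer of every lock that overlaps the I/O described by
 * \a data, so a client actively using those locks is not evicted while the
 * request is in flight. If the request carries a lock handle
 * (OBD_MD_FLHANDLE) and that PW lock covers the whole extent, only it is
 * refreshed.
 */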
1768 static void ost_prolong_locks(struct ost_prolong_data *data)
1770 struct obd_export *exp = data->opd_exp;
1771 struct obdo *oa = data->opd_oa;
1772 struct ldlm_lock *lock;
1775 if (oa->o_valid & OBD_MD_FLHANDLE) {
1776 /* mostly a request should be covered by only one lock, so try the fast path first. */
1778 lock = ldlm_handle2lock(&oa->o_handle);
1780 /* Fast path to check if the lock covers the whole IO
1781 * region exclusively. */
1782 if (lock->l_granted_mode == LCK_PW &&
1783 ldlm_extent_contain(&lock->l_policy_data.l_extent,
1784 &data->opd_extent)) {
1786 ost_prolong_lock_one(data, lock);
1787 LDLM_LOCK_PUT(lock);
1790 LDLM_LOCK_PUT(lock);
1795 spin_lock_bh(&exp->exp_bl_list_lock);
1796 cfs_list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
1797 LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
1798 LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
1800 if (!ldlm_res_eq(&data->opd_resid, &lock->l_resource->lr_name))
1803 if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent,
1807 ost_prolong_lock_one(data, lock);
1809 spin_unlock_bh(&exp->exp_bl_list_lock);
1815 * Returns 1 if the given PTLRPC request matches the given LDLM lock, or 0 if it does not.
1818 static int ost_rw_hpreq_lock_match(struct ptlrpc_request *req,
1819 struct ldlm_lock *lock)
1821 struct niobuf_remote *nb;
1822 struct obd_ioobj *ioo;
1824 struct ldlm_extent ext;
1827 opc = lustre_msg_get_opc(req->rq_reqmsg);
1828 LASSERT(opc == OST_READ || opc == OST_WRITE);
1830 ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
1831 LASSERT(ioo != NULL);
1833 nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
1834 LASSERT(nb != NULL);
1836 ext.start = nb->offset;
1837 nb += ioo->ioo_bufcnt - 1;
1838 ext.end = nb->offset + nb->len - 1;
1840 LASSERT(lock->l_resource != NULL);
1841 if (!osc_res_name_eq(ioo->ioo_id, ioo->ioo_seq,
1842 &lock->l_resource->lr_name))
1846 if (opc == OST_READ)
1848 if (!(lock->l_granted_mode & mode))
1851 RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &ext));
1855 * High-priority queue request check for whether the given PTLRPC request (\a
1856 * req) is blocking an LDLM lock cancel.
1858 * Returns 1 if the given PTLRPC request (\a req) is blocking an LDLM lock
1859 * cancel, 0 if it is not, and -EFAULT if the request is malformed.
1861 * Only OST_READs, OST_WRITEs and OST_PUNCHes go on the h-p RPC queue. This
1862 * function looks only at OST_READs and OST_WRITEs.
1864 static int ost_rw_hpreq_check(struct ptlrpc_request *req)
1866 struct obd_device *obd = req->rq_export->exp_obd;
1867 struct ost_body *body;
1868 struct obd_ioobj *ioo;
1869 struct niobuf_remote *nb;
1870 struct ost_prolong_data opd = { 0 };
1875 * Use LASSERT to do a sanity check because malformed RPCs should have
1876 * been filtered out in ost_hpreq_handler().
1878 opc = lustre_msg_get_opc(req->rq_reqmsg);
1879 LASSERT(opc == OST_READ || opc == OST_WRITE);
1881 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1882 LASSERT(body != NULL);
1884 ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
1885 LASSERT(ioo != NULL);
1887 nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
1888 LASSERT(nb != NULL);
1889 LASSERT(!(nb->flags & OBD_BRW_SRVLOCK));
1891 osc_build_res_name(ioo->ioo_id, ioo->ioo_seq, &opd.opd_resid);
1895 if (opc == OST_READ)
1897 opd.opd_mode = mode;
1898 opd.opd_exp = req->rq_export;
1899 opd.opd_oa = &body->oa;
1900 opd.opd_extent.start = nb->offset;
1901 nb += ioo->ioo_bufcnt - 1;
1902 opd.opd_extent.end = nb->offset + nb->len - 1;
1903 opd.opd_timeout = prolong_timeout(req);
1905 DEBUG_REQ(D_RPCTRACE, req,
1906 "%s %s: refresh rw locks: " LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
1907 obd->obd_name, cfs_current()->comm,
1908 opd.opd_resid.name[0], opd.opd_resid.name[1],
1909 opd.opd_extent.start, opd.opd_extent.end);
1911 ost_prolong_locks(&opd);
1913 CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
1914 obd->obd_name, opd.opd_locks, req);
1916 RETURN(opd.opd_locks);
1919 static void ost_rw_hpreq_fini(struct ptlrpc_request *req)
1921 (void)ost_rw_hpreq_check(req);
1925 * Like ost_rw_hpreq_lock_match(), but for OST_PUNCH RPCs.
1927 static int ost_punch_hpreq_lock_match(struct ptlrpc_request *req,
1928 struct ldlm_lock *lock)
1930 struct ost_body *body;
1933 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1934 LASSERT(body != NULL);
1936 if (body->oa.o_valid & OBD_MD_FLHANDLE &&
1937 body->oa.o_handle.cookie == lock->l_handle.h_cookie)
1944 * Like ost_rw_hpreq_check(), but for OST_PUNCH RPCs.
1946 static int ost_punch_hpreq_check(struct ptlrpc_request *req)
1948 struct obd_device *obd = req->rq_export->exp_obd;
1949 struct ost_body *body;
1951 struct ost_prolong_data opd = { 0 };
1955 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1956 LASSERT(body != NULL);
1959 LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS) ||
1960 !(oa->o_flags & OBD_FL_SRVLOCK));
1963 end = start + oa->o_blocks;
1966 opd.opd_mode = LCK_PW;
1967 opd.opd_exp = req->rq_export;
1969 opd.opd_extent.start = start;
1970 opd.opd_extent.end = end;
1971 if (oa->o_blocks == OBD_OBJECT_EOF)
1972 opd.opd_extent.end = OBD_OBJECT_EOF;
1973 opd.opd_timeout = prolong_timeout(req);
1975 osc_build_res_name(oa->o_id, oa->o_seq, &opd.opd_resid);
1978 "%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
1980 opd.opd_resid.name[0], opd.opd_resid.name[1],
1981 opd.opd_extent.start, opd.opd_extent.end);
1983 ost_prolong_locks(&opd);
1985 CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
1986 obd->obd_name, opd.opd_locks, req);
1988 RETURN(opd.opd_locks > 0);
1991 static void ost_punch_hpreq_fini(struct ptlrpc_request *req)
1993 (void)ost_punch_hpreq_check(req);
1996 struct ptlrpc_hpreq_ops ost_hpreq_rw = {
1997 .hpreq_lock_match = ost_rw_hpreq_lock_match,
1998 .hpreq_check = ost_rw_hpreq_check,
1999 .hpreq_fini = ost_rw_hpreq_fini
2002 struct ptlrpc_hpreq_ops ost_hpreq_punch = {
2003 .hpreq_lock_match = ost_punch_hpreq_lock_match,
2004 .hpreq_check = ost_punch_hpreq_check,
2005 .hpreq_fini = ost_punch_hpreq_fini
2008 /** Assign high priority operations to the request if needed. */
2009 static int ost_io_hpreq_handler(struct ptlrpc_request *req)
2012 if (req->rq_export) {
2013 int opc = lustre_msg_get_opc(req->rq_reqmsg);
2014 struct ost_body *body;
2016 if (opc == OST_READ || opc == OST_WRITE) {
2017 struct niobuf_remote *nb;
2018 struct obd_ioobj *ioo;
2019 int objcount, niocount;
2023 /* RPCs on the H-P queue can be inspected before
2024 * ost_handler() initializes their pills, so we
2025 * initialize that here. Capsule initialization is
2026 * idempotent, as is setting the pill's format (provided
2027 * it doesn't change).
2029 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2030 if (opc == OST_READ)
2031 req_capsule_set(&req->rq_pill,
2034 req_capsule_set(&req->rq_pill,
2035 &RQF_OST_BRW_WRITE);
2037 body = req_capsule_client_get(&req->rq_pill,
2040 CERROR("Missing/short ost_body\n");
2044 objcount = req_capsule_get_size(&req->rq_pill,
2048 if (objcount == 0) {
2049 CERROR("Missing/short ioobj\n");
2053 CERROR("too many ioobjs (%d)\n", objcount);
2057 ioo = req_capsule_client_get(&req->rq_pill,
2060 CERROR("Missing/short ioobj\n");
2064 rc = ost_validate_obdo(req->rq_export, &body->oa, ioo);
2066 CERROR("invalid object ids\n");
2070 for (niocount = i = 0; i < objcount; i++) {
2071 if (ioo[i].ioo_bufcnt == 0) {
2072 CERROR("ioo[%d] has zero bufcnt\n", i);
2075 niocount += ioo[i].ioo_bufcnt;
2077 if (niocount > PTLRPC_MAX_BRW_PAGES) {
2078 DEBUG_REQ(D_RPCTRACE, req,
2079 "bulk has too many pages (%d)",
2084 nb = req_capsule_client_get(&req->rq_pill,
2085 &RMF_NIOBUF_REMOTE);
2087 CERROR("Missing/short niobuf\n");
2091 if (niocount == 0 || !(nb[0].flags & OBD_BRW_SRVLOCK))
2092 req->rq_ops = &ost_hpreq_rw;
2093 } else if (opc == OST_PUNCH) {
2094 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2095 req_capsule_set(&req->rq_pill, &RQF_OST_PUNCH);
2097 body = req_capsule_client_get(&req->rq_pill,
2100 CERROR("Missing/short ost_body\n");
2104 if (!(body->oa.o_valid & OBD_MD_FLFLAGS) ||
2105 !(body->oa.o_flags & OBD_FL_SRVLOCK))
2106 req->rq_ops = &ost_hpreq_punch;
2112 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
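/**
 * Main request handler for the OST service: refill the environment, check
 * connection state and recovery status, validate the message version, then
 * dispatch on the opcode to the per-operation handlers above. OST_READ and
 * OST_WRITE send their own replies; everything else is answered at the end
 * via target_send_reply().
 */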
2113 int ost_handle(struct ptlrpc_request *req)
2115 struct obd_trans_info trans_info = { 0, };
2116 struct obd_trans_info *oti = &trans_info;
2117 int should_process, fail = OBD_FAIL_OST_ALL_REPLY_NET, rc = 0;
2118 struct obd_device *obd = NULL;
2121 /* The OST module is kept across remounts, but dropping the last reference
2122 * to a specific module (say, osd or ofd) kills all related keys
2123 * in the environment, so we have to refill it until the root
2124 * cause is fixed properly. */
2125 lu_env_refill(req->rq_svc_thread->t_env);
2127 LASSERT(current->journal_info == NULL);
2129 /* primordial rpcs don't affect server recovery */
2130 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2132 case SEC_CTX_INIT_CONT:
2137 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2139 if (lustre_msg_get_opc(req->rq_reqmsg) != OST_CONNECT) {
2140 if (!class_connected_export(req->rq_export)) {
2141 CDEBUG(D_HA,"operation %d on unconnected OST from %s\n",
2142 lustre_msg_get_opc(req->rq_reqmsg),
2143 libcfs_id2str(req->rq_peer));
2144 req->rq_status = -ENOTCONN;
2145 GOTO(out, rc = -ENOTCONN);
2148 obd = req->rq_export->exp_obd;
2150 /* Check for aborted recovery. */
2151 if (obd->obd_recovering) {
2152 rc = ost_filter_recovery_request(req, obd,
2154 if (rc || !should_process)
2156 else if (should_process < 0) {
2157 req->rq_status = should_process;
2158 rc = ptlrpc_error(req);
2166 rc = ost_msg_check_version(req->rq_reqmsg);
2170 if (req && req->rq_reqmsg && req->rq_export &&
2171 (req->rq_export->exp_connect_flags & OBD_CONNECT_JOBSTATS))
2172 oti->oti_jobid = lustre_msg_get_jobid(req->rq_reqmsg);
2174 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2176 CDEBUG(D_INODE, "connect\n");
2177 req_capsule_set(&req->rq_pill, &RQF_OST_CONNECT);
2178 if (OBD_FAIL_CHECK(OBD_FAIL_OST_CONNECT_NET))
2180 rc = target_handle_connect(req);
2181 if (OBD_FAIL_CHECK(OBD_FAIL_OST_CONNECT_NET2))
2184 rc = ost_init_sec_level(req);
2186 rc = ost_connect_check_sptlrpc(req);
2190 case OST_DISCONNECT:
2191 CDEBUG(D_INODE, "disconnect\n");
2192 req_capsule_set(&req->rq_pill, &RQF_OST_DISCONNECT);
2193 if (OBD_FAIL_CHECK(OBD_FAIL_OST_DISCONNECT_NET))
2195 rc = target_handle_disconnect(req);
2198 CDEBUG(D_INODE, "create\n");
2199 req_capsule_set(&req->rq_pill, &RQF_OST_CREATE);
2200 if (OBD_FAIL_CHECK(OBD_FAIL_OST_CREATE_NET))
2202 if (OBD_FAIL_CHECK(OBD_FAIL_OST_EROFS))
2203 GOTO(out, rc = -EROFS);
2204 rc = ost_create(req->rq_export, req, oti);
2207 CDEBUG(D_INODE, "destroy\n");
2208 req_capsule_set(&req->rq_pill, &RQF_OST_DESTROY);
2209 if (OBD_FAIL_CHECK(OBD_FAIL_OST_DESTROY_NET))
2211 if (OBD_FAIL_CHECK(OBD_FAIL_OST_EROFS))
2212 GOTO(out, rc = -EROFS);
2213 rc = ost_destroy(req->rq_export, req, oti);
2216 CDEBUG(D_INODE, "getattr\n");
2217 req_capsule_set(&req->rq_pill, &RQF_OST_GETATTR);
2218 if (OBD_FAIL_CHECK(OBD_FAIL_OST_GETATTR_NET))
2220 rc = ost_getattr(req->rq_export, req);
2223 CDEBUG(D_INODE, "setattr\n");
2224 req_capsule_set(&req->rq_pill, &RQF_OST_SETATTR);
2225 if (OBD_FAIL_CHECK(OBD_FAIL_OST_SETATTR_NET))
2227 rc = ost_setattr(req->rq_export, req, oti);
2230 req_capsule_set(&req->rq_pill, &RQF_OST_BRW_WRITE);
2231 CDEBUG(D_INODE, "write\n");
2232 /* req->rq_request_portal would be nice, if it was set */
2233 if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
2234 CERROR("%s: deny write request from %s to portal %u\n",
2235 req->rq_export->exp_obd->obd_name,
2236 obd_export_nid2str(req->rq_export),
2237 ptlrpc_req2svc(req)->srv_req_portal);
2238 GOTO(out, rc = -EPROTO);
2240 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_NET))
2242 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC))
2243 GOTO(out, rc = -ENOSPC);
2244 if (OBD_FAIL_TIMEOUT(OBD_FAIL_OST_EROFS, 1))
2245 GOTO(out, rc = -EROFS);
2246 rc = ost_brw_write(req, oti);
2247 LASSERT(current->journal_info == NULL);
2248 /* ost_brw_write sends its own replies */
2251 req_capsule_set(&req->rq_pill, &RQF_OST_BRW_READ);
2252 CDEBUG(D_INODE, "read\n");
2253 /* req->rq_request_portal would be nice, if it was set */
2254 if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
2255 CERROR("%s: deny read request from %s to portal %u\n",
2256 req->rq_export->exp_obd->obd_name,
2257 obd_export_nid2str(req->rq_export),
2258 ptlrpc_req2svc(req)->srv_req_portal);
2259 GOTO(out, rc = -EPROTO);
2261 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_NET))
2263 rc = ost_brw_read(req, oti);
2264 LASSERT(current->journal_info == NULL);
2265 /* ost_brw_read sends its own replies */
        case OST_PUNCH:
                CDEBUG(D_INODE, "punch\n");
                req_capsule_set(&req->rq_pill, &RQF_OST_PUNCH);
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_PUNCH_NET))
                        RETURN(0);
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_EROFS))
                        GOTO(out, rc = -EROFS);
                rc = ost_punch(req->rq_export, req, oti);
                break;
        case OST_STATFS:
                CDEBUG(D_INODE, "statfs\n");
                req_capsule_set(&req->rq_pill, &RQF_OST_STATFS);
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_STATFS_NET))
                        RETURN(0);
                rc = ost_statfs(req);
                break;
        case OST_SYNC:
                CDEBUG(D_INODE, "sync\n");
                req_capsule_set(&req->rq_pill, &RQF_OST_SYNC);
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_SYNC_NET))
                        RETURN(0);
                rc = ost_sync(req->rq_export, req, oti);
                break;
        case OST_SET_INFO:
                DEBUG_REQ(D_INODE, req, "set_info");
                req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
                rc = ost_set_info(req->rq_export, req);
                break;
        case OST_GET_INFO:
                DEBUG_REQ(D_INODE, req, "get_info");
                req_capsule_set(&req->rq_pill, &RQF_OST_GET_INFO_GENERIC);
                rc = ost_get_info(req->rq_export, req);
                break;
        case OST_QUOTACHECK:
                CDEBUG(D_INODE, "quotacheck\n");
                req_capsule_set(&req->rq_pill, &RQF_OST_QUOTACHECK);
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_QUOTACHECK_NET))
                        RETURN(0);
                rc = ost_handle_quotacheck(req);
                break;
        case OST_QUOTACTL:
                CDEBUG(D_INODE, "quotactl\n");
                req_capsule_set(&req->rq_pill, &RQF_OST_QUOTACTL);
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_QUOTACTL_NET))
                        RETURN(0);
                rc = ost_handle_quotactl(req);
                break;
        case OBD_PING:
                DEBUG_REQ(D_INODE, req, "ping");
                req_capsule_set(&req->rq_pill, &RQF_OBD_PING);
                rc = target_handle_ping(req);
                break;
        /* FIXME - just reply status */
        case LLOG_ORIGIN_CONNECT:
                DEBUG_REQ(D_INODE, req, "log connect");
                req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_CONNECT);
                rc = ost_llog_handle_connect(req->rq_export, req);
                req->rq_status = rc;
                rc = req_capsule_server_pack(&req->rq_pill);
                if (rc)
                        RETURN(rc);
                RETURN(ptlrpc_reply(req));
        case OBD_LOG_CANCEL:
                CDEBUG(D_INODE, "log cancel\n");
                req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
                        RETURN(0);
                rc = llog_origin_handle_cancel(req);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
                        RETURN(0);
                req->rq_status = rc;
                rc = req_capsule_server_pack(&req->rq_pill);
                if (rc)
                        RETURN(rc);
                RETURN(ptlrpc_reply(req));
        case LDLM_ENQUEUE:
                CDEBUG(D_INODE, "enqueue\n");
                req_capsule_set(&req->rq_pill, &RQF_LDLM_ENQUEUE);
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_NET))
                        RETURN(0);
                rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
                                         ldlm_server_blocking_ast,
                                         ldlm_server_glimpse_ast);
                fail = OBD_FAIL_OST_LDLM_REPLY_NET;
                break;
        case LDLM_CONVERT:
                CDEBUG(D_INODE, "convert\n");
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CONVERT_NET))
                        RETURN(0);
                rc = ldlm_handle_convert(req);
                break;
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_NET))
                        RETURN(0);
                rc = ldlm_handle_cancel(req);
                break;
        case LDLM_BL_CALLBACK:
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "callback\n");
                CERROR("callbacks should not happen on OST\n");
                /* fall through */
        default:
                CERROR("Unexpected opcode %d\n",
                       lustre_msg_get_opc(req->rq_reqmsg));
                req->rq_status = -ENOTSUPP;
                rc = ptlrpc_error(req);
                RETURN(rc);
        }

        LASSERT(current->journal_info == NULL);

        EXIT;
        /* If we're DISCONNECTing, the export_data is already freed */
        if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != OST_DISCONNECT)
                target_committed_to_req(req);

out:
        if (!rc)
                oti_to_request(oti, req);

        target_send_reply(req, rc, fail);
        return 0;
}
EXPORT_SYMBOL(ost_handle);
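
/*
 * Note: ost_handle() is wired in below (see ost_setup()) as the
 * so_req_handler for all three OSS ptlrpc services.  The OST_READ and
 * OST_WRITE arms above additionally insist that the request arrived on
 * OST_IO_PORTAL, so bulk I/O is in practice only served by the "ost_io"
 * service.
 */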

/**
 * Free the per-thread pool created by ost_io_thread_init().
 */
static void ost_io_thread_done(struct ptlrpc_thread *thread)
{
        struct ost_thread_local_cache *tls; /* TLS stands for Thread-Local
                                             * Storage */
        ENTRY;

        LASSERT(thread != NULL);

        /* Be prepared to handle partially-initialized pools, because this
         * is also called from ost_io_thread_init() for cleanup. */
        tls = thread->t_data;
        if (tls != NULL) {
                OBD_FREE_PTR(tls);
                thread->t_data = NULL;
        }
        EXIT;
}

/**
 * Initialize the per-thread page pool (bug 5137).
 */
static int ost_io_thread_init(struct ptlrpc_thread *thread)
{
        struct ost_thread_local_cache *tls;
        ENTRY;

        LASSERT(thread != NULL);
        LASSERT(thread->t_data == NULL);

        OBD_ALLOC_PTR(tls);
        if (tls == NULL)
                RETURN(-ENOMEM);
        thread->t_data = tls;
        RETURN(0);
}
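
/*
 * The cache allocated above is looked up again per request via ost_tls()
 * (defined near the end of this file); since it is private to the owning
 * service thread, the bulk I/O paths can use it without additional locking.
 */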

#define OST_WATCHDOG_TIMEOUT (obd_timeout * 1000)

static struct cfs_cpt_table *ost_io_cptable;
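
/*
 * ost_setup() below registers three ptlrpc services, all using ost_handle()
 * as their request handler:
 *   - the main OSS service on OST_REQUEST_PORTAL,
 *   - "ost_create" on OST_CREATE_PORTAL (object create/destroy),
 *   - "ost_io" on OST_IO_PORTAL (bulk read/write, with a per-thread page
 *     pool set up by ost_io_thread_init()).
 * Thread counts and CPU partitions are taken from the module parameters
 * declared at the top of this file.
 */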

/* Sigh - really, this is an OSS, the _server_, not the _target_ */
static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
{
        static struct ptlrpc_service_conf svc_conf;
        struct ost_obd *ost = &obd->u.ost;
        struct lprocfs_static_vars lvars;
        nodemask_t *mask;
        int rc;
        ENTRY;

        rc = cfs_cleanup_group_info();
        if (rc)
                RETURN(rc);

        lprocfs_ost_init_vars(&lvars);
        lprocfs_obd_setup(obd, lvars.obd_vars);

        mutex_init(&ost->ost_health_mutex);

        svc_conf = (typeof(svc_conf)) {
                .psc_name               = LUSTRE_OSS_NAME,
                .psc_watchdog_factor    = OSS_SERVICE_WATCHDOG_FACTOR,
                .psc_buf                = {
                        .bc_nbufs               = OST_NBUFS,
                        .bc_buf_size            = OST_BUFSIZE,
                        .bc_req_max_size        = OST_MAXREQSIZE,
                        .bc_rep_max_size        = OST_MAXREPSIZE,
                        .bc_req_portal          = OST_REQUEST_PORTAL,
                        .bc_rep_portal          = OSC_REPLY_PORTAL,
                },
                .psc_thr                = {
                        .tc_thr_name            = "ll_ost",
                        .tc_thr_factor          = OSS_THR_FACTOR,
                        .tc_nthrs_init          = OSS_NTHRS_INIT,
                        .tc_nthrs_base          = OSS_NTHRS_BASE,
                        .tc_nthrs_max           = OSS_NTHRS_MAX,
                        .tc_nthrs_user          = oss_num_threads,
                        .tc_cpu_affinity        = 1,
                        .tc_ctx_tags            = LCT_DT_THREAD,
                },
                .psc_cpt                = {
                        .cc_pattern             = oss_cpts,
                },
                .psc_ops                = {
                        .so_req_handler         = ost_handle,
                        .so_req_printer         = target_print_req,
                        .so_hpreq_handler       = ptlrpc_hpreq_handler,
                },
        };
        ost->ost_service = ptlrpc_register_service(&svc_conf,
                                                   obd->obd_proc_entry);
        if (IS_ERR(ost->ost_service)) {
                rc = PTR_ERR(ost->ost_service);
                CERROR("failed to start service: %d\n", rc);
                GOTO(out_lprocfs, rc);
        }

        memset(&svc_conf, 0, sizeof(svc_conf));
        svc_conf = (typeof(svc_conf)) {
                .psc_name               = "ost_create",
                .psc_watchdog_factor    = OSS_SERVICE_WATCHDOG_FACTOR,
                .psc_buf                = {
                        .bc_nbufs               = OST_NBUFS,
                        .bc_buf_size            = OST_BUFSIZE,
                        .bc_req_max_size        = OST_MAXREQSIZE,
                        .bc_rep_max_size        = OST_MAXREPSIZE,
                        .bc_req_portal          = OST_CREATE_PORTAL,
                        .bc_rep_portal          = OSC_REPLY_PORTAL,
                },
                .psc_thr                = {
                        .tc_thr_name            = "ll_ost_create",
                        .tc_thr_factor          = OSS_CR_THR_FACTOR,
                        .tc_nthrs_init          = OSS_CR_NTHRS_INIT,
                        .tc_nthrs_base          = OSS_CR_NTHRS_BASE,
                        .tc_nthrs_max           = OSS_CR_NTHRS_MAX,
                        .tc_nthrs_user          = oss_num_create_threads,
                        .tc_cpu_affinity        = 1,
                        .tc_ctx_tags            = LCT_DT_THREAD,
                },
                .psc_cpt                = {
                        .cc_pattern             = oss_cpts,
                },
                .psc_ops                = {
                        .so_req_handler         = ost_handle,
                        .so_req_printer         = target_print_req,
                },
        };
        ost->ost_create_service = ptlrpc_register_service(&svc_conf,
                                                          obd->obd_proc_entry);
        if (IS_ERR(ost->ost_create_service)) {
                rc = PTR_ERR(ost->ost_create_service);
                CERROR("failed to start OST create service: %d\n", rc);
                GOTO(out_service, rc);
        }

        mask = cfs_cpt_table->ctb_nodemask;
        /* even if the CPT feature is disabled at the libcfs level by setting
         * the partition number to 1, we still want node affinity for the io
         * service */
        if (cfs_cpt_number(cfs_cpt_table) == 1 && nodes_weight(*mask) > 1) {
                int cpt = 0;
                int i;

                ost_io_cptable = cfs_cpt_table_alloc(nodes_weight(*mask));
                for_each_node_mask(i, *mask) {
                        if (ost_io_cptable == NULL) {
                                CWARN("OSS failed to create CPT table\n");
                                break;
                        }

                        rc = cfs_cpt_set_node(ost_io_cptable, cpt++, i);
                        if (!rc) {
                                CWARN("OSS failed to set node %d for "
                                      "IO CPT table\n", i);
                                cfs_cpt_table_free(ost_io_cptable);
                                ost_io_cptable = NULL;
                                break;
                        }
                }
        }
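
/*
 * When libcfs runs with a single CPT but the machine has several NUMA
 * nodes, the table built above ends up with one partition per node, so the
 * ost_io threads below are still spread across nodes.  If the table was not
 * built, ost_io_cptable stays NULL and the oss_io_cpts pattern (if any) is
 * used for the io service instead.
 */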

        memset(&svc_conf, 0, sizeof(svc_conf));
        svc_conf = (typeof(svc_conf)) {
                .psc_name               = "ost_io",
                .psc_watchdog_factor    = OSS_SERVICE_WATCHDOG_FACTOR,
                .psc_buf                = {
                        .bc_nbufs               = OST_NBUFS,
                        .bc_buf_size            = OST_BUFSIZE,
                        .bc_req_max_size        = OST_MAXREQSIZE,
                        .bc_rep_max_size        = OST_MAXREPSIZE,
                        .bc_req_portal          = OST_IO_PORTAL,
                        .bc_rep_portal          = OSC_REPLY_PORTAL,
                },
                .psc_thr                = {
                        .tc_thr_name            = "ll_ost_io",
                        .tc_thr_factor          = OSS_THR_FACTOR,
                        .tc_nthrs_init          = OSS_NTHRS_INIT,
                        .tc_nthrs_base          = OSS_NTHRS_BASE,
                        .tc_nthrs_max           = OSS_NTHRS_MAX,
                        .tc_nthrs_user          = oss_num_threads,
                        .tc_cpu_affinity        = 1,
                        .tc_ctx_tags            = LCT_DT_THREAD,
                },
                .psc_cpt                = {
                        .cc_cptable             = ost_io_cptable,
                        .cc_pattern             = ost_io_cptable == NULL ?
                                                  oss_io_cpts : NULL,
                },
                .psc_ops                = {
                        .so_thr_init            = ost_io_thread_init,
                        .so_thr_done            = ost_io_thread_done,
                        .so_req_handler         = ost_handle,
                        .so_hpreq_handler       = ost_io_hpreq_handler,
                        .so_req_printer         = target_print_req,
                },
        };
        ost->ost_io_service = ptlrpc_register_service(&svc_conf,
                                                      obd->obd_proc_entry);
        if (IS_ERR(ost->ost_io_service)) {
                rc = PTR_ERR(ost->ost_io_service);
                CERROR("failed to start OST I/O service: %d\n", rc);
                ost->ost_io_service = NULL;
                GOTO(out_create, rc);
        }

        ping_evictor_start();

        RETURN(0);

out_create:
        ptlrpc_unregister_service(ost->ost_create_service);
        ost->ost_create_service = NULL;
out_service:
        ptlrpc_unregister_service(ost->ost_service);
        ost->ost_service = NULL;
out_lprocfs:
        lprocfs_obd_cleanup(obd);
        RETURN(rc);
}

static int ost_cleanup(struct obd_device *obd)
{
        struct ost_obd *ost = &obd->u.ost;
        int err = 0;
        ENTRY;

        ping_evictor_stop();

        /* there is no recovery for OST OBD, all recovery is controlled by
         * the obdfilter OBD */
        LASSERT(obd->obd_recovering == 0);
        mutex_lock(&ost->ost_health_mutex);
        ptlrpc_unregister_service(ost->ost_service);
        ptlrpc_unregister_service(ost->ost_create_service);
        ptlrpc_unregister_service(ost->ost_io_service);
        ost->ost_service = NULL;
        ost->ost_create_service = NULL;
        ost->ost_io_service = NULL;
        mutex_unlock(&ost->ost_health_mutex);

        lprocfs_obd_cleanup(obd);

        if (ost_io_cptable != NULL) {
                cfs_cpt_table_free(ost_io_cptable);
                ost_io_cptable = NULL;
        }

        RETURN(err);
}

static int ost_health_check(const struct lu_env *env, struct obd_device *obd)
{
        struct ost_obd *ost = &obd->u.ost;
        int rc = 0;

        mutex_lock(&ost->ost_health_mutex);
        rc |= ptlrpc_service_health_check(ost->ost_service);
        rc |= ptlrpc_service_health_check(ost->ost_create_service);
        rc |= ptlrpc_service_health_check(ost->ost_io_service);
        mutex_unlock(&ost->ost_health_mutex);

        /*
         * health_check is expected to return 0 when healthy
         * and 1 when unhealthy.
         */
        if (rc != 0)
                rc = 1;

        return rc;
}

struct ost_thread_local_cache *ost_tls(struct ptlrpc_request *r)
{
        return (struct ost_thread_local_cache *)(r->rq_svc_thread->t_data);
}

/* use obd ops to offer management infrastructure */
static struct obd_ops ost_obd_ops = {
        .o_owner        = THIS_MODULE,
        .o_setup        = ost_setup,
        .o_cleanup      = ost_cleanup,
        .o_health_check = ost_health_check,
};

static int __init ost_init(void)
{
        struct lprocfs_static_vars lvars;
        int rc;
        ENTRY;

        ost_page_to_corrupt = cfs_alloc_page(CFS_ALLOC_STD);

        lprocfs_ost_init_vars(&lvars);
        rc = class_register_type(&ost_obd_ops, NULL, lvars.module_vars,
                                 LUSTRE_OSS_NAME, NULL);

        if (ost_num_threads != 0 && oss_num_threads == 0) {
                LCONSOLE_INFO("ost_num_threads module parameter is deprecated, "
                              "use oss_num_threads instead or unset both for "
                              "dynamic thread startup\n");
                oss_num_threads = ost_num_threads;
        }

        RETURN(rc);
}
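
/*
 * Illustrative example only (not taken from this file): assuming the module
 * is loaded as ost.ko, the thread-count and CPU-partition parameters above
 * are typically set at load time, e.g. in /etc/modprobe.d/lustre.conf:
 *
 *   options ost oss_num_threads=64 oss_cpts="[0-3]" oss_io_cpts="[4-7]"
 *
 * The exact CPT pattern syntax is defined by libcfs, not by this handler.
 */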

static void /*__exit*/ ost_exit(void)
{
        if (ost_page_to_corrupt)
                page_cache_release(ost_page_to_corrupt);

        class_unregister_type(LUSTRE_OSS_NAME);
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Target (OST) v0.01");
MODULE_LICENSE("GPL");

module_init(ost_init);
module_exit(ost_exit);