1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table.)
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_OSC
35 # include <linux/version.h>
36 # include <linux/module.h>
37 # include <linux/mm.h>
38 # include <linux/highmem.h>
39 # include <linux/ctype.h>
40 # include <linux/init.h>
41 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/workqueue.h>
43 # include <linux/smp_lock.h>
45 # include <linux/locks.h>
47 #else /* __KERNEL__ */
48 # include <liblustre.h>
51 #include <linux/lustre_dlm.h>
52 #include <libcfs/kp30.h>
53 #include <linux/lustre_net.h>
54 #include <linux/lustre_sec.h>
55 #include <lustre/lustre_user.h>
56 #include <linux/obd_ost.h>
57 #include <linux/obd_lov.h>
63 #include <linux/lustre_ha.h>
64 #include <linux/lprocfs_status.h>
65 #include <linux/lustre_log.h>
66 #include <linux/lustre_lite.h>
67 #include <linux/lustre_audit.h>
68 #include <linux/lustre_gs.h>
70 #include "osc_internal.h"
72 /* Pack OSC object metadata for disk storage (LE byte order). */
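/* The usual obd packmd calling convention applies (consistent with the
 * alloc/free fragments below): lmmp == NULL asks only for the packed
 * size, *lmmp != NULL with lsm == NULL frees a previously packed
 * buffer, and otherwise a buffer is allocated and the object id/group
 * are packed into it. */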
73 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
74 struct lov_stripe_md *lsm)
79 lmm_size = sizeof(**lmmp);
84 OBD_FREE(*lmmp, lmm_size);
90 OBD_ALLOC(*lmmp, lmm_size);
96 LASSERT(lsm->lsm_object_id);
97 LASSERT(lsm->lsm_object_gr);
98 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
99 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
105 /* Unpack OSC object metadata from disk storage (LE byte order). */
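/* The mirror-image convention: lsmp == NULL asks for the in-memory
 * size, *lsmp != NULL with lmm == NULL frees it, and otherwise the
 * stripe md is allocated and filled from the little-endian wire form. */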
106 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
107 struct lov_mds_md *lmm, int lmm_bytes)
113 if (lmm_bytes < sizeof (*lmm)) {
114 CERROR("lov_mds_md too small: %d, need %d\n",
115 lmm_bytes, (int)sizeof(*lmm));
118 /* XXX LOV_MAGIC etc check? */
120 if (lmm->lmm_object_id == 0) {
121 CERROR("lov_mds_md: zero lmm_object_id\n");
126 lsm_size = lov_stripe_md_size(1);
130 if (*lsmp != NULL && lmm == NULL) {
131 OBD_FREE(*lsmp, lsm_size);
137 OBD_ALLOC(*lsmp, lsm_size);
140 loi_init((*lsmp)->lsm_oinfo);
144 /* XXX zero *lsmp? */
145 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
146 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
147 LASSERT((*lsmp)->lsm_object_id);
148 LASSERT((*lsmp)->lsm_object_gr);
151 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
156 static int osc_getattr_interpret(struct ptlrpc_request *req,
157 struct osc_getattr_async_args *aa, int rc)
159 struct ost_body *body;
165 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
167 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
168 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
170 /* This should really be sent by the OST */
171 aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
172 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
174 CERROR("can't unpack ost_body\n");
176 aa->aa_oa->o_valid = 0;
182 static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
183 struct lov_stripe_md *md,
184 struct ptlrpc_request_set *set)
186 struct ptlrpc_request *request;
187 struct ost_body *body;
188 int size = sizeof(*body);
189 struct osc_getattr_async_args *aa;
192 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
193 OST_GETATTR, 1, &size, NULL);
197 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
198 memcpy(&body->oa, oa, sizeof(*oa));
200 request->rq_replen = lustre_msg_size(1, &size);
201 request->rq_interpret_reply = osc_getattr_interpret;
203 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
204 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
207 ptlrpc_set_add_req (set, request);
211 static int osc_getattr(struct obd_export *exp, struct obdo *oa,
212 struct lov_stripe_md *md)
214 struct ptlrpc_request *request;
215 struct ost_body *body;
216 int rc, size = sizeof(*body);
219 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
220 OST_GETATTR, 1, &size, NULL);
224 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
225 memcpy(&body->oa, oa, sizeof(*oa));
227 request->rq_replen = lustre_msg_size(1, &size);
229 rc = ptlrpc_queue_wait(request);
231 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
235 body = lustre_swab_repbuf(request, 0, sizeof (*body),
236 lustre_swab_ost_body);
238 CERROR ("can't unpack ost_body\n");
239 GOTO (out, rc = -EPROTO);
242 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
243 memcpy(oa, &body->oa, sizeof(*oa));
245 /* This should really be sent by the OST */
246 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
247 oa->o_valid |= OBD_MD_FLBLKSZ;
251 ptlrpc_req_finished(request);
255 static int osc_setattr(struct obd_export *exp, struct obdo *oa,
256 struct lov_stripe_md *md, struct obd_trans_info *oti,
257 struct lustre_capa *capa)
259 struct ptlrpc_request *request;
260 struct ost_body *body;
261 int rc, size = sizeof(*body);
264 LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);
266 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
267 OST_SETATTR, 1, &size, NULL);
271 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
272 memcpy(&body->oa, oa, sizeof(*oa));
274 request->rq_replen = lustre_msg_size(1, &size);
276 if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
277 ptlrpcd_add_req(request);
280 rc = ptlrpc_queue_wait(request);
284 body = lustre_swab_repbuf(request, 0, sizeof(*body),
285 lustre_swab_ost_body);
287 GOTO(out, rc = -EPROTO);
289 memcpy(oa, &body->oa, sizeof(*oa));
293 ptlrpc_req_finished(request);
297 int osc_real_create(struct obd_export *exp, struct obdo *oa,
298 struct lov_stripe_md **ea, struct obd_trans_info *oti)
300 struct osc_creator *oscc = &exp->exp_obd->u.cli.cl_oscc;
301 struct ptlrpc_request *request;
302 struct ost_body *body;
303 struct lov_stripe_md *lsm;
304 int rc, size = sizeof(*body);
312 rc = obd_alloc_memmd(exp, &lsm);
317 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
318 OST_CREATE, 1, &size, NULL);
320 GOTO(out, rc = -ENOMEM);
322 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
323 memcpy(&body->oa, oa, sizeof(body->oa));
325 request->rq_replen = lustre_msg_size(1, &size);
326 if (oa->o_valid & OBD_MD_FLINLINE) {
327 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
328 oa->o_flags == OBD_FL_DELORPHAN);
329 DEBUG_REQ(D_HA, request,
330 "delorphan from OST integration");
331 /* Don't resend the delorphan request */
332 request->rq_no_resend = request->rq_no_delay = 1;
335 rc = ptlrpc_queue_wait(request);
339 body = lustre_swab_repbuf(request, 0, sizeof(*body),
340 lustre_swab_ost_body);
342 CERROR ("can't unpack ost_body\n");
343 GOTO (out_req, rc = -EPROTO);
346 if ((oa->o_valid & OBD_MD_FLFLAGS) && oa->o_flags == OBD_FL_DELORPHAN) {
347 struct obd_import *imp = class_exp2cliimp(exp);
348 /* MDS declares last known object, OSS responds
349 * with next possible object -bzzz */
350 spin_lock(&oscc->oscc_lock);
351 oscc->oscc_next_id = body->oa.o_id;
352 spin_unlock(&oscc->oscc_lock);
353 CDEBUG(D_HA, "%s: set nextid "LPD64" after recovery\n",
354 imp->imp_target_uuid.uuid, oa->o_id);
356 memcpy(oa, &body->oa, sizeof(*oa));
358 /* This should really be sent by the OST */
359 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
360 oa->o_valid |= OBD_MD_FLBLKSZ;
362 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
363 * have valid lsm_oinfo data structs, so don't go touching that.
364 * This needs to be fixed in a big way.
366 lsm->lsm_object_id = oa->o_id;
367 lsm->lsm_object_gr = oa->o_gr;
371 oti->oti_transno = request->rq_repmsg->transno;
373 if (oa->o_valid & OBD_MD_FLCOOKIE) {
374 if (!oti->oti_logcookies)
375 oti_alloc_cookies(oti, 1);
376 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
377 sizeof(oti->oti_onecookie));
381 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
384 ptlrpc_req_finished(request);
387 obd_free_memmd(exp, &lsm);
391 static int osc_punch(struct obd_export *exp, struct obdo *oa,
392 struct lov_stripe_md *md, obd_size start,
393 obd_size end, struct obd_trans_info *oti,
394 struct lustre_capa *capa)
396 struct ptlrpc_request *request;
397 struct ost_body *body;
398 struct lustre_capa *req_capa;
400 int rc, size[2] = { sizeof(*body), sizeof(*capa) };
408 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
409 OST_PUNCH, capa ? 2 : 1, size, NULL);
413 body = lustre_msg_buf(request->rq_reqmsg, bufcnt++, sizeof (*body));
415 memcpy(&body->oa, oa, sizeof(*oa));
417 /* overload the size and blocks fields in the oa with start/end */
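/* e.g. a truncate of an object down to 4k would typically arrive here as
 *   osc_punch(exp, oa, md, 4096, OBD_OBJECT_EOF, oti, capa);
 * i.e. start is the new size and end means "everything after it". */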
418 body->oa.o_size = start;
419 body->oa.o_blocks = end;
420 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
423 req_capa = lustre_msg_buf(request->rq_reqmsg, bufcnt++,
425 capa_dup2(req_capa, capa);
426 body->oa.o_valid |= OBD_MD_CAPA;
429 request->rq_replen = lustre_msg_size(1, size);
431 rc = ptlrpc_queue_wait(request);
435 body = lustre_swab_repbuf (request, 0, sizeof (*body),
436 lustre_swab_ost_body);
438 CERROR ("can't unpack ost_body\n");
439 GOTO (out, rc = -EPROTO);
442 memcpy(oa, &body->oa, sizeof(*oa));
446 ptlrpc_req_finished(request);
450 static int osc_sync(struct obd_export *exp, struct obdo *oa,
451 struct lov_stripe_md *md, obd_size start,
454 struct ptlrpc_request *request;
455 struct ost_body *body;
456 int rc, size = sizeof(*body);
464 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
465 OST_SYNC, 1, &size, NULL);
469 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
470 memcpy(&body->oa, oa, sizeof(*oa));
472 /* overload the size and blocks fields in the oa with start/end */
473 body->oa.o_size = start;
474 body->oa.o_blocks = end;
475 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
477 request->rq_replen = lustre_msg_size(1, &size);
479 rc = ptlrpc_queue_wait(request);
483 body = lustre_swab_repbuf(request, 0, sizeof(*body),
484 lustre_swab_ost_body);
486 CERROR ("can't unpack ost_body\n");
487 GOTO (out, rc = -EPROTO);
490 memcpy(oa, &body->oa, sizeof(*oa));
494 ptlrpc_req_finished(request);
498 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
499 struct lov_stripe_md *ea, struct obd_trans_info *oti)
501 struct ptlrpc_request *request;
502 struct ost_body *body;
503 int rc, size = sizeof(*body);
511 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
512 OST_DESTROY, 1, &size, NULL);
515 request->rq_request_portal = OST_DESTROY_PORTAL;
517 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
519 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
520 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
521 sizeof(*oti->oti_logcookies));
522 oti->oti_logcookies++;
525 memcpy(&body->oa, oa, sizeof(*oa));
526 request->rq_replen = lustre_msg_size(1, &size);
528 if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
529 ptlrpcd_add_req(request);
532 rc = ptlrpc_queue_wait(request);
538 ptlrpc_req_finished(request);
542 body = lustre_swab_repbuf(request, 0, sizeof(*body),
543 lustre_swab_ost_body);
545 CERROR ("Can't unpack body\n");
546 ptlrpc_req_finished(request);
550 memcpy(oa, &body->oa, sizeof(*oa));
551 ptlrpc_req_finished(request);
556 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
559 obd_valid bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
561 LASSERT(!(oa->o_valid & bits));
564 spin_lock(&cli->cl_loi_list_lock);
565 oa->o_dirty = cli->cl_dirty;
566 oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
567 oa->o_grant = cli->cl_avail_grant;
568 oa->o_dropped = cli->cl_lost_grant;
569 cli->cl_lost_grant = 0;
570 spin_unlock(&cli->cl_loi_list_lock);
571 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
572 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
575 /* caller must hold loi_list_lock */
576 static void osc_consume_write_grant(struct client_obd *cli,
577 struct osc_async_page *oap)
579 cli->cl_dirty += PAGE_SIZE;
580 cli->cl_avail_grant -= PAGE_SIZE;
581 oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
582 CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
583 LASSERT(cli->cl_avail_grant >= 0);
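/* Grant accounting in brief: osc_consume_write_grant() above charges one
 * page of dirty cache against the space the OST has granted us,
 * osc_announce_cached() reports our dirty/grant state inside each brw
 * request, and osc_update_grant() below credits back whatever extra
 * grant the OST hands out in its reply. */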
586 static unsigned long rpcs_in_flight(struct client_obd *cli)
588 return cli->cl_r_in_flight + cli->cl_w_in_flight;
591 /* caller must hold loi_list_lock */
592 void osc_wake_cache_waiters(struct client_obd *cli)
594 struct list_head *l, *tmp;
595 struct osc_cache_waiter *ocw;
597 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
598 /* if we can't dirty more, we must wait until some is written */
599 if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
600 CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
601 cli->cl_dirty, cli->cl_dirty_max);
605 /* if there is still dirty cache but no grant, wait for pending RPCs that
606 * may yet return us some grant before doing sync writes */
607 if (cli->cl_w_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
608 CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
609 cli->cl_w_in_flight);
611 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
612 list_del_init(&ocw->ocw_entry);
613 if (cli->cl_avail_grant < PAGE_SIZE) {
614 /* no more RPCs in flight to return grant, do sync IO */
615 ocw->ocw_rc = -EDQUOT;
616 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
618 osc_consume_write_grant(cli, ocw->ocw_oap);
621 wake_up(&ocw->ocw_waitq);
627 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
629 spin_lock(&cli->cl_loi_list_lock);
630 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
631 cli->cl_avail_grant += body->oa.o_grant;
632 /* waiters are woken in brw_interpret_oap */
633 spin_unlock(&cli->cl_loi_list_lock);
636 /* We assume that the reason this OSC got a short read is that it read
637 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
638 * via the LOV, and it _knows_ it's reading inside the file, it's just that
639 * this stripe never got written at or beyond this stripe offset yet. */
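/* For example, a 3-page read whose nob_read covers the first page and
 * half of the second leaves page 0 untouched, zero-fills the tail of
 * page 1 from the EOF point, and zeroes page 2 entirely. */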
640 static void handle_short_read(int nob_read, obd_count page_count,
641 struct brw_page *pga)
645 /* skip bytes read OK */
646 while (nob_read > 0) {
647 LASSERT (page_count > 0);
649 if (pga->count > nob_read) {
650 /* EOF inside this page */
651 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
652 memset(ptr + nob_read, 0, pga->count - nob_read);
659 nob_read -= pga->count;
664 /* zero remaining pages */
665 while (page_count-- > 0) {
666 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
667 memset(ptr, 0, pga->count);
673 static int check_write_rcs(struct ptlrpc_request *request,
674 int requested_nob, int niocount,
675 obd_count page_count, struct brw_page *pga)
679 /* return error if any niobuf was in error */
680 remote_rcs = lustre_swab_repbuf(request, 1,
681 sizeof(*remote_rcs) * niocount, NULL);
682 if (remote_rcs == NULL) {
683 CERROR("Missing/short RC vector on BRW_WRITE reply\n");
686 if (lustre_msg_swabbed(request->rq_repmsg))
687 for (i = 0; i < niocount; i++)
688 __swab32s((__u32 *)&remote_rcs[i]);
690 for (i = 0; i < niocount; i++) {
691 if (remote_rcs[i] < 0)
692 return(remote_rcs[i]);
694 if (remote_rcs[i] != 0) {
695 CERROR("rc[%d] invalid (%d) req %p\n",
696 i, remote_rcs[i], request);
701 if (request->rq_bulk->bd_nob_transferred != requested_nob) {
702 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
703 request->rq_bulk->bd_nob_transferred, requested_nob);
710 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
712 if (p1->flag != p2->flag) {
713 unsigned mask = ~OBD_BRW_FROM_GRANT;
715 /* warn if we try to combine flags that we don't know to be safe to combine */
717 if ((p1->flag & mask) != (p2->flag & mask))
718 CERROR("is it ok to have flags 0x%x and 0x%x in the "
719 "same brw?\n", p1->flag, p2->flag);
723 return (p1->disk_offset + p1->count == p2->disk_offset);
727 static obd_count cksum_pages(int nob, obd_count page_count,
728 struct brw_page *pga)
734 LASSERT (page_count > 0);
737 ost_checksum(&cksum, ptr + (pga->page_offset & (PAGE_SIZE - 1)),
738 pga->count > nob ? nob : pga->count);
750 #define osc_encrypt_page(page, off, count) \
751 osc_crypt_page(page, off, count, ENCRYPT_DATA)
752 #define osc_decrypt_page(page, off, count) \
753 osc_crypt_page(page, off, count, DECRYPT_DATA)
754 /* Putting a global callback var here is ugly, but putting it in
755 * client_obd also seems like a bad idea. -WangDi */
756 crypt_cb_t osc_crypt_cb = NULL;
758 static int osc_crypt_page(struct page *page, obd_off page_off, obd_off count,
764 if (osc_crypt_cb != NULL)
765 rc = osc_crypt_cb(page, page_off, count, flags);
767 CERROR("crypt page error %d \n", rc);
771 static int osc_decrypt_pages(struct brw_page *pga, int page_count)
776 for (i = 0; i < page_count; i++) {
777 struct brw_page *pg = &pga[i];
778 osc_decrypt_page(pg->pg, pg->page_offset, pg->count);
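/* Build a brw request: the message packs an ost_body, an obd_ioobj, an
 * optional capability and one niobuf_remote per merged extent, with
 * contiguous pages coalesced into a single niobuf. The bulk descriptor
 * is GET_SOURCE for writes and PUT_SINK for reads. */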
783 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
784 struct lov_stripe_md *lsm, obd_count page_count,
785 struct brw_page *pga, int *requested_nobp,
786 int *niocountp, struct ptlrpc_request **reqp)
788 struct ptlrpc_request *req;
789 struct ptlrpc_bulk_desc *desc;
790 struct client_obd *cli = &imp->imp_obd->u.cli;
791 struct ost_body *body;
792 struct lustre_id *raw_id = obdo_id(oa);
793 struct obd_capa *ocapa = NULL;
794 struct lustre_capa *capa = NULL;
795 struct obd_ioobj *ioobj;
796 struct niobuf_remote *niobuf;
805 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
807 for (niocount = i = 1; i < page_count; i++)
808 if (!can_merge_pages(&pga[i - 1], &pga[i]))
811 /* TODO: this could be optimized: the capability can be
812 * found from ll_inode_info->lli_capas. */
813 capa_op = (opc == OST_WRITE) ? CAPA_WRITE : CAPA_READ;
815 ocapa = capa_get(oa->o_fsuid, capa_op, raw_id->li_fid.lf_group,
816 raw_id->li_stc.u.e3s.l3s_ino, CLIENT_CAPA);
818 if (opc == OST_READ && capa_op == MAY_READ) {
819 /* partial write might cause read, MAY_WRITE capability
820 * should be used here */
826 size[bufcnt++] = sizeof(*body);
827 size[bufcnt++] = sizeof(*ioobj);
829 size[bufcnt++] = sizeof(*capa);
830 size[bufcnt++] = niocount * sizeof(*niobuf);
832 req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opc, bufcnt, size, NULL);
836 if (opc == OST_WRITE)
837 desc = ptlrpc_prep_bulk_imp (req, page_count,
838 BULK_GET_SOURCE, OST_BULK_PORTAL);
840 desc = ptlrpc_prep_bulk_imp (req, page_count,
841 BULK_PUT_SINK, OST_BULK_PORTAL);
843 GOTO(out, rc = -ENOMEM);
844 /* NB request now owns desc and will free it when it gets freed */
847 body = lustre_msg_buf(req->rq_reqmsg, bufcnt++, sizeof(*body));
848 memcpy(&body->oa, oa, sizeof(*oa));
849 ioobj = lustre_msg_buf(req->rq_reqmsg, bufcnt++, sizeof(*ioobj));
851 capa = lustre_msg_buf(req->rq_reqmsg, bufcnt++, sizeof(*capa));
852 capa_dup(capa, ocapa);
853 body->oa.o_valid |= OBD_MD_CAPA;
855 niobuf = lustre_msg_buf(req->rq_reqmsg, bufcnt++,
856 niocount * sizeof(*niobuf));
858 obdo_to_ioobj(oa, ioobj);
859 ioobj->ioo_bufcnt = niocount;
861 LASSERT (page_count > 0);
863 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
864 struct brw_page *pg = &pga[i];
865 struct brw_page *pg_prev = pg - 1;
867 LASSERT(pg->count > 0);
868 LASSERTF((pg->page_offset & ~PAGE_MASK) + pg->count <= PAGE_SIZE,
869 "i: %d pg: %p pg_off: "LPU64", count: %u\n", i, pg,
870 pg->page_offset, pg->count);
871 LASSERTF(i == 0 || pg->disk_offset > pg_prev->disk_offset,
872 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
873 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
875 pg->pg, pg->pg->private, pg->pg->index, pg->disk_offset,
876 pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
877 pg_prev->disk_offset);
879 if (opc == OST_WRITE) {
880 rc = osc_encrypt_page(pg->pg, pg->page_offset, pg->count);
885 ptlrpc_prep_bulk_page(desc, pg->pg,
886 pg->page_offset & ~PAGE_MASK, pg->count);
887 requested_nob += pg->count;
889 if (i > 0 && can_merge_pages(pg_prev, pg)) {
891 niobuf->len += pg->count;
893 niobuf->offset = pg->disk_offset;
894 niobuf->len = pg->count;
895 niobuf->flags = pg->flag;
899 LASSERT((void *)(niobuf - niocount) ==
900 lustre_msg_buf(req->rq_reqmsg, bufcnt - 1,
901 niocount * sizeof(*niobuf)));
902 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
904 /* size[0] still sizeof (*body) */
905 if (opc == OST_WRITE) {
907 body->oa.o_valid |= OBD_MD_FLCKSUM;
908 body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
910 /* 1 RC per niobuf */
911 size[1] = sizeof(__u32) * niocount;
912 req->rq_replen = lustre_msg_size(2, size);
914 /* 1 RC for the whole I/O */
915 req->rq_replen = lustre_msg_size(1, size);
918 *niocountp = niocount;
919 *requested_nobp = requested_nob;
924 ptlrpc_req_finished (req);
928 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
929 int requested_nob, int niocount,
930 obd_count page_count, struct brw_page *pga,
933 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
934 struct ost_body *body;
940 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
942 CERROR ("Can't unpack body\n");
946 osc_update_grant(cli, body);
947 memcpy(oa, &body->oa, sizeof(*oa));
949 if (req->rq_reqmsg->opc == OST_WRITE) {
951 CERROR ("Unexpected +ve rc %d\n", rc);
954 LASSERT (req->rq_bulk->bd_nob == requested_nob);
955 osc_decrypt_pages(pga, page_count);
956 RETURN(check_write_rcs(req, requested_nob, niocount,
960 if (rc > requested_nob) {
961 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
965 if (rc != req->rq_bulk->bd_nob_transferred) {
966 CERROR ("Unexpected rc %d (%d transferred)\n",
967 rc, req->rq_bulk->bd_nob_transferred);
971 if (rc < requested_nob)
972 handle_short_read(rc, page_count, pga);
975 if (oa->o_valid & OBD_MD_FLCKSUM) {
976 const struct ptlrpc_peer *peer =
977 &req->rq_import->imp_connection->c_peer;
978 static int cksum_counter;
979 obd_count server_cksum = oa->o_cksum;
980 obd_count cksum = cksum_pages(rc, page_count, pga);
981 char str[PTL_NALFMT_SIZE];
983 ptlrpc_peernid2str(peer, str);
986 if (server_cksum != cksum) {
987 CERROR("Bad checksum: server %x, client %x, server NID "
988 LPX64" (%s)\n", server_cksum, cksum,
989 peer->peer_id.nid, str);
992 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
993 CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
994 cksum_counter, peer->peer_id.nid, str, cksum);
997 static int cksum_missed;
1000 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1001 CERROR("Request checksum %u from "LPX64", no reply\n",
1003 req->rq_import->imp_connection->c_peer.peer_id.nid);
1006 osc_decrypt_pages(pga, page_count);
1010 static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
1011 struct lov_stripe_md *lsm,
1012 obd_count page_count, struct brw_page *pga)
1016 struct ptlrpc_request *request;
1021 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
1022 page_count, pga, &requested_nob, &niocount,
1027 rc = ptlrpc_queue_wait(request);
1029 if (rc == -ETIMEDOUT && request->rq_resend) {
1030 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
1031 ptlrpc_req_finished(request);
1035 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
1036 page_count, pga, rc);
1038 ptlrpc_req_finished(request);
1042 static int brw_interpret(struct ptlrpc_request *request,
1043 struct osc_brw_async_args *aa, int rc)
1045 struct obdo *oa = aa->aa_oa;
1046 int requested_nob = aa->aa_requested_nob;
1047 int niocount = aa->aa_nio_count;
1048 obd_count page_count = aa->aa_page_count;
1049 struct brw_page *pga = aa->aa_pga;
1052 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
1053 page_count, pga, rc);
1057 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1058 struct lov_stripe_md *lsm, obd_count page_count,
1059 struct brw_page *pga, struct ptlrpc_request_set *set)
1061 struct ptlrpc_request *request;
1064 struct osc_brw_async_args *aa;
1068 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
1069 page_count, pga, &requested_nob, &nio_count,
1072 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1073 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1075 aa->aa_requested_nob = requested_nob;
1076 aa->aa_nio_count = nio_count;
1077 aa->aa_page_count = page_count;
1080 request->rq_interpret_reply = brw_interpret;
1081 ptlrpc_set_add_req(set, request);
1087 #define min_t(type,x,y) \
1088 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
1092 * ugh, we want disk allocation on the target to happen in offset order. we'll
1093 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1094 * fine for our small page arrays and doesn't require allocation. it's an
1095 * insertion sort that swaps elements that are strides apart, shrinking the
1096 * stride down until it's '1' and the array is sorted.
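* for example, a 64-entry array ends up being passed over with strides
* 40, 13, 4 and finally 1 (the 3h+1 sequence that the first for loop
* below builds up).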
1098 static void sort_brw_pages(struct brw_page *array, int num)
1101 struct brw_page tmp;
1105 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1110 for (i = stride ; i < num ; i++) {
1113 while (j >= stride && array[j - stride].disk_offset >
1115 array[j] = array[j - stride];
1120 } while (stride > 1);
1123 /* make sure the regions we're passing to elan don't violate its '4
1124 * fragments' constraint. portal headers are a fragment, all full
1125 * PAGE_SIZE long pages count as 1 fragment, and each partial page
1126 * counts as a fragment. I think. see bug 934. */
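/* by that arithmetic the worst legal case is: header (1) + a leading
 * partial page (1) + all full pages (together 1) + a trailing partial
 * page (1) = 4 fragments; any further partial page forces a split. */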
1127 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
1130 int saw_whole_frag = 0;
1133 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
1134 if (pg->count == PAGE_SIZE) {
1135 if (!saw_whole_frag) {
1146 static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
1147 struct lov_stripe_md *lsm, obd_count page_count,
1148 struct brw_page *pga, struct obd_trans_info *oti)
1152 if (cmd == OBD_BRW_CHECK) {
1153 /* The caller just wants to know if there's a chance that this
1154 * I/O can succeed */
1155 struct obd_import *imp = class_exp2cliimp(exp);
1157 if (imp == NULL || imp->imp_invalid)
1162 while (page_count) {
1163 obd_count pages_per_brw;
1166 if (page_count > PTLRPC_MAX_BRW_PAGES)
1167 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1169 pages_per_brw = page_count;
1171 sort_brw_pages(pga, pages_per_brw);
1172 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1174 rc = osc_brw_internal(cmd, exp, oa, lsm, pages_per_brw, pga);
1179 page_count -= pages_per_brw;
1180 pga += pages_per_brw;
1185 static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
1186 struct lov_stripe_md *lsm, obd_count page_count,
1187 struct brw_page *pga, struct ptlrpc_request_set *set,
1188 struct obd_trans_info *oti)
1192 if (cmd == OBD_BRW_CHECK) {
1193 /* The caller just wants to know if there's a chance that this
1194 * I/O can succeed */
1195 struct obd_import *imp = class_exp2cliimp(exp);
1197 if (imp == NULL || imp->imp_invalid)
1202 while (page_count) {
1203 obd_count pages_per_brw;
1206 if (page_count > PTLRPC_MAX_BRW_PAGES)
1207 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1209 pages_per_brw = page_count;
1211 sort_brw_pages(pga, pages_per_brw);
1212 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1214 rc = async_internal(cmd, exp, oa, lsm, pages_per_brw, pga, set);
1219 page_count -= pages_per_brw;
1220 pga += pages_per_brw;
1225 static void osc_check_rpcs(struct client_obd *cli);
1226 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1228 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
1229 static void lop_update_pending(struct client_obd *cli,
1230 struct loi_oap_pages *lop, int cmd, int delta);
1232 /* this is called when a sync waiter receives an interruption. Its job is to
1233 * get the caller woken as soon as possible. If its page hasn't been put in an
1234 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1235 * desiring interruption which will forcefully complete the rpc once the rpc has timed out */
1237 static void osc_occ_interrupted(struct oig_callback_context *occ)
1239 struct osc_async_page *oap;
1240 struct loi_oap_pages *lop;
1241 struct lov_oinfo *loi;
1244 /* XXX member_of() */
1245 oap = list_entry(occ, struct osc_async_page, oap_occ);
1247 spin_lock(&oap->oap_cli->cl_loi_list_lock);
1249 oap->oap_interrupted = 1;
1251 /* ok, it's been put in an rpc. */
1252 if (oap->oap_request != NULL) {
1253 ptlrpc_mark_interrupted(oap->oap_request);
1254 ptlrpcd_wake(oap->oap_request);
1258 /* we don't get interruption callbacks until osc_trigger_sync_io()
1259 * has been called and put the sync oaps in the pending/urgent lists.*/
1260 if (!list_empty(&oap->oap_pending_item)) {
1261 list_del_init(&oap->oap_pending_item);
1262 if (oap->oap_async_flags & ASYNC_URGENT)
1263 list_del_init(&oap->oap_urgent_item);
1266 lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
1267 &loi->loi_write_lop : &loi->loi_read_lop;
1268 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1269 loi_list_maint(oap->oap_cli, oap->oap_loi);
1271 oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
1272 oap->oap_oig = NULL;
1276 spin_unlock(&oap->oap_cli->cl_loi_list_lock);
1279 /* this must be called holding the loi list lock to give coverage to exit_cache,
1280 * async_flag maintenance, and oap_request */
1281 static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
1282 struct osc_async_page *oap, int sent, int rc)
1284 osc_exit_cache(cli, oap, sent);
1285 oap->oap_async_flags = 0;
1286 oap->oap_interrupted = 0;
1288 if (oap->oap_request != NULL) {
1289 ptlrpc_req_finished(oap->oap_request);
1290 oap->oap_request = NULL;
1293 if (rc == 0 && oa != NULL)
1294 oap->oap_loi->loi_blocks = oa->o_blocks;
1297 oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
1298 oap->oap_oig = NULL;
1303 oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
1307 static int brw_interpret_oap(struct ptlrpc_request *request,
1308 struct osc_brw_async_args *aa, int rc)
1310 struct osc_async_page *oap;
1311 struct client_obd *cli;
1312 struct list_head *pos, *n;
1316 do_gettimeofday(&now);
1317 rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
1318 aa->aa_nio_count, aa->aa_page_count,
1321 CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);
1325 /* in failout recovery we ignore writeback failure and want
1326 * to just tell llite to unlock the page and continue */
1327 if (request->rq_reqmsg->opc == OST_WRITE &&
1328 (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
1329 CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
1331 cli->cl_import ? cli->cl_import->imp_invalid : -1);
1335 spin_lock(&cli->cl_loi_list_lock);
1337 if (request->rq_reqmsg->opc == OST_WRITE)
1338 lprocfs_stime_record(&cli->cl_write_stime, &now,
1339 &request->rq_rpcd_start);
1341 lprocfs_stime_record(&cli->cl_read_stime, &now,
1342 &request->rq_rpcd_start);
1344 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1345 * is called so we know whether to go to sync BRWs or wait for more
1346 * RPCs to complete */
1347 if (request->rq_reqmsg->opc == OST_WRITE)
1348 cli->cl_w_in_flight--;
1350 cli->cl_r_in_flight--;
1352 /* the caller may re-use the oap after the completion call so
1353 * we need to clean it up a little */
1354 list_for_each_safe(pos, n, &aa->aa_oaps) {
1355 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1357 //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
1358 //oap->oap_page, oap->oap_page->index, oap);
1360 list_del_init(&oap->oap_rpc_item);
1361 osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
1364 /* no write RPCs in flight, reset the time */
1365 if (request->rq_reqmsg->opc == OST_WRITE && cli->cl_w_in_flight == 0)
1366 do_gettimeofday(&cli->cl_last_write_time);
1368 osc_wake_cache_waiters(cli);
1369 osc_check_rpcs(cli);
1370 spin_unlock(&cli->cl_loi_list_lock);
1372 obdo_free(aa->aa_oa);
1373 OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));
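/* Assemble a single brw RPC from the oaps queued on rpc_list: each oap
 * is flattened into a brw_page, the caller's ap_fill_obdo() supplies
 * the obdo, and osc_brw_prep_request() does the actual packing. */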
1378 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
1379 struct list_head *rpc_list,
1380 int page_count, int cmd)
1382 struct ptlrpc_request *req;
1383 struct brw_page *pga = NULL;
1384 int requested_nob, nio_count;
1385 struct osc_brw_async_args *aa;
1386 struct obdo *oa = NULL;
1387 struct obd_async_page_ops *ops = NULL;
1388 void *caller_data = NULL;
1389 struct list_head *pos;
1392 LASSERT(!list_empty(rpc_list));
1394 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1396 RETURN(ERR_PTR(-ENOMEM));
1400 GOTO(out, req = ERR_PTR(-ENOMEM));
1403 list_for_each(pos, rpc_list) {
1404 struct osc_async_page *oap;
1406 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1408 ops = oap->oap_caller_ops;
1409 caller_data = oap->oap_caller_data;
1411 pga[i].disk_offset = oap->oap_obj_off + oap->oap_page_off;
1412 pga[i].page_offset = pga[i].disk_offset;
1413 pga[i].pg = oap->oap_page;
1414 pga[i].count = oap->oap_count;
1415 pga[i].flag = oap->oap_brw_flags;
1416 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1417 pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
1421 /* always get the data for the obdo for the rpc */
1422 LASSERT(ops != NULL);
1423 ops->ap_fill_obdo(caller_data, cmd, oa);
1425 sort_brw_pages(pga, page_count);
1426 rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
1427 pga, &requested_nob, &nio_count, &req);
1429 CERROR("prep_req failed: %d\n", rc);
1430 GOTO(out, req = ERR_PTR(rc));
1433 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1434 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1436 aa->aa_requested_nob = requested_nob;
1437 aa->aa_nio_count = nio_count;
1438 aa->aa_page_count = page_count;
1447 OBD_FREE(pga, sizeof(*pga) * page_count);
1452 /* write gaps of 15s or more (value in usecs) are considered anomalous and ignored */
1453 #define CLI_ODD_WRITE_GAP 15000000
1455 static void lop_update_pending(struct client_obd *cli,
1456 struct loi_oap_pages *lop, int cmd, int delta)
1458 lop->lop_num_pending += delta;
1459 if (cmd == OBD_BRW_WRITE)
1460 cli->cl_pending_w_pages += delta;
1462 cli->cl_pending_r_pages += delta;
1465 static long timeval_sub(struct timeval *large, struct timeval *small)
1467 return (large->tv_sec - small->tv_sec) * 1000000 +
1468 (large->tv_usec - small->tv_usec);
1471 /* the loi lock is held across this function but it's allowed to release
1472 * and reacquire it during its work */
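/* In outline: walk lop_pending making pages ready (or backing off),
 * move up to cl_max_pages_per_rpc of them onto a private rpc_list,
 * build the request with the lock dropped, then retake the lock to
 * update the in-flight counters before handing the rpc to ptlrpcd. */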
1473 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
1474 int cmd, struct loi_oap_pages *lop)
1476 struct ptlrpc_request *request;
1477 obd_count page_count = 0;
1478 struct list_head *tmp, *pos;
1479 struct osc_async_page *oap = NULL;
1480 struct osc_brw_async_args *aa;
1481 struct obd_async_page_ops *ops;
1482 LIST_HEAD(rpc_list);
1485 LASSERT(lop != LP_POISON);
1486 LASSERT(lop->lop_pending.next != LP_POISON);
1488 /* first we find the pages we're allowed to work with */
1489 list_for_each_safe(pos, tmp, &lop->lop_pending) {
1490 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1491 ops = oap->oap_caller_ops;
1493 LASSERT(oap->oap_magic == OAP_MAGIC);
1495 /* in llite being 'ready' equates to the page being locked
1496 * until completion unlocks it. commit_write submits a page
1497 * as not ready because its unlock will happen unconditionally
1498 * as the call returns. if we race with commit_write giving
1499 * us that page we dont' want to create a hole in the page
1500 * stream, so we stop and leave the rpc to be fired by
1501 * another dirtier or kupdated interval (the not ready page
1502 * will still be on the dirty list). we could call in
1503 * at the end of ll_file_write to process the queue again. */
1504 if (!(oap->oap_async_flags & ASYNC_READY)) {
1505 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
1507 CDEBUG(D_INODE, "oap %p page %p returned %d "
1508 "instead of ready\n", oap,
1512 /* llite is telling us that the page is still
1513 * in commit_write and that we should try
1514 * and put it in an rpc again later. we
1515 * break out of the loop so we don't create
1516 * a hole in the sequence of pages in the rpc
1521 /* the io isn't needed.. tell the checks
1522 * below to complete the rpc with EINTR */
1523 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1524 oap->oap_count = -EINTR;
1527 oap->oap_async_flags |= ASYNC_READY;
1530 LASSERTF(0, "oap %p page %p returned %d "
1531 "from make_ready\n", oap,
1539 * Page submitted for IO has to be locked. Either by
1540 * ->ap_make_ready() or by higher layers.
1542 * XXX nikita: this assertion should be adjusted when lustre
1543 * starts using PG_writeback for pages being written out.
1545 LASSERT(PageLocked(oap->oap_page));
1547 /* take the page out of our book-keeping */
1548 list_del_init(&oap->oap_pending_item);
1549 lop_update_pending(cli, lop, cmd, -1);
1550 list_del_init(&oap->oap_urgent_item);
1552 /* ask the caller for the size of the io as the rpc leaves. */
1553 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
1555 ops->ap_refresh_count(oap->oap_caller_data,cmd);
1556 if (oap->oap_count <= 0) {
1557 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
1559 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
1563 /* now put the page back in our accounting */
1564 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1565 if (++page_count >= cli->cl_max_pages_per_rpc)
1569 osc_wake_cache_waiters(cli);
1571 if (page_count == 0)
1574 loi_list_maint(cli, loi);
1575 spin_unlock(&cli->cl_loi_list_lock);
1577 request = osc_build_req(cli, &rpc_list, page_count, cmd);
1578 if (IS_ERR(request)) {
1579 /* this should happen rarely and is pretty bad, it makes the
1580 * pending list not follow the dirty order */
1581 spin_lock(&cli->cl_loi_list_lock);
1582 list_for_each_safe(pos, tmp, &rpc_list) {
1583 oap = list_entry(pos, struct osc_async_page,
1585 list_del_init(&oap->oap_rpc_item);
1587 /* queued sync pages can be torn down while the pages
1588 * were between the pending list and the rpc */
1589 if (oap->oap_interrupted) {
1590 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
1591 osc_ap_completion(cli, NULL, oap, 0,
1596 /* put the page back in the loi/lop lists */
1597 list_add_tail(&oap->oap_pending_item,
1599 lop_update_pending(cli, lop, cmd, 1);
1600 if (oap->oap_async_flags & ASYNC_URGENT)
1601 list_add(&oap->oap_urgent_item,
1604 loi_list_maint(cli, loi);
1605 RETURN(PTR_ERR(request));
1608 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1609 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1610 INIT_LIST_HEAD(&aa->aa_oaps);
1611 list_splice(&rpc_list, &aa->aa_oaps);
1612 INIT_LIST_HEAD(&rpc_list);
1615 if (cmd == OBD_BRW_READ) {
1616 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1617 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
1619 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1620 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1621 cli->cl_w_in_flight);
1625 spin_lock(&cli->cl_loi_list_lock);
1627 /* collect write gaps and their running sum */
1628 if (cmd == OBD_BRW_WRITE && cli->cl_w_in_flight == 0) {
1632 do_gettimeofday(&now);
1634 if (cli->cl_last_write_time.tv_sec) {
1635 diff = timeval_sub(&now, &cli->cl_last_write_time);
1636 if (diff < CLI_ODD_WRITE_GAP) {
1637 cli->cl_write_gap_sum += diff;
1638 cli->cl_write_gaps++;
1643 if (cmd == OBD_BRW_READ) {
1644 cli->cl_r_in_flight++;
1647 cli->cl_w_in_flight++;
1648 cli->cl_write_num++;
1651 /* queued sync pages can be torn down while the pages
1652 * were between the pending list and the rpc */
1653 list_for_each(pos, &aa->aa_oaps) {
1654 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1655 if (oap->oap_interrupted) {
1656 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1658 ptlrpc_mark_interrupted(request);
1663 CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %dr/%dw in flight\n",
1664 request, page_count, aa, cli->cl_r_in_flight,
1665 cli->cl_w_in_flight);
1667 oap->oap_request = ptlrpc_request_addref(request);
1668 request->rq_interpret_reply = brw_interpret_oap;
1670 ptlrpcd_add_req(request);
1674 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1680 if (lop->lop_num_pending == 0)
1683 /* if we have an invalid import we want to drain the queued pages
1684 * by forcing them through rpcs that immediately fail and complete
1685 * the pages. recovery relies on this to empty the queued pages
1686 * before canceling the locks and evicting down the llite pages */
1687 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1690 /* stream rpcs in queue order as long as there is an urgent page
1691 * queued. this is our cheap solution for good batching in the case
1692 * where writepage marks some random page in the middle of the file as
1693 * urgent because of, say, memory pressure */
1694 if (!list_empty(&lop->lop_urgent))
1697 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1698 optimal = cli->cl_max_pages_per_rpc;
1699 if (cmd == OBD_BRW_WRITE) {
1700 /* trigger a write rpc stream as long as there are dirtiers
1701 * waiting for space. as they're waiting, they're not going to
1702 * create more pages to coalesce with what's waiting.. */
1703 if (!list_empty(&cli->cl_cache_waiters))
1706 /* *2 to avoid triggering rpcs that would want to include pages
1707 * that are being queued but which can't be made ready until
1708 * the queuer finishes with the page. this is a wart for
1709 * llite::commit_write() */
1712 if (lop->lop_num_pending >= optimal)
1718 static void on_list(struct list_head *item, struct list_head *list,
1721 if (list_empty(item) && should_be_on)
1722 list_add_tail(item, list);
1723 else if (!list_empty(item) && !should_be_on)
1724 list_del_init(item);
1727 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1728 * can find pages to build into rpcs quickly */
1729 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1731 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1732 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1733 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1735 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1736 loi->loi_write_lop.lop_num_pending);
1738 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
1739 loi->loi_read_lop.lop_num_pending);
1742 #define LOI_DEBUG(LOI, STR, args...) \
1743 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
1744 !list_empty(&(LOI)->loi_cli_item), \
1745 (LOI)->loi_write_lop.lop_num_pending, \
1746 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
1747 (LOI)->loi_read_lop.lop_num_pending, \
1748 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
1751 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
1754 /* first return all objects which we already know to have
1755 * pages ready to be stuffed into rpcs */
1756 if (!list_empty(&cli->cl_loi_ready_list))
1757 RETURN(list_entry(cli->cl_loi_ready_list.next,
1758 struct lov_oinfo, loi_cli_item));
1760 /* then if we have cache waiters, return all objects with queued
1761 * writes. This is especially important when many small files
1762 * have filled up the cache and not been fired into rpcs because
1763 * they don't pass the nr_pending/object threshold */
1764 if (!list_empty(&cli->cl_cache_waiters) &&
1765 !list_empty(&cli->cl_loi_write_list))
1766 RETURN(list_entry(cli->cl_loi_write_list.next,
1767 struct lov_oinfo, loi_write_item));
1769 /* then return all queued objects when we have an invalid import
1770 * so that they get flushed */
1771 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
1772 if (!list_empty(&cli->cl_loi_write_list))
1773 RETURN(list_entry(cli->cl_loi_write_list.next,
1774 struct lov_oinfo, loi_write_item));
1775 if (!list_empty(&cli->cl_loi_read_list))
1776 RETURN(list_entry(cli->cl_loi_read_list.next,
1777 struct lov_oinfo, loi_read_item));
1782 /* called with the loi list lock held */
1783 static void osc_check_rpcs(struct client_obd *cli)
1785 struct lov_oinfo *loi;
1786 int rc = 0, race_counter = 0;
1789 while ((loi = osc_next_loi(cli)) != NULL) {
1790 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
1791 LASSERT(loi->loi_ost_idx != LL_POISON);
1793 if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
1796 /* attempt some read/write balancing by alternating between
1797 * reads and writes in an object. The makes_rpc checks here
1798 * would be redundant if we were getting read/write work items
1799 * instead of objects. we don't want send_oap_rpc to drain a
1800 * partial read pending queue when we're given this object to
1801 * do io on writes while there are cache waiters */
1802 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
1803 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
1804 &loi->loi_write_lop);
1812 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
1813 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
1814 &loi->loi_read_lop);
1823 /* attempt some inter-object balancing by issuing rpcs
1824 * for each object in turn */
1825 if (!list_empty(&loi->loi_cli_item))
1826 list_del_init(&loi->loi_cli_item);
1827 if (!list_empty(&loi->loi_write_item))
1828 list_del_init(&loi->loi_write_item);
1829 if (!list_empty(&loi->loi_read_item))
1830 list_del_init(&loi->loi_read_item);
1832 loi_list_maint(cli, loi);
1834 /* send_oap_rpc fails with 0 when make_ready tells it to
1835 * back off. llite's make_ready does this when it tries
1836 * to lock a page queued for write that is already locked.
1837 * we want to try sending rpcs from many objects, but we
1838 * don't want to spin failing with 0. */
1839 if (race_counter == 10)
1845 /* we're trying to queue a page in the osc so we're subject to the
1846 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
1847 * If the osc's queued pages are already at that limit, then we want to sleep
1848 * until there is space in the osc's queue for us. We also may be waiting for
1849 * write credits from the OST if there are RPCs in flight that may return some
1850 * before we fall back to sync writes.
1852 * We need this to know our allocation was granted in the presence of signals */
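/* ocw_granted() below is the wakeup predicate for l_wait_event(): we can
 * stop waiting once our waiter has been taken off the list (granted or
 * aborted) or once no RPCs remain in flight that could return grant. */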
1853 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1857 spin_lock(&cli->cl_loi_list_lock);
1858 rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
1859 spin_unlock(&cli->cl_loi_list_lock);
1863 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
1864 * grant or cache space. */
1865 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
1866 struct osc_async_page *oap)
1868 struct osc_cache_waiter ocw;
1869 struct l_wait_info lwi = { 0 };
1870 struct timeval start, stop;
1873 CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
1874 cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
1875 cli->cl_avail_grant);
1877 if (cli->cl_dirty_max < PAGE_SIZE)
1880 if (~0ul - cli->cl_dirty_sum <= cli->cl_dirty) {
1881 cli->cl_dirty_av = (cli->cl_dirty_av +
1882 (cli->cl_dirty_sum / cli->cl_dirty_num)) / 2;
1883 cli->cl_dirty_num = 0;
1884 cli->cl_dirty_sum = 0;
1886 if (cli->cl_dirty_num)
1887 cli->cl_dirty_av = (cli->cl_dirty_sum / cli->cl_dirty_num);
1890 cli->cl_dirty_num++;
1891 cli->cl_dirty_sum += cli->cl_dirty;
1893 if (cli->cl_dirty > cli->cl_dirty_dmax)
1894 cli->cl_dirty_dmax = cli->cl_dirty;
1895 if (cli->cl_dirty < cli->cl_dirty_dmin || !cli->cl_dirty_dmin)
1896 cli->cl_dirty_dmin = cli->cl_dirty;
1898 /* Hopefully normal case - cache space and write credits available */
1899 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1900 cli->cl_avail_grant >= PAGE_SIZE) {
1901 /* account for ourselves */
1902 osc_consume_write_grant(cli, oap);
1906 /* Make sure that there are write rpcs in flight to wait for. This
1907 * is a little silly as this object may not have any pending but
1908 * other objects sure might. */
1909 if (cli->cl_w_in_flight) {
1910 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1911 init_waitqueue_head(&ocw.ocw_waitq);
1915 loi_list_maint(cli, loi);
1916 osc_check_rpcs(cli);
1917 spin_unlock(&cli->cl_loi_list_lock);
1919 CDEBUG(0, "sleeping for cache space\n");
1920 do_gettimeofday(&start);
1921 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1922 do_gettimeofday(&stop);
1924 cli->cl_cache_wait_num++;
1925 cli->cl_cache_wait_sum += timeval_sub(&stop, &start);
1927 spin_lock(&cli->cl_loi_list_lock);
1928 lprocfs_stime_record(&cli->cl_enter_stime, &stop, &start);
1929 if (!list_empty(&ocw.ocw_entry)) {
1930 list_del(&ocw.ocw_entry);
1938 cli->cl_sync_rpcs++;
1942 /* the companion to enter_cache, called when an oap is no longer part of the
1943 * dirty accounting.. so writeback completes or truncate happens before writing
1944 * starts. must be called with the loi lock held. */
1945 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1950 if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
1955 if (~0ul - cli->cl_dirty_sum <= cli->cl_dirty) {
1956 cli->cl_dirty_av = (cli->cl_dirty_av +
1957 (cli->cl_dirty_sum / cli->cl_dirty_num)) / 2;
1958 cli->cl_dirty_num = 0;
1959 cli->cl_dirty_sum = 0;
1961 if (cli->cl_dirty_num)
1962 cli->cl_dirty_av = (cli->cl_dirty_sum / cli->cl_dirty_num);
1965 cli->cl_dirty_num++;
1966 cli->cl_dirty_sum += cli->cl_dirty;
1968 if (cli->cl_dirty > cli->cl_dirty_dmax)
1969 cli->cl_dirty_dmax = cli->cl_dirty;
1970 if (cli->cl_dirty < cli->cl_dirty_dmin || !cli->cl_dirty_dmin)
1971 cli->cl_dirty_dmin = cli->cl_dirty;
1973 oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
1974 cli->cl_dirty -= PAGE_SIZE;
1976 cli->cl_lost_grant += PAGE_SIZE;
1977 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
1978 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
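/* Set up the per-page bookkeeping (an osc_async_page) for a page the
 * caller wants to do async IO on; the oap is handed back through *res
 * as the opaque cookie used by the queue/flag/teardown calls below. */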
1984 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
1985 struct lov_oinfo *loi, struct page *page,
1986 obd_off offset, struct obd_async_page_ops *ops,
1987 void *data, void **res)
1989 struct osc_async_page *oap;
1992 OBD_ALLOC(oap, sizeof(*oap));
1996 oap->oap_magic = OAP_MAGIC;
1997 oap->oap_cli = &exp->exp_obd->u.cli;
2000 oap->oap_caller_ops = ops;
2001 oap->oap_caller_data = data;
2003 oap->oap_page = page;
2004 oap->oap_obj_off = offset;
2006 INIT_LIST_HEAD(&oap->oap_pending_item);
2007 INIT_LIST_HEAD(&oap->oap_urgent_item);
2008 INIT_LIST_HEAD(&oap->oap_rpc_item);
2010 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
2012 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2017 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
2018 struct lov_oinfo *loi, void *cookie,
2019 int cmd, obd_off off, int count,
2020 obd_flags brw_flags, enum async_flags async_flags)
2022 struct client_obd *cli = &exp->exp_obd->u.cli;
2023 struct osc_async_page *oap;
2024 struct loi_oap_pages *lop;
2028 oap = OAP_FROM_COOKIE(cookie);
2030 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2033 if (!list_empty(&oap->oap_pending_item) ||
2034 !list_empty(&oap->oap_urgent_item) ||
2035 !list_empty(&oap->oap_rpc_item))
2039 loi = &lsm->lsm_oinfo[0];
2041 spin_lock(&cli->cl_loi_list_lock);
2044 oap->oap_async_flags = async_flags;
2045 oap->oap_page_off = off;
2046 oap->oap_count = count;
2047 oap->oap_brw_flags = brw_flags;
2049 if (cmd == OBD_BRW_WRITE) {
2050 rc = osc_enter_cache(cli, loi, oap);
2052 spin_unlock(&cli->cl_loi_list_lock);
2055 lop = &loi->loi_write_lop;
2057 lop = &loi->loi_read_lop;
2060 if (oap->oap_async_flags & ASYNC_URGENT)
2061 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2062 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2063 lop_update_pending(cli, lop, cmd, 1);
2065 loi_list_maint(cli, loi);
2067 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
2070 osc_check_rpcs(cli);
2071 spin_unlock(&cli->cl_loi_list_lock);
2076 /* aka (~was & now & flag), but this is more clear :) */
2077 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
2079 static int osc_set_async_flags(struct obd_export *exp,
2080 struct lov_stripe_md *lsm,
2081 struct lov_oinfo *loi, void *cookie,
2082 obd_flags async_flags)
2084 struct client_obd *cli = &exp->exp_obd->u.cli;
2085 struct loi_oap_pages *lop;
2086 struct osc_async_page *oap;
2090 oap = OAP_FROM_COOKIE(cookie);
2092 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2096 loi = &lsm->lsm_oinfo[0];
2098 if (oap->oap_cmd == OBD_BRW_WRITE) {
2099 lop = &loi->loi_write_lop;
2101 lop = &loi->loi_read_lop;
2104 spin_lock(&cli->cl_loi_list_lock);
2106 if (list_empty(&oap->oap_pending_item))
2107 GOTO(out, rc = -EINVAL);
2109 if ((oap->oap_async_flags & async_flags) == async_flags)
2112 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
2113 oap->oap_async_flags |= ASYNC_READY;
2115 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
2116 if (list_empty(&oap->oap_rpc_item)) {
2117 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2118 loi_list_maint(cli, loi);
2122 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
2123 oap->oap_async_flags);
2125 osc_check_rpcs(cli);
2126 spin_unlock(&cli->cl_loi_list_lock);
2130 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
2131 struct lov_oinfo *loi,
2132 struct obd_io_group *oig, void *cookie,
2133 int cmd, obd_off off, int count,
2134 obd_flags brw_flags,
2135 obd_flags async_flags)
2137 struct client_obd *cli = &exp->exp_obd->u.cli;
2138 struct osc_async_page *oap;
2139 struct loi_oap_pages *lop;
2142 oap = OAP_FROM_COOKIE(cookie);
2144 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2147 if (!list_empty(&oap->oap_pending_item) ||
2148 !list_empty(&oap->oap_urgent_item) ||
2149 !list_empty(&oap->oap_rpc_item))
2153 loi = &lsm->lsm_oinfo[0];
2155 spin_lock(&cli->cl_loi_list_lock);
2158 oap->oap_page_off = off;
2159 oap->oap_count = count;
2160 oap->oap_brw_flags = brw_flags;
2161 oap->oap_async_flags = async_flags;
2163 if (cmd == OBD_BRW_WRITE)
2164 lop = &loi->loi_write_lop;
2166 lop = &loi->loi_read_lop;
2168 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
2169 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
2171 oig_add_one(oig, &oap->oap_occ);
2174 LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);
2176 spin_unlock(&cli->cl_loi_list_lock);
2181 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
2182 struct loi_oap_pages *lop, int cmd)
2184 struct list_head *pos, *tmp;
2185 struct osc_async_page *oap;
2187 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
2188 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
2189 list_del(&oap->oap_pending_item);
2190 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2191 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2192 lop_update_pending(cli, lop, cmd, 1);
2194 loi_list_maint(cli, loi);
2197 static int osc_trigger_group_io(struct obd_export *exp,
2198 struct lov_stripe_md *lsm,
2199 struct lov_oinfo *loi,
2200 struct obd_io_group *oig)
2202 struct client_obd *cli = &exp->exp_obd->u.cli;
2206 loi = &lsm->lsm_oinfo[0];
2208 spin_lock(&cli->cl_loi_list_lock);
2210 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2211 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2213 osc_check_rpcs(cli);
2214 spin_unlock(&cli->cl_loi_list_lock);
2219 static int osc_teardown_async_page(struct obd_export *exp,
2220 struct lov_stripe_md *lsm,
2221 struct lov_oinfo *loi, void *cookie)
2223 struct client_obd *cli = &exp->exp_obd->u.cli;
2224 struct loi_oap_pages *lop;
2225 struct osc_async_page *oap;
2229 oap = OAP_FROM_COOKIE(cookie);
2232 loi = &lsm->lsm_oinfo[0];
2234 if (oap->oap_cmd == OBD_BRW_WRITE) {
2235 lop = &loi->loi_write_lop;
2237 lop = &loi->loi_read_lop;
2240 spin_lock(&cli->cl_loi_list_lock);
2242 if (!list_empty(&oap->oap_rpc_item))
2243 GOTO(out, rc = -EBUSY);
2245 osc_exit_cache(cli, oap, 0);
2246 osc_wake_cache_waiters(cli);
2248 if (!list_empty(&oap->oap_urgent_item)) {
2249 list_del_init(&oap->oap_urgent_item);
2250 oap->oap_async_flags &= ~ASYNC_URGENT;
2252 if (!list_empty(&oap->oap_pending_item)) {
2253 list_del_init(&oap->oap_pending_item);
2254 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2256 loi_list_maint(cli, loi);
2258 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2260 spin_unlock(&cli->cl_loi_list_lock);
2262 OBD_FREE(oap, sizeof(*oap));
2267 /* Note: caller will lock/unlock, and set uptodate on the pages */
2268 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2269 static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
2270 struct lov_stripe_md *lsm, obd_count page_count,
2271 struct brw_page *pga)
2273 struct ptlrpc_request *request = NULL;
2274 struct ost_body *body;
2275 struct niobuf_remote *nioptr;
2276 struct obd_ioobj *iooptr;
2277 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2281 /* XXX does not handle 'new' brw protocol */
2283 size[1] = sizeof(struct obd_ioobj);
2284 size[2] = page_count * sizeof(*nioptr);
2286 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2287 OST_SAN_READ, 3, size, NULL);
2291 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2292 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2293 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2294 sizeof(*nioptr) * page_count);
2296 memcpy(&body->oa, oa, sizeof(body->oa));
2298 obdo_to_ioobj(oa, iooptr);
2299 iooptr->ioo_bufcnt = page_count;
2301 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2302 LASSERT(PageLocked(pga[mapped].pg));
2303 LASSERT(mapped == 0 ||
2304 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2306 nioptr->offset = pga[mapped].disk_offset;
2307 nioptr->len = pga[mapped].count;
2308 nioptr->flags = pga[mapped].flag;
2311 size[1] = page_count * sizeof(*nioptr);
2312 request->rq_replen = lustre_msg_size(2, size);
2314 rc = ptlrpc_queue_wait(request);
2318 body = lustre_swab_repbuf(request, 0, sizeof(*body),
2319 lustre_swab_ost_body);
2321 CERROR("Can't unpack body\n");
2322 GOTO(out_req, rc = -EPROTO);
2325 memcpy(oa, &body->oa, sizeof(*oa));
2327 swab = lustre_msg_swabbed(request->rq_repmsg);
2328 LASSERT_REPSWAB(request, 1);
2329 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2331 /* nioptr missing or short */
2332 GOTO(out_req, rc = -EPROTO);
2336 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2337 struct page *page = pga[mapped].pg;
2338 struct buffer_head *bh;
2342 lustre_swab_niobuf_remote(nioptr);
2344 /* fetch the SAN device associated with this client */
2345 LASSERT(exp->exp_obd != NULL);
2346 dev = exp->exp_obd->u.cli.cl_sandev;
2349 if (!nioptr->offset) {
2350 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
2351 page->mapping->host->i_ino,
2353 memset(page_address(page), 0, PAGE_SIZE);
2357 if (!page->buffers) {
2358 create_empty_buffers(page, dev, PAGE_SIZE);
2361 clear_bit(BH_New, &bh->b_state);
2362 set_bit(BH_Mapped, &bh->b_state);
2363 bh->b_blocknr = (unsigned long)nioptr->offset;
2365 clear_bit(BH_Uptodate, &bh->b_state);
2367 ll_rw_block(READ, 1, &bh);
2371 /* if the buffer already existed, it must be the
2372 * one we mapped before; check it */
2373 LASSERT(!test_bit(BH_New, &bh->b_state));
2374 LASSERT(test_bit(BH_Mapped, &bh->b_state));
2375 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
2377 /* wait for its I/O completion */
2378 if (test_bit(BH_Lock, &bh->b_state))
2381 if (!test_bit(BH_Uptodate, &bh->b_state))
2382 ll_rw_block(READ, 1, &bh);
2386 /* must do a synchronous read here */
2388 if (!buffer_uptodate(bh)) {
2396 ptlrpc_req_finished(request);
2400 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2401 struct lov_stripe_md *lsm, obd_count page_count,
2402 struct brw_page *pga)
2404 struct ptlrpc_request *request = NULL;
2405 struct ost_body *body;
2406 struct niobuf_remote *nioptr;
2407 struct obd_ioobj *iooptr;
2408 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2412 size[1] = sizeof(struct obd_ioobj);
2413 size[2] = page_count * sizeof(*nioptr);
2415 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2416 OST_SAN_WRITE, 3, size, NULL);
2420 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2421 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2422 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2423 sizeof(*nioptr) * page_count);
2425 memcpy(&body->oa, oa, sizeof(body->oa));
2427 obdo_to_ioobj(oa, iooptr);
2428 iooptr->ioo_bufcnt = page_count;
2431 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2432 LASSERT(PageLocked(pga[mapped].pg));
2433 LASSERT(mapped == 0 ||
2434 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2436 nioptr->offset = pga[mapped].disk_offset;
2437 nioptr->len = pga[mapped].count;
2438 nioptr->flags = pga[mapped].flag;
2441 size[1] = page_count * sizeof(*nioptr);
2442 request->rq_replen = lustre_msg_size(2, size);
2444 rc = ptlrpc_queue_wait(request);
2448 swab = lustre_msg_swabbed(request->rq_repmsg);
2449 LASSERT_REPSWAB(request, 1);
2450 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2452 CERROR("absent/short niobuf array\n");
2453 GOTO(out_req, rc = -EPROTO);
2457 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2458 struct page *page = pga[mapped].pg;
2459 struct buffer_head *bh;
2463 lustre_swab_niobuf_remote(nioptr);
2465 /* fetch the SAN device associated with this client */
2466 LASSERT(exp->exp_obd != NULL);
2467 dev = exp->exp_obd->u.cli.cl_sandev;
2469 if (!page->buffers) {
2470 create_empty_buffers(page, dev, PAGE_SIZE);
2473 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2474 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2475 LASSERT(page->buffers->b_blocknr ==
2476 (unsigned long)nioptr->offset);
2482 /* if the buffer is locked, wait for its I/O completion */
2483 if (test_bit(BH_Lock, &bh->b_state))
2486 clear_bit(BH_New, &bh->b_state);
2487 set_bit(BH_Mapped, &bh->b_state);
2489 /* override the block nr */
2490 bh->b_blocknr = (unsigned long)nioptr->offset;
2492 /* we are about to write it, so mark it up to date and dirty; the
2494 * page lock should guarantee no race condition here */
2495 set_bit(BH_Uptodate, &bh->b_state);
2496 set_bit(BH_Dirty, &bh->b_state);
2498 ll_rw_block(WRITE, 1, &bh);
2500 /* must do a synchronous write here */
2502 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2510 ptlrpc_req_finished(request);
2514 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2515 struct lov_stripe_md *lsm, obd_count page_count,
2516 struct brw_page *pga, struct obd_trans_info *oti)
2520 while (page_count) {
2521 obd_count pages_per_brw;
2524 if (page_count > PTLRPC_MAX_BRW_PAGES)
2525 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
2527 pages_per_brw = page_count;
2529 if (cmd & OBD_BRW_WRITE)
2530 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw, pga);
2532 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2537 page_count -= pages_per_brw;
2538 pga += pages_per_brw;
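/* Worked example (the concrete PTLRPC_MAX_BRW_PAGES value is
 * build-dependent, so this is only illustrative): if the limit were
 * 256 pages, a 600-page request would go out as three wire RPCs of
 * 256, 256, and 88 pages, with pga advanced past the pages already
 * sent on each iteration. */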
2545 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2547 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2550 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
2554 lock_res_and_lock(lock);
2556 if (lock->l_ast_data && lock->l_ast_data != data) {
2557 struct inode *new_inode = data;
2558 struct inode *old_inode = lock->l_ast_data;
2559 if (!(old_inode->i_state & I_FREEING))
2560 LDLM_ERROR(lock, "inconsistent l_ast_data found");
2561 LASSERTF(old_inode->i_state & I_FREEING,
2562 "Found existing inode %p/%lu/%u state %lu in lock: "
2563 "setting data to %p/%lu/%u\n", old_inode,
2564 old_inode->i_ino, old_inode->i_generation,
2566 new_inode, new_inode->i_ino, new_inode->i_generation);
2569 lock->l_ast_data = data;
2570 unlock_res_and_lock(lock);
2571 LDLM_LOCK_PUT(lock);
2574 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2575 ldlm_iterator_t replace, void *data)
2577 struct ldlm_res_id res_id = { .name = {0} };
2578 struct obd_device *obd = class_exp2obd(exp);
2580 res_id.name[0] = lsm->lsm_object_id;
2581 res_id.name[2] = lsm->lsm_object_gr;
2582 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
2586 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2587 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2588 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2589 void *data, __u32 lvb_len, void *lvb_swabber,
2590 struct lustre_handle *lockh)
2592 struct obd_device *obd = exp->exp_obd;
2593 struct ldlm_res_id res_id = { .name = {0} };
2595 struct ldlm_reply *rep;
2596 struct ptlrpc_request *req = NULL;
2600 res_id.name[0] = lsm->lsm_object_id;
2601 res_id.name[2] = lsm->lsm_object_gr;
2603 /* Filesystem lock extents are extended to page boundaries so that
2604 * dealing with the page cache is a little smoother. */
2605 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2606 policy->l_extent.end |= ~PAGE_MASK;
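/* Worked example (assuming 4 KB pages, so ~PAGE_MASK == 0xfff): a
 * request for bytes [5000, 9000] is widened to the page-aligned
 * extent [4096, 12287]:
 *
 *     start: 5000 - (5000 & 0xfff) = 5000 - 904 = 4096
 *     end:   9000 | 0xfff          = 12287
 *
 * so the DLM lock always covers whole pages. */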
2608 if (lsm->lsm_oinfo->loi_kms_valid == 0)
2611 /* Next, search for already existing extent locks that will cover us */
2612 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2615 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2616 /* return immediately if no credential held */
2617 ldlm_lock_decref(lockh, mode);
2621 osc_set_data_with_check(lockh, data);
2622 if (*flags & LDLM_FL_HAS_INTENT) {
2623 /* I would like to be able to ASSERT here that rss <=
2624 * kms, but I can't, for reasons which are explained in
2627 /* We already have a lock, and it's referenced */
2631 /* If we're trying to read, we also search for an existing PW lock. The
2632 * VFS and page cache already protect us locally, so lots of readers/
2633 * writers can share a single PW lock.
2635 * There are problems with conversion deadlocks, so instead of
2636 * converting a read lock to a write lock, we'll just enqueue a new
2639 * At some point we should cancel the read lock instead of making the
2640 * server send us a blocking callback, but there are problems with
2641 * canceling locks out from under other users right now, too. */
2643 if (mode == LCK_PR) {
2644 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2645 policy, LCK_PW, lockh);
2647 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2648 /* return immediately if no credential held */
2649 ldlm_lock_decref(lockh, LCK_PW);
2653 /* FIXME: This is not incredibly elegant, but it might
2654 * be more elegant than adding another parameter to
2655 * lock_match. I want a second opinion. */
2656 ldlm_lock_addref(lockh, LCK_PR);
2657 ldlm_lock_decref(lockh, LCK_PW);
2658 osc_set_data_with_check(lockh, data);
2662 if (mode == LCK_PW) {
2663 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2664 policy, LCK_PR, lockh);
2666 rc = ldlm_cli_convert(lockh, mode, flags);
2668 /* Update readers/writers accounting */
2669 ldlm_lock_addref(lockh, LCK_PW);
2670 ldlm_lock_decref(lockh, LCK_PR);
2671 osc_set_data_with_check(lockh, data);
2674 /* If the conversion failed, we need to drop the refcount
2675 on the matched lock before we get a new one */
2676 /* XXX Wouldn't it save us some effort to cancel the PR
2677 lock here? We are going to take a PW lock anyway and it
2678 will invalidate the PR lock */
2679 ldlm_lock_decref(lockh, LCK_PR);
2680 if (rc != EDEADLOCK) {
2687 if (*flags & LDLM_FL_HAS_INTENT) {
2688 int size[2] = {0, sizeof(struct ldlm_request)};
2690 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
2691 LDLM_ENQUEUE, 2, size, NULL);
2695 size[0] = sizeof(*rep);
2696 size[1] = sizeof(lvb);
2697 req->rq_replen = lustre_msg_size(2, size);
2699 rc = ldlm_cli_enqueue(exp, req, obd->obd_namespace, res_id, type,
2700 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2701 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
2703 if (rc == ELDLM_LOCK_ABORTED) {
2704 /* swabbed by ldlm_cli_enqueue() */
2705 LASSERT_REPSWABBED(req, 0);
2706 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
2707 LASSERT(rep != NULL);
2708 if (rep->lock_policy_res1)
2709 rc = rep->lock_policy_res1;
2711 ptlrpc_req_finished(req);
2714 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
2715 CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
2716 lvb.lvb_size, lvb.lvb_blocks);
2717 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2718 lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
2724 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2725 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2726 int *flags, void *data, struct lustre_handle *lockh)
2728 struct ldlm_res_id res_id = { .name = {0} };
2729 struct obd_device *obd = exp->exp_obd;
2733 res_id.name[0] = lsm->lsm_object_id;
2734 res_id.name[2] = lsm->lsm_object_gr;
2736 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2738 /* Filesystem lock extents are extended to page boundaries so that
2739 * dealing with the page cache is a little smoother */
2740 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2741 policy->l_extent.end |= ~PAGE_MASK;
2743 /* Next, search for already existing extent locks that will cover us */
2744 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2745 policy, mode, lockh);
2747 // if (!(*flags & LDLM_FL_TEST_LOCK))
2748 osc_set_data_with_check(lockh, data);
2751 /* If we're trying to read, we also search for an existing PW lock. The
2752 * VFS and page cache already protect us locally, so lots of readers/
2753 * writers can share a single PW lock. */
2754 if (mode == LCK_PR) {
2755 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2756 policy, LCK_PW, lockh);
2757 if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
2758 /* FIXME: This is not incredibly elegant, but it might
2759 * be more elegant than adding another parameter to
2760 * lock_match. I want a second opinion. */
2761 osc_set_data_with_check(lockh, data);
2762 ldlm_lock_addref(lockh, LCK_PR);
2763 ldlm_lock_decref(lockh, LCK_PW);
2769 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2770 __u32 mode, struct lustre_handle *lockh)
2774 if (mode == LCK_GROUP)
2775 ldlm_lock_decref_and_cancel(lockh, mode);
2777 ldlm_lock_decref(lockh, mode);
2782 static int osc_cancel_unused(struct obd_export *exp,
2783 struct lov_stripe_md *lsm,
2784 int flags, void *opaque)
2786 struct obd_device *obd = class_exp2obd(exp);
2787 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
2790 res_id.name[0] = lsm->lsm_object_id;
2791 res_id.name[2] = lsm->lsm_object_gr;
2795 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
2798 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2799 unsigned long max_age)
2801 struct obd_statfs *msfs;
2802 struct ptlrpc_request *request;
2803 int rc, size = sizeof(*osfs);
2806 /* We could possibly pass max_age in the request (as an absolute
2807 * timestamp or a "seconds.usec ago") so the target can avoid doing
2808 * extra calls into the filesystem if that isn't necessary (e.g.
2809 * during mount, where that would help a bit). Having relative timestamps
2810 * is not so great if request processing is slow, while absolute
2811 * timestamps are not ideal because they need time synchronization. */
2812 request = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
2813 OST_STATFS, 0, NULL, NULL);
2817 request->rq_replen = lustre_msg_size(1, &size);
2818 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2820 rc = ptlrpc_queue_wait(request);
2824 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2825 lustre_swab_obd_statfs);
2827 CERROR("Can't unpack obd_statfs\n");
2828 GOTO(out, rc = -EPROTO);
2831 memcpy(osfs, msfs, sizeof(*osfs));
2835 ptlrpc_req_finished(request);
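/* Caller-side sketch (an illustration, not code from this file; it
 * assumes the generic obd_statfs() wrapper dispatches here through
 * o_statfs, with max_age naming the oldest cached result the caller
 * will accept):
 *
 *     struct obd_statfs osfs;
 *     rc = obd_statfs(obd, &osfs, jiffies - HZ);
 */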
2839 /* Retrieve object striping information.
2841 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
2842 * the maximum number of OST indices which will fit in the user buffer.
2843 * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here).
2845 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2847 struct lov_user_md lum, *lumk;
2854 rc = copy_from_user(&lum, lump, sizeof(lum));
2858 if (lum.lmm_magic != LOV_USER_MAGIC)
2861 if (lum.lmm_stripe_count > 0) {
2862 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2863 OBD_ALLOC(lumk, lum_size);
2867 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2868 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
2870 lum_size = sizeof(lum);
2874 lumk->lmm_object_id = lsm->lsm_object_id;
2875 lumk->lmm_object_gr = lsm->lsm_object_gr;
2876 lumk->lmm_stripe_count = 1;
2878 if (copy_to_user(lump, lumk, lum_size))
2882 OBD_FREE(lumk, lum_size);
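/* Sizing note for the reply built above: when the caller asked for at
 * least one stripe slot, lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
 * with lmm_stripe_count == 0 only the sizeof(lum) header is copied, so
 * callers can probe the stripe count before allocating a full buffer. */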
2887 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2888 void *karg, void *uarg)
2890 struct obd_device *obd = exp->exp_obd;
2891 struct obd_ioctl_data *data = karg;
2895 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2898 if (!try_module_get(THIS_MODULE)) {
2899 CERROR("Can't get module. Is it alive?");
2904 case OBD_IOC_LOV_GET_CONFIG: {
2906 struct lov_desc *desc;
2907 struct obd_uuid uuid;
2911 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2912 GOTO(out, err = -EINVAL);
2914 data = (struct obd_ioctl_data *)buf;
2916 if (sizeof(*desc) > data->ioc_inllen1) {
2918 GOTO(out, err = -EINVAL);
2921 if (data->ioc_inllen2 < sizeof(uuid)) {
2923 GOTO(out, err = -EINVAL);
2926 if (data->ioc_inllen3 < sizeof(__u32)) {
2928 GOTO(out, err = -EINVAL);
2931 desc = (struct lov_desc *)data->ioc_inlbuf1;
2932 desc->ld_tgt_count = 1;
2933 desc->ld_active_tgt_count = 1;
2934 desc->ld_default_stripe_count = 1;
2935 desc->ld_default_stripe_size = 0;
2936 desc->ld_default_stripe_offset = 0;
2937 desc->ld_pattern = 0;
2938 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2939 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2940 *((__u32 *)data->ioc_inlbuf3) = 1;
2942 err = copy_to_user((void *)uarg, buf, len);
2945 obd_ioctl_freedata(buf, len);
2948 case LL_IOC_LOV_SETSTRIPE:
2949 err = obd_alloc_memmd(exp, karg);
2953 case LL_IOC_LOV_GETSTRIPE:
2954 err = osc_getstripe(karg, uarg);
2956 case OBD_IOC_CLIENT_RECOVER:
2957 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2962 case IOC_OSC_SET_ACTIVE:
2963 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2966 case IOC_OSC_CTL_RECOVERY:
2967 err = ptlrpc_import_control_recovery(obd->u.cli.cl_import,
2971 CDEBUG(D_INODE, "unrecognized ioctl %#x by %s\n", cmd, current->comm);
2972 GOTO(out, err = -ENOTTY);
2975 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2978 module_put(THIS_MODULE);
2983 static int osc_get_info(struct obd_export *exp, __u32 keylen,
2984 void *key, __u32 *vallen, void *val)
2987 if (!vallen || !val)
2990 if (keylen > strlen("lock_to_stripe") &&
2991 strcmp(key, "lock_to_stripe") == 0) {
2992 __u32 *stripe = val;
2993 *vallen = sizeof(*stripe);
2996 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2997 struct ptlrpc_request *req;
2999 char *bufs[1] = {key};
3001 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
3002 OST_GET_INFO, 1, (int *)&keylen, bufs);
3006 req->rq_replen = lustre_msg_size(1, (int *)vallen);
3007 rc = ptlrpc_queue_wait(req);
3011 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
3012 lustre_swab_ost_last_id);
3013 if (reply == NULL) {
3014 CERROR("Can't unpack OST last ID\n");
3015 GOTO(out, rc = -EPROTO);
3017 *((obd_id *)val) = *reply;
3019 ptlrpc_req_finished(req);
3021 } else if (keylen == 10 && strcmp(key, "client_nid") == 0) {
3022 struct ptlrpc_connection * conn;
3023 ptl_nid_t * nid = val;
3024 ptl_process_id_t id;
3027 *vallen = sizeof(*nid);
3028 conn = class_exp2cliimp(exp)->imp_connection;
3030 if (!conn || !conn->c_peer.peer_ni)
3033 rc = PtlGetId(conn->c_peer.peer_ni->pni_ni_h, &id);
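/* Caller-side sketch (hedged; it assumes the generic obd_get_info()
 * wrapper dispatches here through o_get_info): fetching the last
 * allocated object id from the OST:
 *
 *     obd_id last_id;
 *     __u32 vallen = sizeof(last_id);
 *     rc = obd_get_info(exp, strlen("last_id") + 1, "last_id",
 *                       &vallen, &last_id);
 */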
3042 static int osc_set_info(struct obd_export *exp, obd_count keylen,
3043 void *key, obd_count vallen, void *val)
3045 struct obd_device *obd = exp->exp_obd;
3046 struct obd_import *imp = class_exp2cliimp(exp);
3047 struct llog_ctxt *ctxt;
3051 if (keylen == strlen("unlinked") &&
3052 memcmp(key, "unlinked", keylen) == 0) {
3053 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3054 spin_lock(&oscc->oscc_lock);
3055 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3056 spin_unlock(&oscc->oscc_lock);
3060 if (keylen == strlen("unrecovery") &&
3061 memcmp(key, "unrecovery", keylen) == 0) {
3062 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3063 spin_lock(&oscc->oscc_lock);
3064 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
3065 spin_unlock(&oscc->oscc_lock);
3069 if (keylen == strlen("initial_recov") &&
3070 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
3071 struct obd_import *imp = class_exp2cliimp(exp);
3072 if (vallen != sizeof(int))
3074 imp->imp_initial_recov = *(int *)val;
3075 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
3076 exp->exp_obd->obd_name,
3077 imp->imp_initial_recov);
3081 if (keylen == strlen("async") &&
3082 memcmp(key, "async", keylen) == 0) {
3083 struct client_obd *cl = &obd->u.cli;
3084 if (vallen != sizeof(int))
3086 cl->cl_async = *(int *)val;
3087 CDEBUG(D_HA, "%s: set async = %d\n",
3088 obd->obd_name, cl->cl_async);
3092 if (keylen == 5 && strcmp(key, "audit") == 0) {
3093 struct ptlrpc_request *req;
3094 char *bufs[2] = {key, val};
3095 int size[2] = {keylen, vallen};
3097 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
3098 OST_SET_INFO, 2, size, bufs);
3102 req->rq_replen = lustre_msg_size(0, size);
3103 lustre_swab_reqbuf(req, 1, sizeof(struct audit_attr_msg),
3104 lustre_swab_audit_attr);
3105 rc = ptlrpc_queue_wait(req);
3107 ptlrpc_req_finished(req);
3111 if (keylen == 9 && strcmp(key, "audit_obj") == 0) {
3112 struct ptlrpc_request *req;
3113 char *bufs[2] = {key, val};
3114 int size[2] = {keylen, vallen};
3116 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
3117 OST_SET_INFO, 2, size, bufs);
3121 req->rq_replen = lustre_msg_size(0, size);
3122 lustre_swab_reqbuf(req, 1, sizeof(struct obdo),
3124 rc = ptlrpc_queue_wait(req);
3126 ptlrpc_req_finished(req);
3130 if (keylen == 8 && memcmp(key, "auditlog", 8) == 0) {
3131 struct ptlrpc_request *req;
3132 char *bufs[2] = {key, val};
3133 int size[2] = {keylen, vallen};
3135 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
3136 OST_SET_INFO, 2, size, bufs);
3140 req->rq_replen = lustre_msg_size(0, size);
3141 lustre_swab_reqbuf(req, 1, sizeof(struct audit_msg),
3142 lustre_swab_audit_msg);
3143 rc = ptlrpc_queue_wait(req);
3145 ptlrpc_req_finished(req);
3149 if (keylen == strlen("sec") && memcmp(key, "sec", keylen) == 0) {
3150 struct client_obd *cli = &exp->exp_obd->u.cli;
3152 cli->cl_sec_flavor = ptlrpcs_name2flavor(val);
3153 if (cli->cl_sec_flavor == PTLRPCS_FLVR_INVALID) {
3154 CERROR("unrecognized security flavor %s\n", (char*) val);
3161 if (keylen == strlen("sec_flags") &&
3162 memcmp(key, "sec_flags", keylen) == 0) {
3163 struct client_obd *cli = &exp->exp_obd->u.cli;
3165 cli->cl_sec_flags = *((unsigned long *) val);
3169 if (keylen == strlen("flush_cred") &&
3170 memcmp(key, "flush_cred", keylen) == 0) {
3171 struct client_obd *cli = &exp->exp_obd->u.cli;
3174 ptlrpcs_import_flush_current_creds(cli->cl_import);
3177 if (keylen == strlen("crypto_cb") &&
3178 memcmp(key, "crypto_cb", keylen) == 0) {
3179 LASSERT(vallen == sizeof(crypt_cb_t));
3180 osc_crypt_cb = (crypt_cb_t)val;
3184 if (keylen == 8 && memcmp(key, "capa_key", 8) == 0) {
3185 struct ptlrpc_request *req;
3186 char *bufs[2] = {key, val};
3187 unsigned long irqflags;
3188 int rc, size[2] = {keylen, vallen};
3190 LASSERT(vallen == sizeof(struct lustre_capa_key));
3192 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
3193 OST_SET_INFO, 2, size, bufs);
3197 spin_lock_irqsave(&req->rq_lock, irqflags);
3199 spin_unlock_irqrestore(&req->rq_lock, irqflags);
3201 req->rq_replen = lustre_msg_size(0, NULL);
3202 rc = ptlrpc_queue_wait(req);
3203 ptlrpc_req_finished(req);
3207 if (keylen == strlen("setext") &&
3208 memcmp(key, "setext", keylen) == 0) {
3209 struct client_obd *cli = &exp->exp_obd->u.cli;
3210 struct osc_creator *oscc = &cli->cl_oscc;
3211 struct fid_extent *ext = val;
3213 oscc->oscc_next_id = (obd_id)ext->fe_start;
3217 if (keylen < strlen("mds_conn") ||
3218 memcmp(key, "mds_conn", keylen) != 0)
3221 ctxt = llog_get_context(&exp->exp_obd->obd_llogs,
3222 LLOG_UNLINK_ORIG_CTXT);
3225 rc = llog_initiator_connect(ctxt);
3227 CERROR("cannot establish connection for "
3228 "ctxt %p: %d\n", ctxt, rc);
3231 imp->imp_server_timeout = 1;
3232 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
3233 imp->imp_pingable = 1;
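/* Caller-side sketch (hedged; it assumes the generic obd_set_info()
 * wrapper dispatches here through o_set_info, and the key/value
 * lengths must match the checks above):
 *
 *     int one = 1;
 *     rc = obd_set_info(exp, strlen("async"), "async", sizeof(int), &one);
 *
 * which lands in the "async" branch above and sets cl_async. */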
3239 static struct llog_operations osc_size_repl_logops = {
3240 .lop_cancel = llog_obd_repl_cancel
3243 static struct llog_operations osc_unlink_orig_logops;
3245 static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
3246 struct obd_device *tgt, int count,
3247 struct llog_catid *catid)
3252 osc_unlink_orig_logops = llog_lvfs_ops;
3253 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
3254 osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
3255 osc_unlink_orig_logops.lop_add = llog_catalog_add;
3256 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
3258 rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
3259 &catid->lci_logid, &osc_unlink_orig_logops);
3263 rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
3264 &osc_size_repl_logops);
3268 static int osc_llog_finish(struct obd_device *obd,
3269 struct obd_llogs *llogs, int count)
3274 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
3278 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
3282 static int osc_connect(struct lustre_handle *exph,
3283 struct obd_device *obd, struct obd_uuid *cluuid,
3284 struct obd_connect_data *data,
3285 unsigned long connect_flags)
3289 rc = client_connect_import(exph, obd, cluuid, data, connect_flags);
3293 static int osc_disconnect(struct obd_export *exp, unsigned long flags)
3295 struct obd_device *obd = class_exp2obd(exp);
3296 struct llog_ctxt *ctxt;
3300 ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
3301 if (obd->u.cli.cl_conn_count == 1)
3302 /* flush any remaining cancel messages out to the target */
3303 llog_sync(ctxt, exp);
3305 rc = client_disconnect_export(exp, flags);
3309 static int osc_import_event(struct obd_device *obd,
3310 struct obd_import *imp,
3311 enum obd_import_event event)
3313 struct client_obd *cli;
3317 LASSERT(imp->imp_obd == obd);
3320 case IMP_EVENT_DISCON: {
3321 /* Only do this on the MDS OSCs */
3322 if (imp->imp_server_timeout) {
3323 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3325 spin_lock(&oscc->oscc_lock);
3326 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3327 spin_unlock(&oscc->oscc_lock);
3331 case IMP_EVENT_INACTIVE: {
3332 if (obd->obd_observer)
3333 rc = obd_notify(obd->obd_observer, obd, 0, 0);
3336 case IMP_EVENT_INVALIDATE: {
3337 struct ldlm_namespace *ns = obd->obd_namespace;
3341 spin_lock(&cli->cl_loi_list_lock);
3342 cli->cl_avail_grant = 0;
3343 cli->cl_lost_grant = 0;
3344 /* all pages go to failing RPCs due to the invalid import */
3345 osc_check_rpcs(cli);
3346 spin_unlock(&cli->cl_loi_list_lock);
3348 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3352 case IMP_EVENT_ACTIVE: {
3353 /* Only do this on the MDS OSCs */
3354 if (imp->imp_server_timeout) {
3355 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3357 spin_lock(&oscc->oscc_lock);
3358 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3359 spin_unlock(&oscc->oscc_lock);
3362 if (obd->obd_observer)
3363 rc = obd_notify(obd->obd_observer, obd, 1, 0);
3367 CERROR("Unknown import event %d\n", event);
3373 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
3375 struct lprocfs_static_vars lvars;
3379 lprocfs_init_vars(osc, &lvars);
3380 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3384 rc = lproc_osc_attach_seqstat(dev);
3386 lprocfs_obd_detach(dev);
3390 ptlrpc_lprocfs_register_obd(dev);
3394 static int osc_detach(struct obd_device *dev)
3396 ptlrpc_lprocfs_unregister_obd(dev);
3397 return lprocfs_obd_detach(dev);
3400 static int osc_setup(struct obd_device *obd, obd_count len, void *buf)
3404 rc = ptlrpcd_addref();
3408 rc = client_obd_setup(obd, len, buf);
3417 static int osc_cleanup(struct obd_device *obd, int flags)
3419 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3422 rc = ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
3423 LDLM_FL_CONFIG_CHANGE, NULL);
3427 spin_lock(&oscc->oscc_lock);
3428 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
3429 oscc->oscc_flags |= OSCC_FLAG_EXITING;
3430 spin_unlock(&oscc->oscc_lock);
3432 rc = client_obd_cleanup(obd, flags);
3438 struct obd_ops osc_obd_ops = {
3439 .o_owner = THIS_MODULE,
3440 .o_attach = osc_attach,
3441 .o_detach = osc_detach,
3442 .o_setup = osc_setup,
3443 .o_cleanup = osc_cleanup,
3444 .o_add_conn = client_import_add_conn,
3445 .o_del_conn = client_import_del_conn,
3446 .o_connect = osc_connect,
3447 .o_disconnect = osc_disconnect,
3448 .o_statfs = osc_statfs,
3449 .o_packmd = osc_packmd,
3450 .o_unpackmd = osc_unpackmd,
3451 .o_create = osc_create,
3452 .o_destroy = osc_destroy,
3453 .o_getattr = osc_getattr,
3454 .o_getattr_async = osc_getattr_async,
3455 .o_setattr = osc_setattr,
3457 .o_brw_async = osc_brw_async,
3458 .o_prep_async_page = osc_prep_async_page,
3459 .o_queue_async_io = osc_queue_async_io,
3460 .o_set_async_flags = osc_set_async_flags,
3461 .o_queue_group_io = osc_queue_group_io,
3462 .o_trigger_group_io = osc_trigger_group_io,
3463 .o_teardown_async_page = osc_teardown_async_page,
3464 .o_punch = osc_punch,
3466 .o_enqueue = osc_enqueue,
3467 .o_match = osc_match,
3468 .o_change_cbdata = osc_change_cbdata,
3469 .o_cancel = osc_cancel,
3470 .o_cancel_unused = osc_cancel_unused,
3471 .o_iocontrol = osc_iocontrol,
3472 .o_get_info = osc_get_info,
3473 .o_set_info = osc_set_info,
3474 .o_import_event = osc_import_event,
3475 .o_llog_init = osc_llog_init,
3476 .o_llog_finish = osc_llog_finish,
3479 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3480 struct obd_ops sanosc_obd_ops = {
3481 .o_owner = THIS_MODULE,
3482 .o_attach = osc_attach,
3483 .o_detach = osc_detach,
3484 .o_cleanup = client_obd_cleanup,
3485 .o_add_conn = client_import_add_conn,
3486 .o_del_conn = client_import_del_conn,
3487 .o_connect = osc_connect,
3488 .o_disconnect = client_disconnect_export,
3489 .o_statfs = osc_statfs,
3490 .o_packmd = osc_packmd,
3491 .o_unpackmd = osc_unpackmd,
3492 .o_create = osc_real_create,
3493 .o_destroy = osc_destroy,
3494 .o_getattr = osc_getattr,
3495 .o_getattr_async = osc_getattr_async,
3496 .o_setattr = osc_setattr,
3497 .o_setup = client_sanobd_setup,
3498 .o_brw = sanosc_brw,
3499 .o_punch = osc_punch,
3501 .o_enqueue = osc_enqueue,
3502 .o_match = osc_match,
3503 .o_change_cbdata = osc_change_cbdata,
3504 .o_cancel = osc_cancel,
3505 .o_cancel_unused = osc_cancel_unused,
3506 .o_iocontrol = osc_iocontrol,
3507 .o_import_event = osc_import_event,
3508 .o_llog_init = osc_llog_init,
3509 .o_llog_finish = osc_llog_finish,
3513 int __init osc_init(void)
3515 struct lprocfs_static_vars lvars;
3516 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3517 struct lprocfs_static_vars sanlvars;
3522 lprocfs_init_vars(osc, &lvars);
3523 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3524 lprocfs_init_vars(osc, &sanlvars);
3527 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
3528 OBD_OSC_DEVICENAME);
3532 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3533 rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
3534 OBD_SANOSC_DEVICENAME);
3536 class_unregister_type(OBD_OSC_DEVICENAME);
3543 static void /*__exit*/ osc_exit(void)
3545 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3546 class_unregister_type(OBD_SANOSC_DEVICENAME);
3548 class_unregister_type(OBD_OSC_DEVICENAME);
3551 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3552 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3553 MODULE_LICENSE("GPL");
3555 module_init(osc_init);
3556 module_exit(osc_exit);