1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table.)
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_OSC
35 # include <linux/version.h>
36 # include <linux/module.h>
37 # include <linux/mm.h>
38 # include <linux/highmem.h>
39 # include <linux/ctype.h>
40 # include <linux/init.h>
41 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/workqueue.h>
43 # include <linux/smp_lock.h>
45 # include <linux/locks.h>
47 #else /* __KERNEL__ */
48 # include <liblustre.h>
51 #include <linux/lustre_dlm.h>
52 #include <libcfs/kp30.h>
53 #include <linux/lustre_net.h>
54 #include <linux/lustre_sec.h>
55 #include <lustre/lustre_user.h>
56 #include <linux/obd_ost.h>
57 #include <linux/obd_lov.h>
63 #include <linux/lustre_ha.h>
64 #include <linux/lprocfs_status.h>
65 #include <linux/lustre_log.h>
66 #include "osc_internal.h"
68 /* Pack OSC object metadata for disk storage (LE byte order). */
69 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
70 struct lov_stripe_md *lsm)
75 lmm_size = sizeof(**lmmp);
80 OBD_FREE(*lmmp, lmm_size);
86 OBD_ALLOC(*lmmp, lmm_size);
92 LASSERT(lsm->lsm_object_id);
93 LASSERT(lsm->lsm_object_gr);
94 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
95 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
101 /* Unpack OSC object metadata from disk storage (LE byte order). */
102 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
103 struct lov_mds_md *lmm, int lmm_bytes)
109 if (lmm_bytes < sizeof (*lmm)) {
110 CERROR("lov_mds_md too small: %d, need %d\n",
111 lmm_bytes, (int)sizeof(*lmm));
114 /* XXX LOV_MAGIC etc check? */
116 if (lmm->lmm_object_id == 0) {
117 CERROR("lov_mds_md: zero lmm_object_id\n");
122 lsm_size = lov_stripe_md_size(1);
126 if (*lsmp != NULL && lmm == NULL) {
127 OBD_FREE(*lsmp, lsm_size);
133 OBD_ALLOC(*lsmp, lsm_size);
136 loi_init((*lsmp)->lsm_oinfo);
140 /* XXX zero *lsmp? */
141 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
142 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
143 LASSERT((*lsmp)->lsm_object_id);
144 LASSERT((*lsmp)->lsm_object_gr);
147 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
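/* Completion callback for osc_getattr_async(): unpack the ost_body reply and
 * copy the returned attributes into the caller's obdo. */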
152 static int osc_getattr_interpret(struct ptlrpc_request *req,
153 struct osc_getattr_async_args *aa, int rc)
155 struct ost_body *body;
161 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
163 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
164 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
166 /* This should really be sent by the OST */
167 aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
168 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
170 CERROR("can't unpack ost_body\n");
172 aa->aa_oa->o_valid = 0;
178 static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
179 struct lov_stripe_md *md,
180 struct ptlrpc_request_set *set)
182 struct ptlrpc_request *request;
183 struct ost_body *body;
184 int size = sizeof(*body);
185 struct osc_getattr_async_args *aa;
188 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
189 OST_GETATTR, 1, &size, NULL);
193 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
194 memcpy(&body->oa, oa, sizeof(*oa));
196 request->rq_replen = lustre_msg_size(1, &size);
197 request->rq_interpret_reply = osc_getattr_interpret;
199 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
200 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
203 ptlrpc_set_add_req (set, request);
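/* Synchronous getattr: send OST_GETATTR and copy the returned attributes
 * back into *oa. */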
207 static int osc_getattr(struct obd_export *exp, struct obdo *oa,
208 struct lov_stripe_md *md)
210 struct ptlrpc_request *request;
211 struct ost_body *body;
212 int rc, size = sizeof(*body);
215 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
216 OST_GETATTR, 1, &size, NULL);
220 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
221 memcpy(&body->oa, oa, sizeof(*oa));
223 request->rq_replen = lustre_msg_size(1, &size);
225 rc = ptlrpc_queue_wait(request);
227 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
231 body = lustre_swab_repbuf(request, 0, sizeof (*body),
232 lustre_swab_ost_body);
234 CERROR ("can't unpack ost_body\n");
235 GOTO (out, rc = -EPROTO);
238 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
239 memcpy(oa, &body->oa, sizeof(*oa));
241 /* This should really be sent by the OST */
242 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
243 oa->o_valid |= OBD_MD_FLBLKSZ;
247 ptlrpc_req_finished(request);
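/* Synchronous setattr: push the attributes in *oa to the OST. */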
251 static int osc_setattr(struct obd_export *exp, struct obdo *oa,
252 struct lov_stripe_md *md, struct obd_trans_info *oti)
254 struct ptlrpc_request *request;
255 struct ost_body *body;
256 int rc, size = sizeof(*body);
259 LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);
261 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
262 OST_SETATTR, 1, &size, NULL);
266 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
267 memcpy(&body->oa, oa, sizeof(*oa));
269 request->rq_replen = lustre_msg_size(1, &size);
271 rc = ptlrpc_queue_wait(request);
275 body = lustre_swab_repbuf(request, 0, sizeof(*body),
276 lustre_swab_ost_body);
278 GOTO(out, rc = -EPROTO);
280 memcpy(oa, &body->oa, sizeof(*oa));
284 ptlrpc_req_finished(request);
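/* Create an object on the OST and record the returned object id/group in the
 * stripe metadata. */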
288 int osc_real_create(struct obd_export *exp, struct obdo *oa,
289 struct lov_stripe_md **ea, struct obd_trans_info *oti)
291 struct ptlrpc_request *request;
292 struct ost_body *body;
293 struct lov_stripe_md *lsm;
294 int rc, size = sizeof(*body);
302 rc = obd_alloc_memmd(exp, &lsm);
307 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
308 OST_CREATE, 1, &size, NULL);
310 GOTO(out, rc = -ENOMEM);
312 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
313 memcpy(&body->oa, oa, sizeof(body->oa));
315 request->rq_replen = lustre_msg_size(1, &size);
316 if (oa->o_valid & OBD_MD_FLINLINE) {
317 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
318 oa->o_flags == OBD_FL_DELORPHAN);
319 DEBUG_REQ(D_HA, request,
320 "delorphan from OST integration");
321 /* Don't resend the delorphan request */
322 request->rq_no_resend = request->rq_no_delay = 1;
325 rc = ptlrpc_queue_wait(request);
329 body = lustre_swab_repbuf(request, 0, sizeof(*body),
330 lustre_swab_ost_body);
332 CERROR ("can't unpack ost_body\n");
333 GOTO (out_req, rc = -EPROTO);
336 memcpy(oa, &body->oa, sizeof(*oa));
338 /* This should really be sent by the OST */
339 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
340 oa->o_valid |= OBD_MD_FLBLKSZ;
342 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
343 * have valid lsm_oinfo data structs, so don't go touching that.
344 * This needs to be fixed in a big way.
346 lsm->lsm_object_id = oa->o_id;
347 lsm->lsm_object_gr = oa->o_gr;
351 oti->oti_transno = request->rq_repmsg->transno;
353 if (oa->o_valid & OBD_MD_FLCOOKIE) {
354 if (!oti->oti_logcookies)
355 oti_alloc_cookies(oti, 1);
356 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
357 sizeof(oti->oti_onecookie));
361 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
364 ptlrpc_req_finished(request);
367 obd_free_memmd(exp, &lsm);
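/* Truncate/punch an object; the [start, end] byte range is carried in the
 * oa size/blocks fields (see below). */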
371 static int osc_punch(struct obd_export *exp, struct obdo *oa,
372 struct lov_stripe_md *md, obd_size start,
373 obd_size end, struct obd_trans_info *oti)
375 struct ptlrpc_request *request;
376 struct ost_body *body;
377 int rc, size = sizeof(*body);
385 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
386 OST_PUNCH, 1, &size, NULL);
390 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
391 memcpy(&body->oa, oa, sizeof(*oa));
393 /* overload the size and blocks fields in the oa with start/end */
394 body->oa.o_size = start;
395 body->oa.o_blocks = end;
396 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
398 request->rq_replen = lustre_msg_size(1, &size);
400 rc = ptlrpc_queue_wait(request);
404 body = lustre_swab_repbuf (request, 0, sizeof (*body),
405 lustre_swab_ost_body);
407 CERROR ("can't unpack ost_body\n");
408 GOTO (out, rc = -EPROTO);
411 memcpy(oa, &body->oa, sizeof(*oa));
415 ptlrpc_req_finished(request);
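/* Ask the OST to flush the [start, end] range of the object to stable
 * storage. */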
419 static int osc_sync(struct obd_export *exp, struct obdo *oa,
420 struct lov_stripe_md *md, obd_size start, obd_size end)
422 struct ptlrpc_request *request;
423 struct ost_body *body;
424 int rc, size = sizeof(*body);
432 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
433 OST_SYNC, 1, &size, NULL);
437 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
438 memcpy(&body->oa, oa, sizeof(*oa));
440 /* overload the size and blocks fields in the oa with start/end */
441 body->oa.o_size = start;
442 body->oa.o_blocks = end;
443 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
445 request->rq_replen = lustre_msg_size(1, &size);
447 rc = ptlrpc_queue_wait(request);
451 body = lustre_swab_repbuf(request, 0, sizeof(*body),
452 lustre_swab_ost_body);
454 CERROR ("can't unpack ost_body\n");
455 GOTO (out, rc = -EPROTO);
458 memcpy(oa, &body->oa, sizeof(*oa));
462 ptlrpc_req_finished(request);
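/* Destroy an object, passing along the unlink llog cookie when one was
 * provided; the request may be sent asynchronously via ptlrpcd. */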
466 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
467 struct lov_stripe_md *ea, struct obd_trans_info *oti)
469 struct ptlrpc_request *request;
470 struct ost_body *body;
471 int rc, size = sizeof(*body);
479 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
480 OST_DESTROY, 1, &size, NULL);
484 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
486 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
487 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
488 sizeof(*oti->oti_logcookies));
489 oti->oti_logcookies++;
492 memcpy(&body->oa, oa, sizeof(*oa));
493 request->rq_replen = lustre_msg_size(1, &size);
495 if (oti != NULL && oti->oti_async) {
496 /* asynchronous destroy */
497 ptlrpcd_add_req(request);
500 rc = ptlrpc_queue_wait(request);
506 ptlrpc_req_finished(request);
510 body = lustre_swab_repbuf(request, 0, sizeof(*body),
511 lustre_swab_ost_body);
513 CERROR ("Can't unpack body\n");
514 ptlrpc_req_finished(request);
518 memcpy(oa, &body->oa, sizeof(*oa));
519 ptlrpc_req_finished(request);
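/* Report the client's dirty page and grant accounting to the OST in an
 * outgoing obdo. */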
524 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
527 obd_valid bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
529 LASSERT(!(oa->o_valid & bits));
532 spin_lock(&cli->cl_loi_list_lock);
533 oa->o_dirty = cli->cl_dirty;
534 oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
535 oa->o_grant = cli->cl_avail_grant;
536 oa->o_dropped = cli->cl_lost_grant;
537 cli->cl_lost_grant = 0;
538 spin_unlock(&cli->cl_loi_list_lock);
539 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
540 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
543 /* caller must hold loi_list_lock */
544 static void osc_consume_write_grant(struct client_obd *cli,
545 struct osc_async_page *oap)
547 cli->cl_dirty += PAGE_SIZE;
548 cli->cl_avail_grant -= PAGE_SIZE;
549 oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
550 CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
551 LASSERT(cli->cl_avail_grant >= 0);
554 static unsigned long rpcs_in_flight(struct client_obd *cli)
556 return cli->cl_r_in_flight + cli->cl_w_in_flight;
559 /* caller must hold loi_list_lock */
560 void osc_wake_cache_waiters(struct client_obd *cli)
562 struct list_head *l, *tmp;
563 struct osc_cache_waiter *ocw;
565 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
566 /* if we can't dirty more, we must wait until some is written */
567 if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
568 CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
569 cli->cl_dirty, cli->cl_dirty_max);
573 /* if there is still dirty cache but no grant, wait for pending RPCs that
574 * may yet return us some grant before doing sync writes */
575 if (cli->cl_w_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
576 CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
577 cli->cl_w_in_flight);
579 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
580 list_del_init(&ocw->ocw_entry);
581 if (cli->cl_avail_grant < PAGE_SIZE) {
582 /* no more RPCs in flight to return grant, do sync IO */
583 ocw->ocw_rc = -EDQUOT;
584 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
586 osc_consume_write_grant(cli, ocw->ocw_oap);
589 wake_up(&ocw->ocw_waitq);
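/* Fold the grant returned in a BRW reply back into the available grant. */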
595 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
597 spin_lock(&cli->cl_loi_list_lock);
598 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
599 cli->cl_avail_grant += body->oa.o_grant;
600 /* waiters are woken in brw_interpret_oap */
601 spin_unlock(&cli->cl_loi_list_lock);
604 /* We assume that the reason this OSC got a short read is because it read
605 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
606 * via the LOV, and it _knows_ it's reading inside the file, it's just that
607 * this stripe never got written at or beyond this stripe offset yet. */
608 static void handle_short_read(int nob_read, obd_count page_count,
609 struct brw_page *pga)
613 /* skip bytes read OK */
614 while (nob_read > 0) {
615 LASSERT (page_count > 0);
617 if (pga->count > nob_read) {
618 /* EOF inside this page */
619 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
620 memset(ptr + nob_read, 0, pga->count - nob_read);
627 nob_read -= pga->count;
632 /* zero remaining pages */
633 while (page_count-- > 0) {
634 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
635 memset(ptr, 0, pga->count);
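/* Check the per-niobuf return codes in an OST_WRITE reply and verify that
 * the expected number of bytes was transferred. */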
641 static int check_write_rcs(struct ptlrpc_request *request,
642 int requested_nob, int niocount,
643 obd_count page_count, struct brw_page *pga)
647 /* return error if any niobuf was in error */
648 remote_rcs = lustre_swab_repbuf(request, 1,
649 sizeof(*remote_rcs) * niocount, NULL);
650 if (remote_rcs == NULL) {
651 CERROR("Missing/short RC vector on BRW_WRITE reply\n");
654 if (lustre_msg_swabbed(request->rq_repmsg))
655 for (i = 0; i < niocount; i++)
656 __swab32s((__u32 *)&remote_rcs[i]);
658 for (i = 0; i < niocount; i++) {
659 if (remote_rcs[i] < 0)
660 return(remote_rcs[i]);
662 if (remote_rcs[i] != 0) {
663 CERROR("rc[%d] invalid (%d) req %p\n",
664 i, remote_rcs[i], request);
669 if (request->rq_bulk->bd_nob_transferred != requested_nob) {
670 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
671 requested_nob, request->rq_bulk->bd_nob_transferred);
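/* Two brw_pages can share a single remote niobuf if they are contiguous on
 * disk and their flags are compatible. */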
678 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
680 if (p1->flag != p2->flag) {
681 unsigned mask = ~OBD_BRW_FROM_GRANT;
683 /* warn if we try to combine flags that we don't know to be safe to combine */
685 if ((p1->flag & mask) != (p2->flag & mask))
686 CERROR("is it ok to have flags 0x%x and 0x%x in the "
687 "same brw?\n", p1->flag, p2->flag);
691 return (p1->disk_offset + p1->count == p2->disk_offset);
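/* Checksum the first 'nob' bytes of the page array. */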
695 static obd_count cksum_pages(int nob, obd_count page_count,
696 struct brw_page *pga)
702 LASSERT (page_count > 0);
705 ost_checksum(&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
706 pga->count > nob ? nob : pga->count);
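/* Build a bulk read/write request: pack the ost_body, obd_ioobj and the
 * merged remote niobufs, and attach a bulk descriptor for the pages. */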
718 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
719 struct lov_stripe_md *lsm, obd_count page_count,
720 struct brw_page *pga, int *requested_nobp,
721 int *niocountp, struct ptlrpc_request **reqp)
723 struct ptlrpc_request *req;
724 struct ptlrpc_bulk_desc *desc;
725 struct client_obd *cli = &imp->imp_obd->u.cli;
726 struct ost_body *body;
727 struct obd_ioobj *ioobj;
728 struct niobuf_remote *niobuf;
736 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
738 for (niocount = i = 1; i < page_count; i++)
739 if (!can_merge_pages(&pga[i - 1], &pga[i]))
742 size[0] = sizeof(*body);
743 size[1] = sizeof(*ioobj);
744 size[2] = niocount * sizeof(*niobuf);
746 req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opc, 3, size, NULL);
750 if (opc == OST_WRITE)
751 desc = ptlrpc_prep_bulk_imp (req, page_count,
752 BULK_GET_SOURCE, OST_BULK_PORTAL);
754 desc = ptlrpc_prep_bulk_imp (req, page_count,
755 BULK_PUT_SINK, OST_BULK_PORTAL);
757 GOTO(out, rc = -ENOMEM);
758 /* NB request now owns desc and will free it when it gets freed */
760 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
761 ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
762 niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));
764 memcpy(&body->oa, oa, sizeof(*oa));
766 obdo_to_ioobj(oa, ioobj);
767 ioobj->ioo_bufcnt = niocount;
769 LASSERT (page_count > 0);
771 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
772 struct brw_page *pg = &pga[i];
773 struct brw_page *pg_prev = pg - 1;
775 LASSERT(pg->count > 0);
776 LASSERTF((pg->page_offset & ~PAGE_MASK)+ pg->count <= PAGE_SIZE,
777 "i: %d pg: %p pg_off: "LPU64", count: %u\n", i, pg,
778 pg->page_offset, pg->count);
779 LASSERTF(i == 0 || pg->disk_offset > pg_prev->disk_offset,
780 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
781 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
783 pg->pg, pg->pg->private, pg->pg->index, pg->disk_offset,
784 pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
785 pg_prev->disk_offset);
787 ptlrpc_prep_bulk_page(desc, pg->pg,
788 pg->page_offset & ~PAGE_MASK, pg->count);
789 requested_nob += pg->count;
791 if (i > 0 && can_merge_pages(pg_prev, pg)) {
793 niobuf->len += pg->count;
795 niobuf->offset = pg->disk_offset;
796 niobuf->len = pg->count;
797 niobuf->flags = pg->flag;
801 LASSERT((void *)(niobuf - niocount) ==
802 lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
803 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
805 /* size[0] still sizeof (*body) */
806 if (opc == OST_WRITE) {
808 body->oa.o_valid |= OBD_MD_FLCKSUM;
809 body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
811 /* 1 RC per niobuf */
812 size[1] = sizeof(__u32) * niocount;
813 req->rq_replen = lustre_msg_size(2, size);
815 /* 1 RC for the whole I/O */
816 req->rq_replen = lustre_msg_size(1, size);
819 *niocountp = niocount;
820 *requested_nobp = requested_nob;
825 ptlrpc_req_finished (req);
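/* Common BRW completion: update grant, check per-niobuf rcs for writes, and
 * validate the transfer size and checksum for reads. */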
829 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
830 int requested_nob, int niocount,
831 obd_count page_count, struct brw_page *pga,
834 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
835 struct ost_body *body;
841 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
843 CERROR ("Can't unpack body\n");
847 osc_update_grant(cli, body);
848 memcpy(oa, &body->oa, sizeof(*oa));
850 if (req->rq_reqmsg->opc == OST_WRITE) {
852 CERROR ("Unexpected +ve rc %d\n", rc);
855 LASSERT (req->rq_bulk->bd_nob == requested_nob);
857 RETURN(check_write_rcs(req, requested_nob, niocount,
861 if (rc > requested_nob) {
862 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
866 if (rc != req->rq_bulk->bd_nob_transferred) {
867 CERROR ("Unexpected rc %d (%d transferred)\n",
868 rc, req->rq_bulk->bd_nob_transferred);
872 if (rc < requested_nob)
873 handle_short_read(rc, page_count, pga);
876 if (oa->o_valid & OBD_MD_FLCKSUM) {
877 const struct ptlrpc_peer *peer =
878 &req->rq_import->imp_connection->c_peer;
879 static int cksum_counter;
880 obd_count server_cksum = oa->o_cksum;
881 obd_count cksum = cksum_pages(rc, page_count, pga);
882 char str[PTL_NALFMT_SIZE];
884 ptlrpc_peernid2str(peer, str);
887 if (server_cksum != cksum) {
888 CERROR("Bad checksum: server %x, client %x, server NID "
889 LPX64" (%s)\n", server_cksum, cksum,
890 peer->peer_id.nid, str);
893 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
894 CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
895 cksum_counter, peer->peer_id.nid, str, cksum);
898 static int cksum_missed;
901 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
902 CERROR("Request checksum %u from "LPX64", no reply\n",
904 req->rq_import->imp_connection->c_peer.peer_id.nid);
910 static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
911 struct lov_stripe_md *lsm,
912 obd_count page_count, struct brw_page *pga)
916 struct ptlrpc_request *request;
921 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
922 page_count, pga, &requested_nob, &niocount,
927 rc = ptlrpc_queue_wait(request);
929 if (rc == -ETIMEDOUT && request->rq_resend) {
930 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
931 ptlrpc_req_finished(request);
935 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
936 page_count, pga, rc);
938 ptlrpc_req_finished(request);
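/* Reply callback for BRWs issued on a request set by osc_brw_async(). */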
942 static int brw_interpret(struct ptlrpc_request *request,
943 struct osc_brw_async_args *aa, int rc)
945 struct obdo *oa = aa->aa_oa;
946 int requested_nob = aa->aa_requested_nob;
947 int niocount = aa->aa_nio_count;
948 obd_count page_count = aa->aa_page_count;
949 struct brw_page *pga = aa->aa_pga;
952 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
953 page_count, pga, rc);
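/* Like osc_brw_internal(), but the prepared request is added to the caller's
 * request set instead of being waited on here. */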
957 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
958 struct lov_stripe_md *lsm, obd_count page_count,
959 struct brw_page *pga, struct ptlrpc_request_set *set)
961 struct ptlrpc_request *request;
964 struct osc_brw_async_args *aa;
968 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
969 page_count, pga, &requested_nob, &nio_count,
972 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
973 aa = (struct osc_brw_async_args *)&request->rq_async_args;
975 aa->aa_requested_nob = requested_nob;
976 aa->aa_nio_count = nio_count;
977 aa->aa_page_count = page_count;
980 request->rq_interpret_reply = brw_interpret;
981 ptlrpc_set_add_req(set, request);
987 #define min_t(type,x,y) \
988 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
992 * ugh, we want disk allocation on the target to happen in offset order. we'll
993 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
994 * fine for our small page arrays and doesn't require allocation. it's an
995 * insertion sort that swaps elements that are strides apart, shrinking the
996 * stride down until it's 1 and the array is sorted.
998 static void sort_brw_pages(struct brw_page *array, int num)
1001 struct brw_page tmp;
1005 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1010 for (i = stride ; i < num ; i++) {
1013 while (j >= stride && array[j - stride].disk_offset >
1015 array[j] = array[j - stride];
1020 } while (stride > 1);
1023 /* make sure the regions we're passing to Elan don't violate its '4
1024 * fragments' constraint. portal headers are a fragment, all full
1025 * PAGE_SIZE long pages count as 1 fragment, and each partial page
1026 * counts as a fragment. I think. see bug 934. */
1027 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
1030 int saw_whole_frag = 0;
1033 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
1034 if (pg->count == PAGE_SIZE) {
1035 if (!saw_whole_frag) {
1046 static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
1047 struct lov_stripe_md *lsm, obd_count page_count,
1048 struct brw_page *pga, struct obd_trans_info *oti)
1052 if (cmd == OBD_BRW_CHECK) {
1053 /* The caller just wants to know if there's a chance that this
1054 * I/O can succeed */
1055 struct obd_import *imp = class_exp2cliimp(exp);
1057 if (imp == NULL || imp->imp_invalid)
1062 while (page_count) {
1063 obd_count pages_per_brw;
1066 if (page_count > PTLRPC_MAX_BRW_PAGES)
1067 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1069 pages_per_brw = page_count;
1071 sort_brw_pages(pga, pages_per_brw);
1072 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1074 rc = osc_brw_internal(cmd, exp, oa, lsm, pages_per_brw, pga);
1079 page_count -= pages_per_brw;
1080 pga += pages_per_brw;
1085 static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
1086 struct lov_stripe_md *lsm, obd_count page_count,
1087 struct brw_page *pga, struct ptlrpc_request_set *set,
1088 struct obd_trans_info *oti)
1092 if (cmd == OBD_BRW_CHECK) {
1093 /* The caller just wants to know if there's a chance that this
1094 * I/O can succeed */
1095 struct obd_import *imp = class_exp2cliimp(exp);
1097 if (imp == NULL || imp->imp_invalid)
1102 while (page_count) {
1103 obd_count pages_per_brw;
1106 if (page_count > PTLRPC_MAX_BRW_PAGES)
1107 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1109 pages_per_brw = page_count;
1111 sort_brw_pages(pga, pages_per_brw);
1112 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1114 rc = async_internal(cmd, exp, oa, lsm, pages_per_brw, pga, set);
1119 page_count -= pages_per_brw;
1120 pga += pages_per_brw;
1125 static void osc_check_rpcs(struct client_obd *cli);
1126 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1128 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
1129 static void lop_update_pending(struct client_obd *cli,
1130 struct loi_oap_pages *lop, int cmd, int delta);
1132 /* this is called when a sync waiter receives an interruption. Its job is to
1133 * get the caller woken as soon as possible. If its page hasn't been put in an
1134 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1135 * desiring interruption, which will forcefully complete the rpc once the rpc has completed. */
1137 static void osc_occ_interrupted(struct oig_callback_context *occ)
1139 struct osc_async_page *oap;
1140 struct loi_oap_pages *lop;
1141 struct lov_oinfo *loi;
1144 /* XXX member_of() */
1145 oap = list_entry(occ, struct osc_async_page, oap_occ);
1147 spin_lock(&oap->oap_cli->cl_loi_list_lock);
1149 oap->oap_interrupted = 1;
1151 /* ok, it's been put in an rpc. */
1152 if (oap->oap_request != NULL) {
1153 ptlrpc_mark_interrupted(oap->oap_request);
1154 ptlrpcd_wake(oap->oap_request);
1158 /* we don't get interruption callbacks until osc_trigger_sync_io()
1159 * has been called and put the sync oaps in the pending/urgent lists.*/
1160 if (!list_empty(&oap->oap_pending_item)) {
1161 list_del_init(&oap->oap_pending_item);
1162 if (oap->oap_async_flags & ASYNC_URGENT)
1163 list_del_init(&oap->oap_urgent_item);
1166 lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
1167 &loi->loi_write_lop : &loi->loi_read_lop;
1168 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1169 loi_list_maint(oap->oap_cli, oap->oap_loi);
1171 oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
1172 oap->oap_oig = NULL;
1176 spin_unlock(&oap->oap_cli->cl_loi_list_lock);
1179 /* this must be called holding the loi list lock to give coverage to exit_cache,
1180 * async_flag maintenance, and oap_request */
1181 static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
1182 struct osc_async_page *oap, int sent, int rc)
1184 osc_exit_cache(cli, oap, sent);
1185 oap->oap_async_flags = 0;
1186 oap->oap_interrupted = 0;
1188 if (oap->oap_request != NULL) {
1189 ptlrpc_req_finished(oap->oap_request);
1190 oap->oap_request = NULL;
1193 if (rc == 0 && oa != NULL)
1194 oap->oap_loi->loi_blocks = oa->o_blocks;
1197 oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
1198 oap->oap_oig = NULL;
1203 oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
1207 static int brw_interpret_oap(struct ptlrpc_request *request,
1208 struct osc_brw_async_args *aa, int rc)
1210 struct osc_async_page *oap;
1211 struct client_obd *cli;
1212 struct list_head *pos, *n;
1216 do_gettimeofday(&now);
1217 rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
1218 aa->aa_nio_count, aa->aa_page_count,
1221 CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);
1224 /* in failout recovery we ignore writeback failure and want
1225 * to just tell llite to unlock the page and continue */
1226 if (request->rq_reqmsg->opc == OST_WRITE &&
1227 (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
1228 CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
1230 cli->cl_import ? cli->cl_import->imp_invalid : -1);
1234 spin_lock(&cli->cl_loi_list_lock);
1236 if (request->rq_reqmsg->opc == OST_WRITE)
1237 lprocfs_stime_record(&cli->cl_write_stime, &now,
1238 &request->rq_rpcd_start);
1240 lprocfs_stime_record(&cli->cl_read_stime, &now,
1241 &request->rq_rpcd_start);
1245 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1246 * is called so we know whether to go to sync BRWs or wait for more
1247 * RPCs to complete */
1248 if (request->rq_reqmsg->opc == OST_WRITE)
1249 cli->cl_w_in_flight--;
1251 cli->cl_r_in_flight--;
1253 /* the caller may re-use the oap after the completion call so
1254 * we need to clean it up a little */
1255 list_for_each_safe(pos, n, &aa->aa_oaps) {
1256 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1258 //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
1259 //oap->oap_page, oap->oap_page->index, oap);
1261 list_del_init(&oap->oap_rpc_item);
1262 osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
1265 osc_wake_cache_waiters(cli);
1266 osc_check_rpcs(cli);
1268 spin_unlock(&cli->cl_loi_list_lock);
1270 obdo_free(aa->aa_oa);
1271 OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));
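/* Build a single BRW request from the list of queued osc_async_pages. */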
1276 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
1277 struct list_head *rpc_list,
1278 int page_count, int cmd)
1280 struct ptlrpc_request *req;
1281 struct brw_page *pga = NULL;
1282 int requested_nob, nio_count;
1283 struct osc_brw_async_args *aa;
1284 struct obdo *oa = NULL;
1285 struct obd_async_page_ops *ops = NULL;
1286 void *caller_data = NULL;
1287 struct list_head *pos;
1290 LASSERT(!list_empty(rpc_list));
1292 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1294 RETURN(ERR_PTR(-ENOMEM));
1298 GOTO(out, req = ERR_PTR(-ENOMEM));
1301 list_for_each(pos, rpc_list) {
1302 struct osc_async_page *oap;
1304 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1306 ops = oap->oap_caller_ops;
1307 caller_data = oap->oap_caller_data;
1309 pga[i].disk_offset = oap->oap_obj_off + oap->oap_page_off;
1310 pga[i].page_offset = pga[i].disk_offset;
1311 pga[i].pg = oap->oap_page;
1312 pga[i].count = oap->oap_count;
1313 pga[i].flag = oap->oap_brw_flags;
1314 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1315 pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
1319 /* always get the data for the obdo for the rpc */
1320 LASSERT(ops != NULL);
1321 ops->ap_fill_obdo(caller_data, cmd, oa);
1323 sort_brw_pages(pga, page_count);
1324 rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
1325 pga, &requested_nob, &nio_count, &req);
1327 CERROR("prep_req failed: %d\n", rc);
1328 GOTO(out, req = ERR_PTR(rc));
1331 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1332 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1334 aa->aa_requested_nob = requested_nob;
1335 aa->aa_nio_count = nio_count;
1336 aa->aa_page_count = page_count;
1345 OBD_FREE(pga, sizeof(*pga) * page_count);
1350 static void lop_update_pending(struct client_obd *cli,
1351 struct loi_oap_pages *lop, int cmd, int delta)
1353 lop->lop_num_pending += delta;
1354 if (cmd == OBD_BRW_WRITE)
1355 cli->cl_pending_w_pages += delta;
1357 cli->cl_pending_r_pages += delta;
1360 /* the loi lock is held across this function but it's allowed to release
1361 * and reacquire it during its work */
1362 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
1363 int cmd, struct loi_oap_pages *lop)
1365 struct ptlrpc_request *request;
1366 obd_count page_count = 0;
1367 struct list_head *tmp, *pos;
1368 struct osc_async_page *oap = NULL;
1369 struct osc_brw_async_args *aa;
1370 struct obd_async_page_ops *ops;
1371 LIST_HEAD(rpc_list);
1374 /* first we find the pages we're allowed to work with */
1375 list_for_each_safe(pos, tmp, &lop->lop_pending) {
1376 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1377 ops = oap->oap_caller_ops;
1379 LASSERT(oap->oap_magic == OAP_MAGIC);
1381 /* in llite being 'ready' equates to the page being locked
1382 * until completion unlocks it. commit_write submits a page
1383 * as not ready because its unlock will happen unconditionally
1384 * as the call returns. if we race with commit_write giving
1385 * us that page we don't want to create a hole in the page
1386 * stream, so we stop and leave the rpc to be fired by
1387 * another dirtier or kupdated interval (the not ready page
1388 * will still be on the dirty list). we could call in
1389 * at the end of ll_file_write to process the queue again. */
1390 if (!(oap->oap_async_flags & ASYNC_READY)) {
1391 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
1393 CDEBUG(D_INODE, "oap %p page %p returned %d "
1394 "instead of ready\n", oap,
1398 /* llite is telling us that the page is still
1399 * in commit_write and that we should try
1400 * and put it in an rpc again later. we
1401 * break out of the loop so we don't create
1402 * a hole in the sequence of pages in the rpc
1407 /* the io isn't needed.. tell the checks
1408 * below to complete the rpc with EINTR */
1409 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1410 oap->oap_count = -EINTR;
1413 oap->oap_async_flags |= ASYNC_READY;
1416 LASSERTF(0, "oap %p page %p returned %d "
1417 "from make_ready\n", oap,
1425 /* take the page out of our book-keeping */
1426 list_del_init(&oap->oap_pending_item);
1427 lop_update_pending(cli, lop, cmd, -1);
1428 list_del_init(&oap->oap_urgent_item);
1430 /* ask the caller for the size of the io as the rpc leaves. */
1431 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
1433 ops->ap_refresh_count(oap->oap_caller_data,cmd);
1434 if (oap->oap_count <= 0) {
1435 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
1437 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
1441 /* now put the page back in our accounting */
1442 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1443 if (++page_count >= cli->cl_max_pages_per_rpc)
1447 osc_wake_cache_waiters(cli);
1449 if (page_count == 0)
1452 loi_list_maint(cli, loi);
1453 spin_unlock(&cli->cl_loi_list_lock);
1455 request = osc_build_req(cli, &rpc_list, page_count, cmd);
1456 if (IS_ERR(request)) {
1457 /* this should happen rarely and is pretty bad, it makes the
1458 * pending list not follow the dirty order */
1459 spin_lock(&cli->cl_loi_list_lock);
1460 list_for_each_safe(pos, tmp, &rpc_list) {
1461 oap = list_entry(pos, struct osc_async_page,
1463 list_del_init(&oap->oap_rpc_item);
1465 /* queued sync pages can be torn down while the pages
1466 * were between the pending list and the rpc */
1467 if (oap->oap_interrupted) {
1468 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
1469 osc_ap_completion(cli, NULL, oap, 0,
1474 /* put the page back in the loi/lop lists */
1475 list_add_tail(&oap->oap_pending_item,
1477 lop_update_pending(cli, lop, cmd, 1);
1478 if (oap->oap_async_flags & ASYNC_URGENT)
1479 list_add(&oap->oap_urgent_item,
1482 loi_list_maint(cli, loi);
1483 RETURN(PTR_ERR(request));
1486 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1487 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1488 INIT_LIST_HEAD(&aa->aa_oaps);
1489 list_splice(&rpc_list, &aa->aa_oaps);
1490 INIT_LIST_HEAD(&rpc_list);
1493 if (cmd == OBD_BRW_READ) {
1494 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1495 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
1497 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1498 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1499 cli->cl_w_in_flight);
1503 spin_lock(&cli->cl_loi_list_lock);
1505 if (cmd == OBD_BRW_READ)
1506 cli->cl_r_in_flight++;
1508 cli->cl_w_in_flight++;
1509 /* queued sync pages can be torn down while the pages
1510 * were between the pending list and the rpc */
1511 list_for_each(pos, &aa->aa_oaps) {
1512 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1513 if (oap->oap_interrupted) {
1514 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1516 ptlrpc_mark_interrupted(request);
1521 CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %dr/%dw in flight\n",
1522 request, page_count, aa, cli->cl_r_in_flight,
1523 cli->cl_w_in_flight);
1525 oap->oap_request = ptlrpc_request_addref(request);
1526 request->rq_interpret_reply = brw_interpret_oap;
1527 ptlrpcd_add_req(request);
1531 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1537 if (lop->lop_num_pending == 0)
1540 /* if we have an invalid import we want to drain the queued pages
1541 * by forcing them through rpcs that immediately fail and complete
1542 * the pages. recovery relies on this to empty the queued pages
1543 * before canceling the locks and evicting down the llite pages */
1544 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1547 /* stream rpcs in queue order as long as there is an urgent page
1548 * queued. this is our cheap solution for good batching in the case
1549 * where writepage marks some random page in the middle of the file as
1550 * urgent because of, say, memory pressure */
1551 if (!list_empty(&lop->lop_urgent))
1554 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1555 optimal = cli->cl_max_pages_per_rpc;
1556 if (cmd == OBD_BRW_WRITE) {
1557 /* trigger a write rpc stream as long as there are dirtiers
1558 * waiting for space. as they're waiting, they're not going to
1559 * create more pages to coalesce with what's waiting. */
1560 if (!list_empty(&cli->cl_cache_waiters))
1563 /* *2 to avoid triggering rpcs that would want to include pages
1564 * that are being queued but which can't be made ready until
1565 * the queuer finishes with the page. this is a wart for
1566 * llite::commit_write() */
1569 if (lop->lop_num_pending >= optimal)
1575 static void on_list(struct list_head *item, struct list_head *list,
1578 if (list_empty(item) && should_be_on)
1579 list_add_tail(item, list);
1580 else if (!list_empty(item) && !should_be_on)
1581 list_del_init(item);
1584 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1585 * can find pages to build into rpcs quickly */
1586 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1588 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1589 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1590 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1592 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1593 loi->loi_write_lop.lop_num_pending);
1595 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
1596 loi->loi_read_lop.lop_num_pending);
1599 #define LOI_DEBUG(LOI, STR, args...) \
1600 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
1601 !list_empty(&(LOI)->loi_cli_item), \
1602 (LOI)->loi_write_lop.lop_num_pending, \
1603 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
1604 (LOI)->loi_read_lop.lop_num_pending, \
1605 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
1608 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
1611 /* first return all objects which we already know to have
1612 * pages ready to be stuffed into rpcs */
1613 if (!list_empty(&cli->cl_loi_ready_list))
1614 RETURN(list_entry(cli->cl_loi_ready_list.next,
1615 struct lov_oinfo, loi_cli_item));
1617 /* then if we have cache waiters, return all objects with queued
1618 * writes. This is especially important when many small files
1619 * have filled up the cache and not been fired into rpcs because
1620 * they don't pass the nr_pending/object threshold */
1621 if (!list_empty(&cli->cl_cache_waiters) &&
1622 !list_empty(&cli->cl_loi_write_list))
1623 RETURN(list_entry(cli->cl_loi_write_list.next,
1624 struct lov_oinfo, loi_write_item));
1626 /* then return all queued objects when we have an invalid import
1627 * so that they get flushed */
1628 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
1629 if (!list_empty(&cli->cl_loi_write_list))
1630 RETURN(list_entry(cli->cl_loi_write_list.next,
1631 struct lov_oinfo, loi_write_item));
1632 if (!list_empty(&cli->cl_loi_read_list))
1633 RETURN(list_entry(cli->cl_loi_read_list.next,
1634 struct lov_oinfo, loi_read_item));
1639 /* called with the loi list lock held */
1640 static void osc_check_rpcs(struct client_obd *cli)
1642 struct lov_oinfo *loi;
1643 int rc = 0, race_counter = 0;
1646 while ((loi = osc_next_loi(cli)) != NULL) {
1647 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
1649 if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
1652 /* attempt some read/write balancing by alternating between
1653 * reads and writes in an object. The makes_rpc checks here
1654 * would be redundant if we were getting read/write work items
1655 * instead of objects. we don't want send_oap_rpc to drain a
1656 * partial read pending queue when we're given this object to
1657 * do io on writes while there are cache waiters */
1658 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
1659 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
1660 &loi->loi_write_lop);
1668 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
1669 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
1670 &loi->loi_read_lop);
1679 /* attempt some inter-object balancing by issuing rpcs
1680 * for each object in turn */
1681 if (!list_empty(&loi->loi_cli_item))
1682 list_del_init(&loi->loi_cli_item);
1683 if (!list_empty(&loi->loi_write_item))
1684 list_del_init(&loi->loi_write_item);
1685 if (!list_empty(&loi->loi_read_item))
1686 list_del_init(&loi->loi_read_item);
1688 loi_list_maint(cli, loi);
1690 /* send_oap_rpc fails with 0 when make_ready tells it to
1691 * back off. llite's make_ready does this when it tries
1692 * to lock a page queued for write that is already locked.
1693 * we want to try sending rpcs from many objects, but we
1694 * don't want to spin failing with 0. */
1695 if (race_counter == 10)
1701 /* we're trying to queue a page in the osc so we're subject to the
1702 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
1703 * If the osc's queued pages are already at that limit, then we want to sleep
1704 * until there is space in the osc's queue for us. We also may be waiting for
1705 * write credits from the OST if there are RPCs in flight that may return some
1706 * before we fall back to sync writes.
1708 * We need this to know our allocation was granted in the presence of signals */
1709 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1713 spin_lock(&cli->cl_loi_list_lock);
1714 rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
1715 spin_unlock(&cli->cl_loi_list_lock);
1719 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
1720 * grant or cache space. */
1721 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
1722 struct osc_async_page *oap)
1724 struct osc_cache_waiter ocw;
1725 struct l_wait_info lwi = { 0 };
1726 struct timeval start, stop;
1728 CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
1729 cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
1730 cli->cl_avail_grant);
1732 if (cli->cl_dirty_max < PAGE_SIZE)
1735 /* Hopefully normal case - cache space and write credits available */
1736 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1737 cli->cl_avail_grant >= PAGE_SIZE) {
1738 /* account for ourselves */
1739 osc_consume_write_grant(cli, oap);
1743 /* Make sure that there are write rpcs in flight to wait for. This
1744 * is a little silly as this object may not have any pending but
1745 * other objects sure might. */
1746 if (cli->cl_w_in_flight) {
1747 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1748 init_waitqueue_head(&ocw.ocw_waitq);
1752 loi_list_maint(cli, loi);
1753 osc_check_rpcs(cli);
1754 spin_unlock(&cli->cl_loi_list_lock);
1756 CDEBUG(0, "sleeping for cache space\n");
1757 do_gettimeofday(&start);
1758 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1759 do_gettimeofday(&stop);
1760 spin_lock(&cli->cl_loi_list_lock);
1761 lprocfs_stime_record(&cli->cl_enter_stime, &stop, &start);
1762 if (!list_empty(&ocw.ocw_entry)) {
1763 list_del(&ocw.ocw_entry);
1772 /* the companion to enter_cache, called when an oap is no longer part of the
1773 * dirty accounting: either writeback completed or truncate happened before
1774 * writing started. must be called with the loi lock held. */
1775 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1780 if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
1785 oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
1786 cli->cl_dirty -= PAGE_SIZE;
1788 cli->cl_lost_grant += PAGE_SIZE;
1789 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
1790 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
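/* Allocate and initialise an osc_async_page for the given page; it is
 * handed back to the caller as an opaque cookie. */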
1796 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
1797 struct lov_oinfo *loi, struct page *page,
1798 obd_off offset, struct obd_async_page_ops *ops,
1799 void *data, void **res)
1801 struct osc_async_page *oap;
1804 OBD_ALLOC(oap, sizeof(*oap));
1808 oap->oap_magic = OAP_MAGIC;
1809 oap->oap_cli = &exp->exp_obd->u.cli;
1812 oap->oap_caller_ops = ops;
1813 oap->oap_caller_data = data;
1815 oap->oap_page = page;
1816 oap->oap_obj_off = offset;
1818 INIT_LIST_HEAD(&oap->oap_pending_item);
1819 INIT_LIST_HEAD(&oap->oap_urgent_item);
1820 INIT_LIST_HEAD(&oap->oap_rpc_item);
1822 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
1824 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
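/* Queue a prepared async page for IO: writes first reserve cache/grant space
 * via osc_enter_cache(), then the oap goes on the object's pending (and, if
 * urgent, urgent) lists and an RPC is kicked off if one is warranted. */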
1829 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1830 struct lov_oinfo *loi, void *cookie,
1831 int cmd, obd_off off, int count,
1832 obd_flags brw_flags, enum async_flags async_flags)
1834 struct client_obd *cli = &exp->exp_obd->u.cli;
1835 struct osc_async_page *oap;
1836 struct loi_oap_pages *lop;
1840 oap = OAP_FROM_COOKIE(cookie);
1842 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1845 if (!list_empty(&oap->oap_pending_item) ||
1846 !list_empty(&oap->oap_urgent_item) ||
1847 !list_empty(&oap->oap_rpc_item))
1851 loi = &lsm->lsm_oinfo[0];
1853 spin_lock(&cli->cl_loi_list_lock);
1856 oap->oap_async_flags = async_flags;
1857 oap->oap_page_off = off;
1858 oap->oap_count = count;
1859 oap->oap_brw_flags = brw_flags;
1861 if (cmd == OBD_BRW_WRITE) {
1862 rc = osc_enter_cache(cli, loi, oap);
1864 spin_unlock(&cli->cl_loi_list_lock);
1867 lop = &loi->loi_write_lop;
1869 lop = &loi->loi_read_lop;
1872 if (oap->oap_async_flags & ASYNC_URGENT)
1873 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1874 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1875 lop_update_pending(cli, lop, cmd, 1);
1877 loi_list_maint(cli, loi);
1879 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
1882 osc_check_rpcs(cli);
1883 spin_unlock(&cli->cl_loi_list_lock);
1888 /* aka (~was & now & flag), but this is more clear :) */
1889 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
1891 static int osc_set_async_flags(struct obd_export *exp,
1892 struct lov_stripe_md *lsm,
1893 struct lov_oinfo *loi, void *cookie,
1894 obd_flags async_flags)
1896 struct client_obd *cli = &exp->exp_obd->u.cli;
1897 struct loi_oap_pages *lop;
1898 struct osc_async_page *oap;
1902 oap = OAP_FROM_COOKIE(cookie);
1904 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1908 loi = &lsm->lsm_oinfo[0];
1910 if (oap->oap_cmd == OBD_BRW_WRITE) {
1911 lop = &loi->loi_write_lop;
1913 lop = &loi->loi_read_lop;
1916 spin_lock(&cli->cl_loi_list_lock);
1918 if (list_empty(&oap->oap_pending_item))
1919 GOTO(out, rc = -EINVAL);
1921 if ((oap->oap_async_flags & async_flags) == async_flags)
1924 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
1925 oap->oap_async_flags |= ASYNC_READY;
1927 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
1928 if (list_empty(&oap->oap_rpc_item)) {
1929 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1930 loi_list_maint(cli, loi);
1934 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
1935 oap->oap_async_flags);
1937 osc_check_rpcs(cli);
1938 spin_unlock(&cli->cl_loi_list_lock);
1942 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1943 struct lov_oinfo *loi,
1944 struct obd_io_group *oig, void *cookie,
1945 int cmd, obd_off off, int count,
1946 obd_flags brw_flags,
1947 obd_flags async_flags)
1949 struct client_obd *cli = &exp->exp_obd->u.cli;
1950 struct osc_async_page *oap;
1951 struct loi_oap_pages *lop;
1954 oap = OAP_FROM_COOKIE(cookie);
1956 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1959 if (!list_empty(&oap->oap_pending_item) ||
1960 !list_empty(&oap->oap_urgent_item) ||
1961 !list_empty(&oap->oap_rpc_item))
1965 loi = &lsm->lsm_oinfo[0];
1967 spin_lock(&cli->cl_loi_list_lock);
1970 oap->oap_page_off = off;
1971 oap->oap_count = count;
1972 oap->oap_brw_flags = brw_flags;
1973 oap->oap_async_flags = async_flags;
1975 if (cmd == OBD_BRW_WRITE)
1976 lop = &loi->loi_write_lop;
1978 lop = &loi->loi_read_lop;
1980 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
1981 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
1983 oig_add_one(oig, &oap->oap_occ);
1986 LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);
1988 spin_unlock(&cli->cl_loi_list_lock);
1993 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
1994 struct loi_oap_pages *lop, int cmd)
1996 struct list_head *pos, *tmp;
1997 struct osc_async_page *oap;
1999 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
2000 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
2001 list_del(&oap->oap_pending_item);
2002 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2003 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2004 lop_update_pending(cli, lop, cmd, 1);
2006 loi_list_maint(cli, loi);
2009 static int osc_trigger_group_io(struct obd_export *exp,
2010 struct lov_stripe_md *lsm,
2011 struct lov_oinfo *loi,
2012 struct obd_io_group *oig)
2014 struct client_obd *cli = &exp->exp_obd->u.cli;
2018 loi = &lsm->lsm_oinfo[0];
2020 spin_lock(&cli->cl_loi_list_lock);
2022 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2023 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2025 osc_check_rpcs(cli);
2026 spin_unlock(&cli->cl_loi_list_lock);
2031 static int osc_teardown_async_page(struct obd_export *exp,
2032 struct lov_stripe_md *lsm,
2033 struct lov_oinfo *loi, void *cookie)
2035 struct client_obd *cli = &exp->exp_obd->u.cli;
2036 struct loi_oap_pages *lop;
2037 struct osc_async_page *oap;
2041 oap = OAP_FROM_COOKIE(cookie);
2044 loi = &lsm->lsm_oinfo[0];
2046 if (oap->oap_cmd == OBD_BRW_WRITE) {
2047 lop = &loi->loi_write_lop;
2049 lop = &loi->loi_read_lop;
2052 spin_lock(&cli->cl_loi_list_lock);
2054 if (!list_empty(&oap->oap_rpc_item))
2055 GOTO(out, rc = -EBUSY);
2057 osc_exit_cache(cli, oap, 0);
2058 osc_wake_cache_waiters(cli);
2060 if (!list_empty(&oap->oap_urgent_item)) {
2061 list_del_init(&oap->oap_urgent_item);
2062 oap->oap_async_flags &= ~ASYNC_URGENT;
2064 if (!list_empty(&oap->oap_pending_item)) {
2065 list_del_init(&oap->oap_pending_item);
2066 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2068 loi_list_maint(cli, loi);
2070 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2072 spin_unlock(&cli->cl_loi_list_lock);
2074 OBD_FREE(oap, sizeof(*oap));
2079 /* Note: caller will lock/unlock, and set uptodate on the pages */
2080 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2081 static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
2082 struct lov_stripe_md *lsm, obd_count page_count,
2083 struct brw_page *pga)
2085 struct ptlrpc_request *request = NULL;
2086 struct ost_body *body;
2087 struct niobuf_remote *nioptr;
2088 struct obd_ioobj *iooptr;
2089 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2093 /* XXX does not handle 'new' brw protocol */
2095 size[1] = sizeof(struct obd_ioobj);
2096 size[2] = page_count * sizeof(*nioptr);
2098 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2099 OST_SAN_READ, 3, size, NULL);
2103 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2104 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2105 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2106 sizeof(*nioptr) * page_count);
2108 memcpy(&body->oa, oa, sizeof(body->oa));
2110 obdo_to_ioobj(oa, iooptr);
2111 iooptr->ioo_bufcnt = page_count;
2113 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2114 LASSERT(PageLocked(pga[mapped].pg));
2115 LASSERT(mapped == 0 ||
2116 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2118 nioptr->offset = pga[mapped].disk_offset;
2119 nioptr->len = pga[mapped].count;
2120 nioptr->flags = pga[mapped].flag;
2123 size[1] = page_count * sizeof(*nioptr);
2124 request->rq_replen = lustre_msg_size(2, size);
2126 rc = ptlrpc_queue_wait(request);
2130 body = lustre_swab_repbuf(request, 0, sizeof(*body),
2131 lustre_swab_ost_body);
2133 CERROR("Can't unpack body\n");
2134 GOTO(out_req, rc = -EPROTO);
2137 memcpy(oa, &body->oa, sizeof(*oa));
2139 swab = lustre_msg_swabbed(request->rq_repmsg);
2140 LASSERT_REPSWAB(request, 1);
2141 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2143 /* nioptr missing or short */
2144 GOTO(out_req, rc = -EPROTO);
2148 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2149 struct page *page = pga[mapped].pg;
2150 struct buffer_head *bh;
2154 lustre_swab_niobuf_remote (nioptr);
2156 /* get the SAN device associated with this export */
2157 LASSERT(exp->exp_obd != NULL);
2158 dev = exp->exp_obd->u.cli.cl_sandev;
2161 if (!nioptr->offset) {
2162 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
2163 page->mapping->host->i_ino,
2165 memset(page_address(page), 0, PAGE_SIZE);
2169 if (!page->buffers) {
2170 create_empty_buffers(page, dev, PAGE_SIZE);
2173 clear_bit(BH_New, &bh->b_state);
2174 set_bit(BH_Mapped, &bh->b_state);
2175 bh->b_blocknr = (unsigned long)nioptr->offset;
2177 clear_bit(BH_Uptodate, &bh->b_state);
2179 ll_rw_block(READ, 1, &bh);
2183 /* if buffer already existed, it must be the
2184 * one we mapped before, check it */
2185 LASSERT(!test_bit(BH_New, &bh->b_state));
2186 LASSERT(test_bit(BH_Mapped, &bh->b_state));
2187 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
2189 /* wait for its io completion */
2190 if (test_bit(BH_Lock, &bh->b_state))
2193 if (!test_bit(BH_Uptodate, &bh->b_state))
2194 ll_rw_block(READ, 1, &bh);
2198 /* must do synchronous write here */
2200 if (!buffer_uptodate(bh)) {
2208 ptlrpc_req_finished(request);
2212 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2213 struct lov_stripe_md *lsm, obd_count page_count,
2214 struct brw_page *pga)
2216 struct ptlrpc_request *request = NULL;
2217 struct ost_body *body;
2218 struct niobuf_remote *nioptr;
2219 struct obd_ioobj *iooptr;
2220 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2224 size[1] = sizeof(struct obd_ioobj);
2225 size[2] = page_count * sizeof(*nioptr);
2227 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2228 OST_SAN_WRITE, 3, size, NULL);
2232 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
2233 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
2234 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2235 sizeof (*nioptr) * page_count);
2237 memcpy(&body->oa, oa, sizeof(body->oa));
2239 obdo_to_ioobj(oa, iooptr);
2240 iooptr->ioo_bufcnt = page_count;
2243 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2244 LASSERT(PageLocked(pga[mapped].pg));
2245 LASSERT(mapped == 0 ||
2246 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2248 nioptr->offset = pga[mapped].disk_offset;
2249 nioptr->len = pga[mapped].count;
2250 nioptr->flags = pga[mapped].flag;
2253 size[1] = page_count * sizeof(*nioptr);
2254 request->rq_replen = lustre_msg_size(2, size);
2256 rc = ptlrpc_queue_wait(request);
2260 swab = lustre_msg_swabbed (request->rq_repmsg);
2261 LASSERT_REPSWAB (request, 1);
2262 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2264 CERROR("absent/short niobuf array\n");
2265 GOTO(out_req, rc = -EPROTO);
2269 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2270 struct page *page = pga[mapped].pg;
2271 struct buffer_head *bh;
2275 lustre_swab_niobuf_remote (nioptr);
2277                 /* get the SAN device associated with this export */
2278 LASSERT(exp->exp_obd != NULL);
2279 dev = exp->exp_obd->u.cli.cl_sandev;
2281 if (!page->buffers) {
2282 create_empty_buffers(page, dev, PAGE_SIZE);
2285 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2286 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2287 LASSERT(page->buffers->b_blocknr ==
2288 (unsigned long)nioptr->offset);
2294                 /* if the buffer is locked, wait for its I/O completion */
2295 if (test_bit(BH_Lock, &bh->b_state))
2298 clear_bit(BH_New, &bh->b_state);
2299 set_bit(BH_Mapped, &bh->b_state);
2301                 /* override the block number */
2302 bh->b_blocknr = (unsigned long)nioptr->offset;
2304                 /* we are about to write it, so mark it uptodate and dirty;
2306                  * the page lock should guarantee no race condition here */
2307 set_bit(BH_Uptodate, &bh->b_state);
2308 set_bit(BH_Dirty, &bh->b_state);
2310 ll_rw_block(WRITE, 1, &bh);
2312                 /* must do a synchronous write here */
2314 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2322 ptlrpc_req_finished(request);
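/* Split a brw into chunks of at most PTLRPC_MAX_BRW_PAGES pages and hand
 * each chunk to sanosc_brw_read()/sanosc_brw_write().  For illustration
 * only: if PTLRPC_MAX_BRW_PAGES were 1024, a 2500-page write would go out
 * as three requests of 1024, 1024 and 452 pages. */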
2326 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2327 struct lov_stripe_md *lsm, obd_count page_count,
2328 struct brw_page *pga, struct obd_trans_info *oti)
2332 while (page_count) {
2333 obd_count pages_per_brw;
2336 if (page_count > PTLRPC_MAX_BRW_PAGES)
2337 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
2339 pages_per_brw = page_count;
2341 if (cmd & OBD_BRW_WRITE)
2342 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw,pga);
2344 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2349 page_count -= pages_per_brw;
2350 pga += pages_per_brw;
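/* Attach caller data (in practice the inode) to a lock's l_ast_data so the
 * blocking/completion callbacks can find it later.  Pre-existing but
 * different l_ast_data is only tolerated while the old inode is being
 * freed; anything else is reported as an inconsistency. */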
2357 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2359 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2362 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
2366 l_lock(&lock->l_resource->lr_namespace->ns_lock);
2368 if (lock->l_ast_data && lock->l_ast_data != data) {
2369 struct inode *new_inode = data;
2370 struct inode *old_inode = lock->l_ast_data;
2371 if (!(old_inode->i_state & I_FREEING))
2372 LDLM_ERROR(lock, "inconsistent l_ast_data found");
2373 LASSERTF(old_inode->i_state & I_FREEING,
2374 "Found existing inode %p/%lu/%u state %lu in lock: "
2375 "setting data to %p/%lu/%u\n", old_inode,
2376 old_inode->i_ino, old_inode->i_generation,
2378 new_inode, new_inode->i_ino, new_inode->i_generation);
2381 lock->l_ast_data = data;
2382 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
2383 LDLM_LOCK_PUT(lock);
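/* Run @replace over every lock on the object's resource (built from the
 * stripe's object id and group) so their callback data can be updated. */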
2386 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2387 ldlm_iterator_t replace, void *data)
2389 struct ldlm_res_id res_id = { .name = {0} };
2390 struct obd_device *obd = class_exp2obd(exp);
2392 res_id.name[0] = lsm->lsm_object_id;
2393 res_id.name[2] = lsm->lsm_object_gr;
2394 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
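/* Take an extent lock on an OST object.  The requested extent is widened
 * to page boundaries, locks we already hold are matched first (a PW lock
 * satisfies a PR request as well), and only if nothing matches is an
 * LDLM_ENQUEUE sent, optionally with an intent.  On success, or when an
 * intent enqueue is aborted, the size and block count from the OST's LVB
 * are copied into loi_rss/loi_blocks. */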
2398 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2399 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2400 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2401 void *data, __u32 lvb_len, void *lvb_swabber,
2402 struct lustre_handle *lockh)
2404 struct obd_device *obd = exp->exp_obd;
2405 struct ldlm_res_id res_id = { .name = {0} };
2407 struct ldlm_reply *rep;
2408 struct ptlrpc_request *req = NULL;
2412 res_id.name[0] = lsm->lsm_object_id;
2413 res_id.name[2] = lsm->lsm_object_gr;
2415 /* Filesystem lock extents are extended to page boundaries so that
2416 * dealing with the page cache is a little smoother. */
2417 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2418 policy->l_extent.end |= ~PAGE_MASK;
2420 if (lsm->lsm_oinfo->loi_kms_valid == 0)
2423 /* Next, search for already existing extent locks that will cover us */
2424 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2427 osc_set_data_with_check(lockh, data);
2428 if (*flags & LDLM_FL_HAS_INTENT) {
2429 /* I would like to be able to ASSERT here that rss <=
2430 * kms, but I can't, for reasons which are explained in
2433 /* We already have a lock, and it's referenced */
2437 /* If we're trying to read, we also search for an existing PW lock. The
2438 * VFS and page cache already protect us locally, so lots of readers/
2439 * writers can share a single PW lock.
2441 * There are problems with conversion deadlocks, so instead of
2442  * converting a read lock to a write lock, we'll just enqueue a new one.
2445 * At some point we should cancel the read lock instead of making them
2446 * send us a blocking callback, but there are problems with canceling
2447 * locks out from other users right now, too. */
2449 if (mode == LCK_PR) {
2450 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2451 policy, LCK_PW, lockh);
2453 /* FIXME: This is not incredibly elegant, but it might
2454 * be more elegant than adding another parameter to
2455 * lock_match. I want a second opinion. */
2456 ldlm_lock_addref(lockh, LCK_PR);
2457 ldlm_lock_decref(lockh, LCK_PW);
2458 osc_set_data_with_check(lockh, data);
2462 if (mode == LCK_PW) {
2463 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2464 policy, LCK_PR, lockh);
2466 rc = ldlm_cli_convert(lockh, mode, flags);
2468 /* Update readers/writers accounting */
2469 ldlm_lock_addref(lockh, LCK_PW);
2470 ldlm_lock_decref(lockh, LCK_PR);
2471 osc_set_data_with_check(lockh, data);
2474                         /* If the conversion failed, we need to drop the refcount
2475                          * on the matched lock before we get a new one */
2476                         /* XXX Wouldn't it save us some effort if we cancelled the PR
2477                          * lock here? We are going to take the PW lock anyway, and it
2478                          * will invalidate the PR lock */
2479 ldlm_lock_decref(lockh, LCK_PR);
2480 if (rc != EDEADLOCK) {
2511 if (*flags & LDLM_FL_HAS_INTENT) {
2512 int size[2] = {0, sizeof(struct ldlm_request)};
2514 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
2515 LDLM_ENQUEUE, 2, size, NULL);
2519 size[0] = sizeof(*rep);
2520 size[1] = sizeof(lvb);
2521 req->rq_replen = lustre_msg_size(2, size);
2523 rc = ldlm_cli_enqueue(exp, req, obd->obd_namespace, res_id, type,
2524 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2525 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
2527 if (rc == ELDLM_LOCK_ABORTED) {
2528 /* swabbed by ldlm_cli_enqueue() */
2529 LASSERT_REPSWABBED(req, 0);
2530 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
2531 LASSERT(rep != NULL);
2532 if (rep->lock_policy_res1)
2533 rc = rep->lock_policy_res1;
2535 ptlrpc_req_finished(req);
2538 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
2539 CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
2540 lvb.lvb_size, lvb.lvb_blocks);
2541 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2542 lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
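/* Non-blocking variant of the lookup in osc_enqueue(): only match locks
 * already held locally (again accepting a PW lock for a PR request) and
 * never send an enqueue to the server. */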
2548 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2549 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2550 int *flags, void *data, struct lustre_handle *lockh)
2552 struct ldlm_res_id res_id = { .name = {0} };
2553 struct obd_device *obd = exp->exp_obd;
2557 res_id.name[0] = lsm->lsm_object_id;
2558 res_id.name[2] = lsm->lsm_object_gr;
2560 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2562 /* Filesystem lock extents are extended to page boundaries so that
2563 * dealing with the page cache is a little smoother */
2564 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2565 policy->l_extent.end |= ~PAGE_MASK;
2567 /* Next, search for already existing extent locks that will cover us */
2568 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2569 policy, mode, lockh);
2571 // if (!(*flags & LDLM_FL_TEST_LOCK))
2572 osc_set_data_with_check(lockh, data);
2575 /* If we're trying to read, we also search for an existing PW lock. The
2576 * VFS and page cache already protect us locally, so lots of readers/
2577 * writers can share a single PW lock. */
2578 if (mode == LCK_PR) {
2579 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2580 policy, LCK_PW, lockh);
2581 if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
2582 /* FIXME: This is not incredibly elegant, but it might
2583 * be more elegant than adding another parameter to
2584 * lock_match. I want a second opinion. */
2585 osc_set_data_with_check(lockh, data);
2586 ldlm_lock_addref(lockh, LCK_PR);
2587 ldlm_lock_decref(lockh, LCK_PW);
2593 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2594 __u32 mode, struct lustre_handle *lockh)
2598 if (mode == LCK_GROUP)
2599 ldlm_lock_decref_and_cancel(lockh, mode);
2601 ldlm_lock_decref(lockh, mode);
2606 static int osc_cancel_unused(struct obd_export *exp,
2607 struct lov_stripe_md *lsm, int flags, void *opaque)
2609 struct obd_device *obd = class_exp2obd(exp);
2610 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
2613 res_id.name[0] = lsm->lsm_object_id;
2614 res_id.name[2] = lsm->lsm_object_gr;
2618 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
2621 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2622 unsigned long max_age)
2624 struct obd_statfs *msfs;
2625 struct ptlrpc_request *request;
2626 int rc, size = sizeof(*osfs);
2629 /* We could possibly pass max_age in the request (as an absolute
2630 * timestamp or a "seconds.usec ago") so the target can avoid doing
2631 * extra calls into the filesystem if that isn't necessary (e.g.
2632 * during mount that would help a bit). Having relative timestamps
2633 * is not so great if request processing is slow, while absolute
2634 * timestamps are not ideal because they need time synchronization. */
2635 request = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
2636 OST_STATFS, 0, NULL, NULL);
2640 request->rq_replen = lustre_msg_size(1, &size);
2641 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2643 rc = ptlrpc_queue_wait(request);
2647 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2648 lustre_swab_obd_statfs);
2650 CERROR("Can't unpack obd_statfs\n");
2651 GOTO(out, rc = -EPROTO);
2654 memcpy(osfs, msfs, sizeof(*osfs));
2658 ptlrpc_req_finished(request);
2662 /* Retrieve object striping information.
2664  * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
2665  * the maximum number of OST indices which will fit in the user buffer.
2666  * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here).
2668 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2670 struct lov_user_md lum, *lumk;
2677 rc = copy_from_user(&lum, lump, sizeof(lum));
2681 if (lum.lmm_magic != LOV_USER_MAGIC)
2684 if (lum.lmm_stripe_count > 0) {
2685 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2686 OBD_ALLOC(lumk, lum_size);
2690 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2691 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
2693 lum_size = sizeof(lum);
2697 lumk->lmm_object_id = lsm->lsm_object_id;
2698 lumk->lmm_object_gr = lsm->lsm_object_gr;
2699 lumk->lmm_stripe_count = 1;
2701 if (copy_to_user(lump, lumk, lum_size))
2705 OBD_FREE(lumk, lum_size);
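/* ioctl entry point.  Handles OBD_IOC_LOV_GET_CONFIG (reports a one-OST
 * LOV descriptor for this OSC), LL_IOC_LOV_SETSTRIPE/GETSTRIPE,
 * OBD_IOC_CLIENT_RECOVER, IOC_OSC_SET_ACTIVE and IOC_OSC_CTL_RECOVERY;
 * everything else fails with -ENOTTY. */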
2710 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2711 void *karg, void *uarg)
2713 struct obd_device *obd = exp->exp_obd;
2714 struct obd_ioctl_data *data = karg;
2718 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2721 if (!try_module_get(THIS_MODULE)) {
2722 CERROR("Can't get module. Is it alive?");
2727 case OBD_IOC_LOV_GET_CONFIG: {
2729 struct lov_desc *desc;
2730 struct obd_uuid uuid;
2734 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2735 GOTO(out, err = -EINVAL);
2737 data = (struct obd_ioctl_data *)buf;
2739 if (sizeof(*desc) > data->ioc_inllen1) {
2741 GOTO(out, err = -EINVAL);
2744 if (data->ioc_inllen2 < sizeof(uuid)) {
2746 GOTO(out, err = -EINVAL);
2749 if (data->ioc_inllen3 < sizeof(__u32)) {
2751 GOTO(out, err = -EINVAL);
2754 desc = (struct lov_desc *)data->ioc_inlbuf1;
2755 desc->ld_tgt_count = 1;
2756 desc->ld_active_tgt_count = 1;
2757 desc->ld_default_stripe_count = 1;
2758 desc->ld_default_stripe_size = 0;
2759 desc->ld_default_stripe_offset = 0;
2760 desc->ld_pattern = 0;
2761 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2762 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2763 *((__u32 *)data->ioc_inlbuf3) = 1;
2765 err = copy_to_user((void *)uarg, buf, len);
2768 obd_ioctl_freedata(buf, len);
2771 case LL_IOC_LOV_SETSTRIPE:
2772 err = obd_alloc_memmd(exp, karg);
2776 case LL_IOC_LOV_GETSTRIPE:
2777 err = osc_getstripe(karg, uarg);
2779 case OBD_IOC_CLIENT_RECOVER:
2780 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2785 case IOC_OSC_SET_ACTIVE:
2786 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2789 case IOC_OSC_CTL_RECOVERY:
2790 err = ptlrpc_import_control_recovery(obd->u.cli.cl_import,
2794 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
2795 GOTO(out, err = -ENOTTY);
2798 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2801 module_put(THIS_MODULE);
2806 static int osc_get_info(struct obd_export *exp, obd_count keylen,
2807 void *key, __u32 *vallen, void *val)
2810 if (!vallen || !val)
2813 if (keylen > strlen("lock_to_stripe") &&
2814 strcmp(key, "lock_to_stripe") == 0) {
2815 __u32 *stripe = val;
2816 *vallen = sizeof(*stripe);
2819 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2820 struct ptlrpc_request *req;
2822 char *bufs[1] = {key};
2824 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2825 OST_GET_INFO, 1, (int *)&keylen, bufs);
2829 req->rq_replen = lustre_msg_size(1, (int *)vallen);
2830 rc = ptlrpc_queue_wait(req);
2834 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2835 lustre_swab_ost_last_id);
2836 if (reply == NULL) {
2837 CERROR("Can't unpack OST last ID\n");
2838 GOTO(out, rc = -EPROTO);
2840 *((obd_id *)val) = *reply;
2842 ptlrpc_req_finished(req);
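/* Set a named tunable on this OSC.  The keys handled below are "next_id",
 * "growth_count", "unlinked", "unrecovery", "initial_recov", "async",
 * "sec" and "mds_conn".  As a purely illustrative example, a caller
 * priming the precreate machinery might do
 *
 *      obd_id last = known_last_objid;   (known_last_objid is hypothetical)
 *      rc = obd_set_info(exp, strlen("next_id"), "next_id",
 *                        sizeof(last), &last);
 *
 * which ends up in the "next_id" branch below and sets oscc_next_id to
 * last + 1. */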
2848 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2849 void *key, obd_count vallen, void *val)
2851 struct obd_device *obd = exp->exp_obd;
2852 struct obd_import *imp = class_exp2cliimp(exp);
2853 struct llog_ctxt *ctxt;
2857 if (keylen == strlen("next_id") &&
2858 memcmp(key, "next_id", strlen("next_id")) == 0) {
2859 if (vallen != sizeof(obd_id))
2861 obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1;
2862 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
2863 exp->exp_obd->obd_name,
2864 obd->u.cli.cl_oscc.oscc_next_id);
2869 if (keylen == strlen("growth_count") &&
2870 memcmp(key, "growth_count", strlen("growth_count")) == 0) {
2871 if (vallen != sizeof(int))
2873 obd->u.cli.cl_oscc.oscc_max_grow_count = *((int*)val);
2877 if (keylen == strlen("unlinked") &&
2878 memcmp(key, "unlinked", keylen) == 0) {
2879 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2880 spin_lock(&oscc->oscc_lock);
2881 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2882 spin_unlock(&oscc->oscc_lock);
2885 if (keylen == strlen("unrecovery") &&
2886 memcmp(key, "unrecovery", keylen) == 0) {
2887 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2888 spin_lock(&oscc->oscc_lock);
2889 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
2890 spin_unlock(&oscc->oscc_lock);
2893 if (keylen == strlen("initial_recov") &&
2894 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
2895 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2896 if (vallen != sizeof(int))
2898 imp->imp_initial_recov = *(int *)val;
2899                 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
2900 exp->exp_obd->obd_name,
2901 imp->imp_initial_recov);
2905 if (keylen == strlen("async") && memcmp(key, "async", keylen) == 0) {
2906 struct client_obd *cl = &obd->u.cli;
2907 if (vallen != sizeof(int))
2909 cl->cl_async = *(int *)val;
2910 CDEBUG(D_HA, "%s: set async = %d\n",
2911 obd->obd_name, cl->cl_async);
2915 if (keylen == strlen("sec") && memcmp(key, "sec", keylen) == 0) {
2916 struct client_obd *cli = &exp->exp_obd->u.cli;
2918 if (vallen == strlen("null") &&
2919 memcmp(val, "null", vallen) == 0) {
2920 cli->cl_sec_flavor = PTLRPC_SEC_NULL;
2921 cli->cl_sec_subflavor = 0;
2924 if (vallen == strlen("krb5i") &&
2925 memcmp(val, "krb5i", vallen) == 0) {
2926 cli->cl_sec_flavor = PTLRPC_SEC_GSS;
2927 cli->cl_sec_subflavor = PTLRPC_SEC_GSS_KRB5I;
2930 if (vallen == strlen("krb5p") &&
2931 memcmp(val, "krb5p", vallen) == 0) {
2932 cli->cl_sec_flavor = PTLRPC_SEC_GSS;
2933 cli->cl_sec_subflavor = PTLRPC_SEC_GSS_KRB5P;
2936 CERROR("unrecognized security type %s\n", (char*) val);
2940 if (keylen < strlen("mds_conn") || memcmp(key, "mds_conn", keylen) != 0)
2943 ctxt = llog_get_context(&exp->exp_obd->obd_llogs, LLOG_UNLINK_ORIG_CTXT);
2946 rc = llog_initiator_connect(ctxt);
2948 CERROR("cannot establish the connect for ctxt %p: %d\n",
2952 imp->imp_server_timeout = 1;
2953 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
2954 imp->imp_pingable = 1;
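/* Log handling: the OSC keeps a catalog-based unlink-origin llog context
 * (llog_lvfs_ops plus the catalog setup/cleanup/add/connect methods) and a
 * size-replay context that only needs a cancel method. */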
2960 static struct llog_operations osc_size_repl_logops = {
2961 lop_cancel: llog_obd_repl_cancel
2964 static struct llog_operations osc_unlink_orig_logops;
2965 static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
2966 struct obd_device *tgt, int count,
2967 struct llog_catid *catid)
2972 osc_unlink_orig_logops = llog_lvfs_ops;
2973 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
2974 osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
2975 osc_unlink_orig_logops.lop_add = llog_catalog_add;
2976 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
2978 rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
2979 &catid->lci_logid, &osc_unlink_orig_logops);
2983 rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
2984 &osc_size_repl_logops);
2988 static int osc_llog_finish(struct obd_device *obd,
2989 struct obd_llogs *llogs, int count)
2994 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
2998 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
3003 static int osc_connect(struct lustre_handle *exph,
3004 struct obd_device *obd, struct obd_uuid *cluuid,
3005 struct obd_connect_data *data,
3006 unsigned long connect_flags)
3010 rc = client_connect_import(exph, obd, cluuid, data, connect_flags);
3014 static int osc_disconnect(struct obd_export *exp, unsigned long flags)
3016 struct obd_device *obd = class_exp2obd(exp);
3017 struct llog_ctxt *ctxt;
3021 ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
3022 if (obd->u.cli.cl_conn_count == 1)
3023 /* flush any remaining cancel messages out to the target */
3024 llog_sync(ctxt, exp);
3026 rc = client_disconnect_export(exp, flags);
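/* React to import state changes.  On disconnect and reactivation of an
 * MDS-side OSC (imp_server_timeout set) the precreate flags are adjusted;
 * on invalidation the grant is dropped, queued pages are flushed out as
 * failing RPCs and the local namespace is cleaned up; activation and
 * deactivation are passed on to the observer obd (typically the LOV). */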
3030 static int osc_import_event(struct obd_device *obd,
3031 struct obd_import *imp,
3032 enum obd_import_event event)
3034 struct client_obd *cli;
3037 LASSERT(imp->imp_obd == obd);
3040 case IMP_EVENT_DISCON: {
3041                 /* Only do this on the MDS OSCs */
3042 if (imp->imp_server_timeout) {
3043 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3045 spin_lock(&oscc->oscc_lock);
3046 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3047 spin_unlock(&oscc->oscc_lock);
3051 case IMP_EVENT_INACTIVE: {
3052 if (obd->obd_observer)
3053 rc = obd_notify(obd->obd_observer, obd, 0, 0);
3056 case IMP_EVENT_INVALIDATE: {
3057 struct ldlm_namespace *ns = obd->obd_namespace;
3061 spin_lock(&cli->cl_loi_list_lock);
3062 cli->cl_avail_grant = 0;
3063 cli->cl_lost_grant = 0;
3064                 /* all pages go to failing RPCs due to the invalid import */
3065 osc_check_rpcs(cli);
3066 spin_unlock(&cli->cl_loi_list_lock);
3068 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3072 case IMP_EVENT_ACTIVE: {
3073                 /* Only do this on the MDS OSCs */
3074 if (imp->imp_server_timeout) {
3075 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3077 spin_lock(&oscc->oscc_lock);
3078 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3079 spin_unlock(&oscc->oscc_lock);
3082 if (obd->obd_observer)
3083 rc = obd_notify(obd->obd_observer, obd, 1, 0);
3087 CERROR("Unknown import event %d\n", event);
3093 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
3095 struct lprocfs_static_vars lvars;
3099         lprocfs_init_vars(osc, &lvars);
3100 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3104 rc = lproc_osc_attach_seqstat(dev);
3106 lprocfs_obd_detach(dev);
3110 ptlrpc_lprocfs_register_obd(dev);
3114 static int osc_detach(struct obd_device *dev)
3116 ptlrpc_lprocfs_unregister_obd(dev);
3117 return lprocfs_obd_detach(dev);
3120 static int osc_setup(struct obd_device *obd, obd_count len, void *buf)
3124 rc = ptlrpcd_addref();
3128 rc = client_obd_setup(obd, len, buf);
3137 static int osc_cleanup(struct obd_device *obd, int flags)
3139 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3142 rc = ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
3143 LDLM_FL_CONFIG_CHANGE, NULL);
3147 spin_lock(&oscc->oscc_lock);
3148 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
3149 oscc->oscc_flags |= OSCC_FLAG_EXITING;
3150 spin_unlock(&oscc->oscc_lock);
3152 rc = client_obd_cleanup(obd, flags);
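/* Method table for the "osc" obd type; connection handling is shared with
 * the generic client code, everything else is implemented above. */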
3157 struct obd_ops osc_obd_ops = {
3158 .o_owner = THIS_MODULE,
3159 .o_attach = osc_attach,
3160 .o_detach = osc_detach,
3161 .o_setup = osc_setup,
3162 .o_cleanup = osc_cleanup,
3163 .o_add_conn = client_import_add_conn,
3164 .o_del_conn = client_import_del_conn,
3165 .o_connect = osc_connect,
3166 .o_disconnect = osc_disconnect,
3167 .o_statfs = osc_statfs,
3168 .o_packmd = osc_packmd,
3169 .o_unpackmd = osc_unpackmd,
3170 .o_create = osc_create,
3171 .o_destroy = osc_destroy,
3172 .o_getattr = osc_getattr,
3173 .o_getattr_async = osc_getattr_async,
3174 .o_setattr = osc_setattr,
3176 .o_brw_async = osc_brw_async,
3177 .o_prep_async_page = osc_prep_async_page,
3178 .o_queue_async_io = osc_queue_async_io,
3179 .o_set_async_flags = osc_set_async_flags,
3180 .o_queue_group_io = osc_queue_group_io,
3181 .o_trigger_group_io = osc_trigger_group_io,
3182 .o_teardown_async_page = osc_teardown_async_page,
3183 .o_punch = osc_punch,
3185 .o_enqueue = osc_enqueue,
3186 .o_match = osc_match,
3187 .o_change_cbdata = osc_change_cbdata,
3188 .o_cancel = osc_cancel,
3189 .o_cancel_unused = osc_cancel_unused,
3190 .o_iocontrol = osc_iocontrol,
3191 .o_get_info = osc_get_info,
3192 .o_set_info = osc_set_info,
3193 .o_import_event = osc_import_event,
3194 .o_llog_init = osc_llog_init,
3195 .o_llog_finish = osc_llog_finish,
3198 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
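/* On 2.4 kernels a second, "sanosc" obd type is registered.  It shares
 * most methods with the regular OSC but uses client_sanobd_setup(), the
 * synchronous sanosc_brw() path and the plain client disconnect/cleanup
 * helpers. */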
3199 struct obd_ops sanosc_obd_ops = {
3200 .o_owner = THIS_MODULE,
3201 .o_attach = osc_attach,
3202 .o_detach = osc_detach,
3203 .o_cleanup = client_obd_cleanup,
3204 .o_add_conn = client_import_add_conn,
3205 .o_del_conn = client_import_del_conn,
3206 .o_connect = osc_connect,
3207 .o_disconnect = client_disconnect_export,
3208 .o_statfs = osc_statfs,
3209 .o_packmd = osc_packmd,
3210 .o_unpackmd = osc_unpackmd,
3211 .o_create = osc_real_create,
3212 .o_destroy = osc_destroy,
3213 .o_getattr = osc_getattr,
3214 .o_getattr_async = osc_getattr_async,
3215 .o_setattr = osc_setattr,
3216 .o_setup = client_sanobd_setup,
3217 .o_brw = sanosc_brw,
3218 .o_punch = osc_punch,
3220 .o_enqueue = osc_enqueue,
3221 .o_match = osc_match,
3222 .o_change_cbdata = osc_change_cbdata,
3223 .o_cancel = osc_cancel,
3224 .o_cancel_unused = osc_cancel_unused,
3225 .o_iocontrol = osc_iocontrol,
3226 .o_import_event = osc_import_event,
3227 .o_llog_init = osc_llog_init,
3228 .o_llog_finish = osc_llog_finish,
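/* Module init: set up the lprocfs variables and register the "osc" type
 * (and "sanosc" on 2.4 kernels; if that second registration fails the
 * "osc" type is unregistered again before returning the error). */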
3232 int __init osc_init(void)
3234 struct lprocfs_static_vars lvars;
3235 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3236 struct lprocfs_static_vars sanlvars;
3241 lprocfs_init_vars(osc, &lvars);
3242 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3243 lprocfs_init_vars(osc, &sanlvars);
3246 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
3251 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3252 rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
3253 LUSTRE_SANOSC_NAME);
3255 class_unregister_type(LUSTRE_OSC_NAME);
3262 static void /*__exit*/ osc_exit(void)
3264 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3265 class_unregister_type(LUSTRE_SANOSC_NAME);
3267 class_unregister_type(LUSTRE_OSC_NAME);
3270 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3271 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3272 MODULE_LICENSE("GPL");
3274 module_init(osc_init);
3275 module_exit(osc_exit);