1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table.)
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_OSC
35 # include <linux/version.h>
36 # include <linux/module.h>
37 # include <linux/mm.h>
38 # include <linux/highmem.h>
39 # include <linux/ctype.h>
40 # include <linux/init.h>
41 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/workqueue.h>
43 # include <linux/smp_lock.h>
45 # include <linux/locks.h>
47 #else /* __KERNEL__ */
48 # include <liblustre.h>
51 #include <linux/lustre_dlm.h>
52 #include <libcfs/kp30.h>
53 #include <linux/lustre_net.h>
54 #include <linux/lustre_sec.h>
55 #include <lustre/lustre_user.h>
56 #include <linux/obd_ost.h>
57 #include <linux/obd_lov.h>
63 #include <linux/lustre_ha.h>
64 #include <linux/lprocfs_status.h>
65 #include <linux/lustre_log.h>
66 #include "osc_internal.h"
68 /* Pack OSC object metadata for disk storage (LE byte order). */
69 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
70 struct lov_stripe_md *lsm)
75 lmm_size = sizeof(**lmmp);
80 OBD_FREE(*lmmp, lmm_size);
86 OBD_ALLOC(*lmmp, lmm_size);
92 LASSERT(lsm->lsm_object_id);
93 LASSERT(lsm->lsm_object_gr);
94 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
95 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
101 /* Unpack OSC object metadata from disk storage (LE byte order). */
102 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
103 struct lov_mds_md *lmm, int lmm_bytes)
109 if (lmm_bytes < sizeof (*lmm)) {
110 CERROR("lov_mds_md too small: %d, need %d\n",
111 lmm_bytes, (int)sizeof(*lmm));
114 /* XXX LOV_MAGIC etc check? */
116 if (lmm->lmm_object_id == 0) {
117 CERROR("lov_mds_md: zero lmm_object_id\n");
122 lsm_size = lov_stripe_md_size(1);
126 if (*lsmp != NULL && lmm == NULL) {
127 OBD_FREE(*lsmp, lsm_size);
133 OBD_ALLOC(*lsmp, lsm_size);
136 loi_init((*lsmp)->lsm_oinfo);
140 /* XXX zero *lsmp? */
141 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
142 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
143 LASSERT((*lsmp)->lsm_object_id);
144 LASSERT((*lsmp)->lsm_object_gr);
147 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
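/* Reply callback for async getattr: unpack the ost_body and copy the returned
 * attributes into the caller's obdo; on unpack failure mark the obdo invalid. */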
152 static int osc_getattr_interpret(struct ptlrpc_request *req,
153 struct osc_getattr_async_args *aa, int rc)
155 struct ost_body *body;
161 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
163 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
164 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
166 /* This should really be sent by the OST */
167 aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
168 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
170 CERROR("can't unpack ost_body\n");
172 aa->aa_oa->o_valid = 0;
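/* Asynchronous getattr: add an OST_GETATTR request to the given request set;
 * the reply is handled by osc_getattr_interpret(). */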
178 static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
179 struct lov_stripe_md *md,
180 struct ptlrpc_request_set *set)
182 struct ptlrpc_request *request;
183 struct ost_body *body;
184 int size = sizeof(*body);
185 struct osc_getattr_async_args *aa;
188 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
189 OST_GETATTR, 1, &size, NULL);
193 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
194 memcpy(&body->oa, oa, sizeof(*oa));
196 request->rq_replen = lustre_msg_size(1, &size);
197 request->rq_interpret_reply = osc_getattr_interpret;
199 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
200 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
203 ptlrpc_set_add_req (set, request);
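/* Synchronous getattr: send OST_GETATTR and wait for the reply, copying the
 * returned attributes back into the caller's obdo. */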
207 static int osc_getattr(struct obd_export *exp, struct obdo *oa,
208 struct lov_stripe_md *md)
210 struct ptlrpc_request *request;
211 struct ost_body *body;
212 int rc, size = sizeof(*body);
215 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
216 OST_GETATTR, 1, &size, NULL);
220 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
221 memcpy(&body->oa, oa, sizeof(*oa));
223 request->rq_replen = lustre_msg_size(1, &size);
225 rc = ptlrpc_queue_wait(request);
227 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
231 body = lustre_swab_repbuf(request, 0, sizeof (*body),
232 lustre_swab_ost_body);
234 CERROR ("can't unpack ost_body\n");
235 GOTO (out, rc = -EPROTO);
238 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
239 memcpy(oa, &body->oa, sizeof(*oa));
241 /* This should really be sent by the OST */
242 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
243 oa->o_valid |= OBD_MD_FLBLKSZ;
247 ptlrpc_req_finished(request);
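/* Send OST_SETATTR; if the caller flags the operation as async it is handed
 * to ptlrpcd, otherwise we queue the request and wait for the reply. */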
251 static int osc_setattr(struct obd_export *exp, struct obdo *oa,
252 struct lov_stripe_md *md, struct obd_trans_info *oti)
254 struct ptlrpc_request *request;
255 struct ost_body *body;
256 int rc, size = sizeof(*body);
259 LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);
261 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
262 OST_SETATTR, 1, &size, NULL);
266 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
267 memcpy(&body->oa, oa, sizeof(*oa));
269 request->rq_replen = lustre_msg_size(1, &size);
271 if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
272 ptlrpcd_add_req(request);
275 rc = ptlrpc_queue_wait(request);
279 body = lustre_swab_repbuf(request, 0, sizeof(*body),
280 lustre_swab_ost_body);
282 GOTO(out, rc = -EPROTO);
284 memcpy(oa, &body->oa, sizeof(*oa));
288 ptlrpc_req_finished(request);
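/* Create an object on the OST and record its id/group in the stripe metadata;
 * the DELORPHAN case is used during recovery to resync the OSC's next object
 * id with the OST. */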
292 int osc_real_create(struct obd_export *exp, struct obdo *oa,
293 struct lov_stripe_md **ea, struct obd_trans_info *oti)
295 struct osc_creator *oscc = &exp->exp_obd->u.cli.cl_oscc;
296 struct ptlrpc_request *request;
297 struct ost_body *body;
298 struct lov_stripe_md *lsm;
299 int rc, size = sizeof(*body);
307 rc = obd_alloc_memmd(exp, &lsm);
312 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
313 OST_CREATE, 1, &size, NULL);
315 GOTO(out, rc = -ENOMEM);
317 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
318 memcpy(&body->oa, oa, sizeof(body->oa));
320 request->rq_replen = lustre_msg_size(1, &size);
321 if (oa->o_valid & OBD_MD_FLINLINE) {
322 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
323 oa->o_flags == OBD_FL_DELORPHAN);
324 DEBUG_REQ(D_HA, request,
325 "delorphan from OST integration");
326 /* Don't resend the delorphan request */
327 request->rq_no_resend = request->rq_no_delay = 1;
330 rc = ptlrpc_queue_wait(request);
334 body = lustre_swab_repbuf(request, 0, sizeof(*body),
335 lustre_swab_ost_body);
337 CERROR ("can't unpack ost_body\n");
338 GOTO (out_req, rc = -EPROTO);
341 if ((oa->o_valid & OBD_MD_FLFLAGS) && oa->o_flags == OBD_FL_DELORPHAN) {
342 struct obd_import *imp = class_exp2cliimp(exp);
343 /* MDS declares the last known object, the OSS responds
344 * with the next possible object -bzzz */
345 spin_lock(&oscc->oscc_lock);
346 oscc->oscc_next_id = body->oa.o_id;
347 spin_unlock(&oscc->oscc_lock);
348 CDEBUG(D_HA, "%s: set nextid "LPD64" after recovery\n",
349 imp->imp_target_uuid.uuid, oa->o_id);
351 memcpy(oa, &body->oa, sizeof(*oa));
353 /* This should really be sent by the OST */
354 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
355 oa->o_valid |= OBD_MD_FLBLKSZ;
357 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
358 * have valid lsm_oinfo data structs, so don't go touching that.
359 * This needs to be fixed in a big way.
361 lsm->lsm_object_id = oa->o_id;
362 lsm->lsm_object_gr = oa->o_gr;
366 oti->oti_transno = request->rq_repmsg->transno;
368 if (oa->o_valid & OBD_MD_FLCOOKIE) {
369 if (!oti->oti_logcookies)
370 oti_alloc_cookies(oti, 1);
371 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
372 sizeof(oti->oti_onecookie));
376 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
379 ptlrpc_req_finished(request);
382 obd_free_memmd(exp, &lsm);
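/* Punch out the byte range [start, end] on the OST; the range is carried in
 * the oa size/blocks fields. */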
386 static int osc_punch(struct obd_export *exp, struct obdo *oa,
387 struct lov_stripe_md *md, obd_size start,
388 obd_size end, struct obd_trans_info *oti)
390 struct ptlrpc_request *request;
391 struct ost_body *body;
392 int rc, size = sizeof(*body);
400 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
401 OST_PUNCH, 1, &size, NULL);
405 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
406 memcpy(&body->oa, oa, sizeof(*oa));
408 /* overload the size and blocks fields in the oa with start/end */
409 body->oa.o_size = start;
410 body->oa.o_blocks = end;
411 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
413 request->rq_replen = lustre_msg_size(1, &size);
415 rc = ptlrpc_queue_wait(request);
419 body = lustre_swab_repbuf (request, 0, sizeof (*body),
420 lustre_swab_ost_body);
422 CERROR ("can't unpack ost_body\n");
423 GOTO (out, rc = -EPROTO);
426 memcpy(oa, &body->oa, sizeof(*oa));
430 ptlrpc_req_finished(request);
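/* OST_SYNC: ask the OST to commit the given byte range to stable storage. */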
434 static int osc_sync(struct obd_export *exp, struct obdo *oa,
435 struct lov_stripe_md *md, obd_size start,
438 struct ptlrpc_request *request;
439 struct ost_body *body;
440 int rc, size = sizeof(*body);
448 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
449 OST_SYNC, 1, &size, NULL);
453 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
454 memcpy(&body->oa, oa, sizeof(*oa));
456 /* overload the size and blocks fields in the oa with start/end */
457 body->oa.o_size = start;
458 body->oa.o_blocks = end;
459 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
461 request->rq_replen = lustre_msg_size(1, &size);
463 rc = ptlrpc_queue_wait(request);
467 body = lustre_swab_repbuf(request, 0, sizeof(*body),
468 lustre_swab_ost_body);
470 CERROR ("can't unpack ost_body\n");
471 GOTO (out, rc = -EPROTO);
474 memcpy(oa, &body->oa, sizeof(*oa));
478 ptlrpc_req_finished(request);
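/* Destroy an object on the OST, passing along the unlink llog cookie if the
 * caller supplied one; may be issued asynchronously through ptlrpcd. */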
482 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
483 struct lov_stripe_md *ea, struct obd_trans_info *oti)
485 struct ptlrpc_request *request;
486 struct ost_body *body;
487 int rc, size = sizeof(*body);
495 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
496 OST_DESTROY, 1, &size, NULL);
500 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
502 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
503 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
504 sizeof(*oti->oti_logcookies));
505 oti->oti_logcookies++;
508 memcpy(&body->oa, oa, sizeof(*oa));
509 request->rq_replen = lustre_msg_size(1, &size);
511 if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
512 ptlrpcd_add_req(request);
515 rc = ptlrpc_queue_wait(request);
521 ptlrpc_req_finished(request);
525 body = lustre_swab_repbuf(request, 0, sizeof(*body),
526 lustre_swab_ost_body);
528 CERROR ("Can't unpack body\n");
529 ptlrpc_req_finished(request);
533 memcpy(oa, &body->oa, sizeof(*oa));
534 ptlrpc_req_finished(request);
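/* Fill the obdo with this client's dirty page and grant accounting so the OST
 * can see how much cache and grant we are consuming. */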
539 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
542 obd_valid bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
544 LASSERT(!(oa->o_valid & bits));
547 spin_lock(&cli->cl_loi_list_lock);
548 oa->o_dirty = cli->cl_dirty;
549 oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
550 oa->o_grant = cli->cl_avail_grant;
551 oa->o_dropped = cli->cl_lost_grant;
552 cli->cl_lost_grant = 0;
553 spin_unlock(&cli->cl_loi_list_lock);
554 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
555 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
558 /* caller must hold loi_list_lock */
559 static void osc_consume_write_grant(struct client_obd *cli,
560 struct osc_async_page *oap)
562 cli->cl_dirty += PAGE_SIZE;
563 cli->cl_avail_grant -= PAGE_SIZE;
564 oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
565 CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
566 LASSERT(cli->cl_avail_grant >= 0);
569 static unsigned long rpcs_in_flight(struct client_obd *cli)
571 return cli->cl_r_in_flight + cli->cl_w_in_flight;
574 /* caller must hold loi_list_lock */
575 void osc_wake_cache_waiters(struct client_obd *cli)
577 struct list_head *l, *tmp;
578 struct osc_cache_waiter *ocw;
580 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
581 /* if we can't dirty more, we must wait until some is written */
582 if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
583 CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
584 cli->cl_dirty, cli->cl_dirty_max);
588 /* if the cache is still dirty but we have no grant, wait for pending
589 * RPCs that may yet return us some grant before doing sync writes */
590 if (cli->cl_w_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
591 CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
592 cli->cl_w_in_flight);
594 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
595 list_del_init(&ocw->ocw_entry);
596 if (cli->cl_avail_grant < PAGE_SIZE) {
597 /* no more RPCs in flight to return grant, do sync IO */
598 ocw->ocw_rc = -EDQUOT;
599 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
601 osc_consume_write_grant(cli, ocw->ocw_oap);
604 wake_up(&ocw->ocw_waitq);
610 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
612 spin_lock(&cli->cl_loi_list_lock);
613 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
614 cli->cl_avail_grant += body->oa.o_grant;
615 /* waiters are woken in brw_interpret_oap */
616 spin_unlock(&cli->cl_loi_list_lock);
619 /* We assume that the reason this OSC got a short read is that it read
620 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
621 * via the LOV, and it _knows_ it's reading inside the file, it's just that
622 * this stripe never got written at or beyond this stripe offset yet. */
623 static void handle_short_read(int nob_read, obd_count page_count,
624 struct brw_page *pga)
628 /* skip bytes read OK */
629 while (nob_read > 0) {
630 LASSERT (page_count > 0);
632 if (pga->count > nob_read) {
633 /* EOF inside this page */
634 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
635 memset(ptr + nob_read, 0, pga->count - nob_read);
642 nob_read -= pga->count;
647 /* zero remaining pages */
648 while (page_count-- > 0) {
649 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
650 memset(ptr, 0, pga->count);
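/* Check the per-niobuf return codes in an OST_WRITE reply and verify that the
 * number of bytes transferred matches what was requested. */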
656 static int check_write_rcs(struct ptlrpc_request *request,
657 int requested_nob, int niocount,
658 obd_count page_count, struct brw_page *pga)
662 /* return error if any niobuf was in error */
663 remote_rcs = lustre_swab_repbuf(request, 1,
664 sizeof(*remote_rcs) * niocount, NULL);
665 if (remote_rcs == NULL) {
666 CERROR("Missing/short RC vector on BRW_WRITE reply\n");
669 if (lustre_msg_swabbed(request->rq_repmsg))
670 for (i = 0; i < niocount; i++)
671 __swab32s((__u32 *)&remote_rcs[i]);
673 for (i = 0; i < niocount; i++) {
674 if (remote_rcs[i] < 0)
675 return(remote_rcs[i]);
677 if (remote_rcs[i] != 0) {
678 CERROR("rc[%d] invalid (%d) req %p\n",
679 i, remote_rcs[i], request);
684 if (request->rq_bulk->bd_nob_transferred != requested_nob) {
685 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
686 requested_nob, request->rq_bulk->bd_nob_transferred);
693 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
695 if (p1->flag != p2->flag) {
696 unsigned mask = ~OBD_BRW_FROM_GRANT;
698 /* warn if we try to combine flags that we don't know to be
700 if ((p1->flag & mask) != (p2->flag & mask))
701 CERROR("is it ok to have flags 0x%x and 0x%x in the "
702 "same brw?\n", p1->flag, p2->flag);
706 return (p1->disk_offset + p1->count == p2->disk_offset);
710 static obd_count cksum_pages(int nob, obd_count page_count,
711 struct brw_page *pga)
717 LASSERT (page_count > 0);
720 ost_checksum(&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
721 pga->count > nob ? nob : pga->count);
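/* Build a bulk read/write request: contiguous pages are merged into single
 * niobufs, a bulk descriptor is attached, and our cached/dirty state is
 * announced in the obdo. */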
733 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
734 struct lov_stripe_md *lsm, obd_count page_count,
735 struct brw_page *pga, int *requested_nobp,
736 int *niocountp, struct ptlrpc_request **reqp)
738 struct ptlrpc_request *req;
739 struct ptlrpc_bulk_desc *desc;
740 struct client_obd *cli = &imp->imp_obd->u.cli;
741 struct ost_body *body;
742 struct obd_ioobj *ioobj;
743 struct niobuf_remote *niobuf;
751 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
753 for (niocount = i = 1; i < page_count; i++)
754 if (!can_merge_pages(&pga[i - 1], &pga[i]))
757 size[0] = sizeof(*body);
758 size[1] = sizeof(*ioobj);
759 size[2] = niocount * sizeof(*niobuf);
761 req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opc, 3, size, NULL);
765 if (opc == OST_WRITE)
766 desc = ptlrpc_prep_bulk_imp (req, page_count,
767 BULK_GET_SOURCE, OST_BULK_PORTAL);
769 desc = ptlrpc_prep_bulk_imp (req, page_count,
770 BULK_PUT_SINK, OST_BULK_PORTAL);
772 GOTO(out, rc = -ENOMEM);
773 /* NB request now owns desc and will free it when it gets freed */
775 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
776 ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
777 niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));
779 memcpy(&body->oa, oa, sizeof(*oa));
781 obdo_to_ioobj(oa, ioobj);
782 ioobj->ioo_bufcnt = niocount;
784 LASSERT (page_count > 0);
786 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
787 struct brw_page *pg = &pga[i];
788 struct brw_page *pg_prev = pg - 1;
790 LASSERT(pg->count > 0);
791 LASSERTF((pg->page_offset & ~PAGE_MASK) + pg->count <= PAGE_SIZE,
792 "i: %d pg: %p pg_off: "LPU64", count: %u\n", i, pg,
793 pg->page_offset, pg->count);
794 LASSERTF(i == 0 || pg->disk_offset > pg_prev->disk_offset,
795 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
796 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
798 pg->pg, pg->pg->private, pg->pg->index, pg->disk_offset,
799 pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
800 pg_prev->disk_offset);
802 ptlrpc_prep_bulk_page(desc, pg->pg,
803 pg->page_offset & ~PAGE_MASK, pg->count);
804 requested_nob += pg->count;
806 if (i > 0 && can_merge_pages(pg_prev, pg)) {
808 niobuf->len += pg->count;
810 niobuf->offset = pg->disk_offset;
811 niobuf->len = pg->count;
812 niobuf->flags = pg->flag;
816 LASSERT((void *)(niobuf - niocount) ==
817 lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
818 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
820 /* size[0] still sizeof (*body) */
821 if (opc == OST_WRITE) {
823 body->oa.o_valid |= OBD_MD_FLCKSUM;
824 body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
826 /* 1 RC per niobuf */
827 size[1] = sizeof(__u32) * niocount;
828 req->rq_replen = lustre_msg_size(2, size);
830 /* 1 RC for the whole I/O */
831 req->rq_replen = lustre_msg_size(1, size);
834 *niocountp = niocount;
835 *requested_nobp = requested_nob;
840 ptlrpc_req_finished (req);
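/* Common completion handling for a bulk request: update the grant, copy the
 * reply obdo back, and either check the write return codes or handle short
 * reads and verify the read checksum. */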
844 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
845 int requested_nob, int niocount,
846 obd_count page_count, struct brw_page *pga,
849 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
850 struct ost_body *body;
856 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
858 CERROR ("Can't unpack body\n");
862 osc_update_grant(cli, body);
863 memcpy(oa, &body->oa, sizeof(*oa));
865 if (req->rq_reqmsg->opc == OST_WRITE) {
867 CERROR ("Unexpected +ve rc %d\n", rc);
870 LASSERT (req->rq_bulk->bd_nob == requested_nob);
872 RETURN(check_write_rcs(req, requested_nob, niocount,
876 if (rc > requested_nob) {
877 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
881 if (rc != req->rq_bulk->bd_nob_transferred) {
882 CERROR ("Unexpected rc %d (%d transferred)\n",
883 rc, req->rq_bulk->bd_nob_transferred);
887 if (rc < requested_nob)
888 handle_short_read(rc, page_count, pga);
891 if (oa->o_valid & OBD_MD_FLCKSUM) {
892 const struct ptlrpc_peer *peer =
893 &req->rq_import->imp_connection->c_peer;
894 static int cksum_counter;
895 obd_count server_cksum = oa->o_cksum;
896 obd_count cksum = cksum_pages(rc, page_count, pga);
897 char str[PTL_NALFMT_SIZE];
899 ptlrpc_peernid2str(peer, str);
902 if (server_cksum != cksum) {
903 CERROR("Bad checksum: server %x, client %x, server NID "
904 LPX64" (%s)\n", server_cksum, cksum,
905 peer->peer_id.nid, str);
908 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
909 CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
910 cksum_counter, peer->peer_id.nid, str, cksum);
913 static int cksum_missed;
916 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
917 CERROR("Request checksum %u from "LPX64", no reply\n",
919 req->rq_import->imp_connection->c_peer.peer_id.nid);
925 static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
926 struct lov_stripe_md *lsm,
927 obd_count page_count, struct brw_page *pga)
931 struct ptlrpc_request *request;
936 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
937 page_count, pga, &requested_nob, &niocount,
942 rc = ptlrpc_queue_wait(request);
944 if (rc == -ETIMEDOUT && request->rq_resend) {
945 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
946 ptlrpc_req_finished(request);
950 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
951 page_count, pga, rc);
953 ptlrpc_req_finished(request);
957 static int brw_interpret(struct ptlrpc_request *request,
958 struct osc_brw_async_args *aa, int rc)
960 struct obdo *oa = aa->aa_oa;
961 int requested_nob = aa->aa_requested_nob;
962 int niocount = aa->aa_nio_count;
963 obd_count page_count = aa->aa_page_count;
964 struct brw_page *pga = aa->aa_pga;
967 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
968 page_count, pga, rc);
972 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
973 struct lov_stripe_md *lsm, obd_count page_count,
974 struct brw_page *pga, struct ptlrpc_request_set *set)
976 struct ptlrpc_request *request;
979 struct osc_brw_async_args *aa;
983 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
984 page_count, pga, &requested_nob, &nio_count,
987 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
988 aa = (struct osc_brw_async_args *)&request->rq_async_args;
990 aa->aa_requested_nob = requested_nob;
991 aa->aa_nio_count = nio_count;
992 aa->aa_page_count = page_count;
995 request->rq_interpret_reply = brw_interpret;
996 ptlrpc_set_add_req(set, request);
1002 #define min_t(type,x,y) \
1003 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
1007 * ugh, we want disk allocation on the target to happen in offset order. we'll
1008 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1009 * fine for our small page arrays and doesn't require allocation. it's an
1010 * insertion sort that swaps elements that are strides apart, shrinking the
1011 * stride down until it's '1' and the array is sorted.
1013 static void sort_brw_pages(struct brw_page *array, int num)
1016 struct brw_page tmp;
1020 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1025 for (i = stride ; i < num ; i++) {
1028 while (j >= stride && array[j - stride].disk_offset >
1030 array[j] = array[j - stride];
1035 } while (stride > 1);
1038 /* make sure the regions we're passing to elan don't violate its '4
1039 * fragments' constraint. portal headers are a fragment, all full
1040 * PAGE_SIZE long pages count as 1 fragment, and each partial page
1041 * counts as a fragment. I think. see bug 934. */
1042 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
1045 int saw_whole_frag = 0;
1048 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
1049 if (pg->count == PAGE_SIZE) {
1050 if (!saw_whole_frag) {
1061 static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
1062 struct lov_stripe_md *lsm, obd_count page_count,
1063 struct brw_page *pga, struct obd_trans_info *oti)
1067 if (cmd == OBD_BRW_CHECK) {
1068 /* The caller just wants to know if there's a chance that this
1069 * I/O can succeed */
1070 struct obd_import *imp = class_exp2cliimp(exp);
1072 if (imp == NULL || imp->imp_invalid)
1077 while (page_count) {
1078 obd_count pages_per_brw;
1081 if (page_count > PTLRPC_MAX_BRW_PAGES)
1082 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1084 pages_per_brw = page_count;
1086 sort_brw_pages(pga, pages_per_brw);
1087 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1089 rc = osc_brw_internal(cmd, exp, oa, lsm, pages_per_brw, pga);
1094 page_count -= pages_per_brw;
1095 pga += pages_per_brw;
1100 static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
1101 struct lov_stripe_md *lsm, obd_count page_count,
1102 struct brw_page *pga, struct ptlrpc_request_set *set,
1103 struct obd_trans_info *oti)
1107 if (cmd == OBD_BRW_CHECK) {
1108 /* The caller just wants to know if there's a chance that this
1109 * I/O can succeed */
1110 struct obd_import *imp = class_exp2cliimp(exp);
1112 if (imp == NULL || imp->imp_invalid)
1117 while (page_count) {
1118 obd_count pages_per_brw;
1121 if (page_count > PTLRPC_MAX_BRW_PAGES)
1122 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1124 pages_per_brw = page_count;
1126 sort_brw_pages(pga, pages_per_brw);
1127 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1129 rc = async_internal(cmd, exp, oa, lsm, pages_per_brw, pga, set);
1134 page_count -= pages_per_brw;
1135 pga += pages_per_brw;
1140 static void osc_check_rpcs(struct client_obd *cli);
1141 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1143 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
1144 static void lop_update_pending(struct client_obd *cli,
1145 struct loi_oap_pages *lop, int cmd, int delta);
1147 /* this is called when a sync waiter receives an interruption. Its job is to
1148 * get the caller woken as soon as possible. If its page hasn't been put in an
1149 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1150 * desiring interruption which will forcefully complete the rpc once the rpc
1152 static void osc_occ_interrupted(struct oig_callback_context *occ)
1154 struct osc_async_page *oap;
1155 struct loi_oap_pages *lop;
1156 struct lov_oinfo *loi;
1159 /* XXX member_of() */
1160 oap = list_entry(occ, struct osc_async_page, oap_occ);
1162 spin_lock(&oap->oap_cli->cl_loi_list_lock);
1164 oap->oap_interrupted = 1;
1166 /* ok, it's been put in an rpc. */
1167 if (oap->oap_request != NULL) {
1168 ptlrpc_mark_interrupted(oap->oap_request);
1169 ptlrpcd_wake(oap->oap_request);
1173 /* we don't get interruption callbacks until osc_trigger_sync_io()
1174 * has been called and put the sync oaps in the pending/urgent lists.*/
1175 if (!list_empty(&oap->oap_pending_item)) {
1176 list_del_init(&oap->oap_pending_item);
1177 if (oap->oap_async_flags & ASYNC_URGENT)
1178 list_del_init(&oap->oap_urgent_item);
1181 lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
1182 &loi->loi_write_lop : &loi->loi_read_lop;
1183 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1184 loi_list_maint(oap->oap_cli, oap->oap_loi);
1186 oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
1187 oap->oap_oig = NULL;
1191 spin_unlock(&oap->oap_cli->cl_loi_list_lock);
1194 /* this must be called holding the loi list lock to give coverage to exit_cache,
1195 * async_flag maintenance, and oap_request */
1196 static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
1197 struct osc_async_page *oap, int sent, int rc)
1199 osc_exit_cache(cli, oap, sent);
1200 oap->oap_async_flags = 0;
1201 oap->oap_interrupted = 0;
1203 if (oap->oap_request != NULL) {
1204 ptlrpc_req_finished(oap->oap_request);
1205 oap->oap_request = NULL;
1208 if (rc == 0 && oa != NULL)
1209 oap->oap_loi->loi_blocks = oa->o_blocks;
1212 oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
1213 oap->oap_oig = NULL;
1218 oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
1222 static int brw_interpret_oap(struct ptlrpc_request *request,
1223 struct osc_brw_async_args *aa, int rc)
1225 struct osc_async_page *oap;
1226 struct client_obd *cli;
1227 struct list_head *pos, *n;
1231 do_gettimeofday(&now);
1232 rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
1233 aa->aa_nio_count, aa->aa_page_count,
1236 CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);
1239 /* in failout recovery we ignore writeback failure and want
1240 * to just tell llite to unlock the page and continue */
1241 if (request->rq_reqmsg->opc == OST_WRITE &&
1242 (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
1243 CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
1245 cli->cl_import ? cli->cl_import->imp_invalid : -1);
1249 spin_lock(&cli->cl_loi_list_lock);
1251 if (request->rq_reqmsg->opc == OST_WRITE)
1252 lprocfs_stime_record(&cli->cl_write_stime, &now,
1253 &request->rq_rpcd_start);
1255 lprocfs_stime_record(&cli->cl_read_stime, &now,
1256 &request->rq_rpcd_start);
1260 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1261 * is called so we know whether to go to sync BRWs or wait for more
1262 * RPCs to complete */
1263 if (request->rq_reqmsg->opc == OST_WRITE)
1264 cli->cl_w_in_flight--;
1266 cli->cl_r_in_flight--;
1268 /* the caller may re-use the oap after the completion call so
1269 * we need to clean it up a little */
1270 list_for_each_safe(pos, n, &aa->aa_oaps) {
1271 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1273 //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
1274 //oap->oap_page, oap->oap_page->index, oap);
1276 list_del_init(&oap->oap_rpc_item);
1277 osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
1280 osc_wake_cache_waiters(cli);
1281 osc_check_rpcs(cli);
1282 spin_unlock(&cli->cl_loi_list_lock);
1284 obdo_free(aa->aa_oa);
1285 OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));
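/* Turn a list of osc_async_pages into a brw_page array and build the bulk rpc
 * that will carry them. */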
1290 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
1291 struct list_head *rpc_list,
1292 int page_count, int cmd)
1294 struct ptlrpc_request *req;
1295 struct brw_page *pga = NULL;
1296 int requested_nob, nio_count;
1297 struct osc_brw_async_args *aa;
1298 struct obdo *oa = NULL;
1299 struct obd_async_page_ops *ops = NULL;
1300 void *caller_data = NULL;
1301 struct list_head *pos;
1304 LASSERT(!list_empty(rpc_list));
1306 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1308 RETURN(ERR_PTR(-ENOMEM));
1312 GOTO(out, req = ERR_PTR(-ENOMEM));
1315 list_for_each(pos, rpc_list) {
1316 struct osc_async_page *oap;
1318 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1320 ops = oap->oap_caller_ops;
1321 caller_data = oap->oap_caller_data;
1323 pga[i].disk_offset = oap->oap_obj_off + oap->oap_page_off;
1324 pga[i].page_offset = pga[i].disk_offset;
1325 pga[i].pg = oap->oap_page;
1326 pga[i].count = oap->oap_count;
1327 pga[i].flag = oap->oap_brw_flags;
1328 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1329 pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
1333 /* always get the data for the obdo for the rpc */
1334 LASSERT(ops != NULL);
1335 ops->ap_fill_obdo(caller_data, cmd, oa);
1337 sort_brw_pages(pga, page_count);
1338 rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
1339 pga, &requested_nob, &nio_count, &req);
1341 CERROR("prep_req failed: %d\n", rc);
1342 GOTO(out, req = ERR_PTR(rc));
1345 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1346 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1348 aa->aa_requested_nob = requested_nob;
1349 aa->aa_nio_count = nio_count;
1350 aa->aa_page_count = page_count;
1359 OBD_FREE(pga, sizeof(*pga) * page_count);
1364 static void lop_update_pending(struct client_obd *cli,
1365 struct loi_oap_pages *lop, int cmd, int delta)
1367 lop->lop_num_pending += delta;
1368 if (cmd == OBD_BRW_WRITE)
1369 cli->cl_pending_w_pages += delta;
1371 cli->cl_pending_r_pages += delta;
1374 /* the loi lock is held across this function but it's allowed to release
1375 * and reacquire it during its work */
1376 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
1377 int cmd, struct loi_oap_pages *lop)
1379 struct ptlrpc_request *request;
1380 obd_count page_count = 0;
1381 struct list_head *tmp, *pos;
1382 struct osc_async_page *oap = NULL;
1383 struct osc_brw_async_args *aa;
1384 struct obd_async_page_ops *ops;
1385 LIST_HEAD(rpc_list);
1388 /* first we find the pages we're allowed to work with */
1389 list_for_each_safe(pos, tmp, &lop->lop_pending) {
1390 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1391 ops = oap->oap_caller_ops;
1393 LASSERT(oap->oap_magic == OAP_MAGIC);
1395 /* in llite being 'ready' equates to the page being locked
1396 * until completion unlocks it. commit_write submits a page
1397 * as not ready because its unlock will happen unconditionally
1398 * as the call returns. if we race with commit_write giving
1399 * us that page we don't want to create a hole in the page
1400 * stream, so we stop and leave the rpc to be fired by
1401 * another dirtier or kupdated interval (the not ready page
1402 * will still be on the dirty list). we could call in
1403 * at the end of ll_file_write to process the queue again. */
1404 if (!(oap->oap_async_flags & ASYNC_READY)) {
1405 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
1407 CDEBUG(D_INODE, "oap %p page %p returned %d "
1408 "instead of ready\n", oap,
1412 /* llite is telling us that the page is still
1413 * in commit_write and that we should try
1414 * and put it in an rpc again later. we
1415 * break out of the loop so we don't create
1416 * a hole in the sequence of pages in the rpc
1421 /* the io isn't needed.. tell the checks
1422 * below to complete the rpc with EINTR */
1423 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1424 oap->oap_count = -EINTR;
1427 oap->oap_async_flags |= ASYNC_READY;
1430 LASSERTF(0, "oap %p page %p returned %d "
1431 "from make_ready\n", oap,
1439 /* take the page out of our book-keeping */
1440 list_del_init(&oap->oap_pending_item);
1441 lop_update_pending(cli, lop, cmd, -1);
1442 list_del_init(&oap->oap_urgent_item);
1444 /* ask the caller for the size of the io as the rpc leaves. */
1445 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
1447 ops->ap_refresh_count(oap->oap_caller_data,cmd);
1448 if (oap->oap_count <= 0) {
1449 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
1451 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
1455 /* now put the page back in our accounting */
1456 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1457 if (++page_count >= cli->cl_max_pages_per_rpc)
1461 osc_wake_cache_waiters(cli);
1463 if (page_count == 0)
1466 loi_list_maint(cli, loi);
1467 spin_unlock(&cli->cl_loi_list_lock);
1469 request = osc_build_req(cli, &rpc_list, page_count, cmd);
1470 if (IS_ERR(request)) {
1471 /* this should happen rarely and is pretty bad, it makes the
1472 * pending list not follow the dirty order */
1473 spin_lock(&cli->cl_loi_list_lock);
1474 list_for_each_safe(pos, tmp, &rpc_list) {
1475 oap = list_entry(pos, struct osc_async_page,
1477 list_del_init(&oap->oap_rpc_item);
1479 /* queued sync pages can be torn down while the pages
1480 * were between the pending list and the rpc */
1481 if (oap->oap_interrupted) {
1482 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
1483 osc_ap_completion(cli, NULL, oap, 0,
1488 /* put the page back in the loi/lop lists */
1489 list_add_tail(&oap->oap_pending_item,
1491 lop_update_pending(cli, lop, cmd, 1);
1492 if (oap->oap_async_flags & ASYNC_URGENT)
1493 list_add(&oap->oap_urgent_item,
1496 loi_list_maint(cli, loi);
1497 RETURN(PTR_ERR(request));
1500 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1501 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1502 INIT_LIST_HEAD(&aa->aa_oaps);
1503 list_splice(&rpc_list, &aa->aa_oaps);
1504 INIT_LIST_HEAD(&rpc_list);
1507 if (cmd == OBD_BRW_READ) {
1508 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1509 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
1511 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1512 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1513 cli->cl_w_in_flight);
1517 spin_lock(&cli->cl_loi_list_lock);
1519 if (cmd == OBD_BRW_READ)
1520 cli->cl_r_in_flight++;
1522 cli->cl_w_in_flight++;
1523 /* queued sync pages can be torn down while the pages
1524 * were between the pending list and the rpc */
1525 list_for_each(pos, &aa->aa_oaps) {
1526 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1527 if (oap->oap_interrupted) {
1528 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1530 ptlrpc_mark_interrupted(request);
1535 CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %dr/%dw in flight\n",
1536 request, page_count, aa, cli->cl_r_in_flight,
1537 cli->cl_w_in_flight);
1539 oap->oap_request = ptlrpc_request_addref(request);
1540 request->rq_interpret_reply = brw_interpret_oap;
1541 ptlrpcd_add_req(request);
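/* Decide whether this set of pending pages is worth an rpc right now: always
 * when the import is invalid or an urgent page is queued, otherwise only once
 * enough pages have accumulated. */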
1545 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1551 if (lop->lop_num_pending == 0)
1554 /* if we have an invalid import we want to drain the queued pages
1555 * by forcing them through rpcs that immediately fail and complete
1556 * the pages. recovery relies on this to empty the queued pages
1557 * before canceling the locks and evicting down the llite pages */
1558 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1561 /* stream rpcs in queue order as long as there is an urgent page
1562 * queued. this is our cheap solution for good batching in the case
1563 * where writepage marks some random page in the middle of the file as
1564 * urgent because of, say, memory pressure */
1565 if (!list_empty(&lop->lop_urgent))
1568 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1569 optimal = cli->cl_max_pages_per_rpc;
1570 if (cmd == OBD_BRW_WRITE) {
1571 /* trigger a write rpc stream as long as there are dirtiers
1572 * waiting for space. as they're waiting, they're not going to
1573 * create more pages to coalesce with what's waiting.. */
1574 if (!list_empty(&cli->cl_cache_waiters))
1577 /* *2 to avoid triggering rpcs that would want to include pages
1578 * that are being queued but which can't be made ready until
1579 * the queuer finishes with the page. this is a wart for
1580 * llite::commit_write() */
1583 if (lop->lop_num_pending >= optimal)
1589 static void on_list(struct list_head *item, struct list_head *list,
1592 if (list_empty(item) && should_be_on)
1593 list_add_tail(item, list);
1594 else if (!list_empty(item) && !should_be_on)
1595 list_del_init(item);
1598 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1599 * can find pages to build into rpcs quickly */
1600 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1602 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1603 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1604 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1606 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1607 loi->loi_write_lop.lop_num_pending);
1609 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
1610 loi->loi_read_lop.lop_num_pending);
1613 #define LOI_DEBUG(LOI, STR, args...) \
1614 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
1615 !list_empty(&(LOI)->loi_cli_item), \
1616 (LOI)->loi_write_lop.lop_num_pending, \
1617 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
1618 (LOI)->loi_read_lop.lop_num_pending, \
1619 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
1622 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
1625 /* first return all objects which we already know to have
1626 * pages ready to be stuffed into rpcs */
1627 if (!list_empty(&cli->cl_loi_ready_list))
1628 RETURN(list_entry(cli->cl_loi_ready_list.next,
1629 struct lov_oinfo, loi_cli_item));
1631 /* then if we have cache waiters, return all objects with queued
1632 * writes. This is especially important when many small files
1633 * have filled up the cache and not been fired into rpcs because
1634 * they don't pass the nr_pending/object threshold */
1635 if (!list_empty(&cli->cl_cache_waiters) &&
1636 !list_empty(&cli->cl_loi_write_list))
1637 RETURN(list_entry(cli->cl_loi_write_list.next,
1638 struct lov_oinfo, loi_write_item));
1640 /* then return all queued objects when we have an invalid import
1641 * so that they get flushed */
1642 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
1643 if (!list_empty(&cli->cl_loi_write_list))
1644 RETURN(list_entry(cli->cl_loi_write_list.next,
1645 struct lov_oinfo, loi_write_item));
1646 if (!list_empty(&cli->cl_loi_read_list))
1647 RETURN(list_entry(cli->cl_loi_read_list.next,
1648 struct lov_oinfo, loi_read_item));
1653 /* called with the loi list lock held */
1654 static void osc_check_rpcs(struct client_obd *cli)
1656 struct lov_oinfo *loi;
1657 int rc = 0, race_counter = 0;
1660 while ((loi = osc_next_loi(cli)) != NULL) {
1661 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
1663 if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
1666 /* attempt some read/write balancing by alternating between
1667 * reads and writes in an object. The makes_rpc checks here
1668 * would be redundant if we were getting read/write work items
1669 * instead of objects. we don't want send_oap_rpc to drain a
1670 * partial read pending queue when we're given this object to
1671 * do io on writes while there are cache waiters */
1672 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
1673 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
1674 &loi->loi_write_lop);
1682 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
1683 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
1684 &loi->loi_read_lop);
1693 /* attempt some inter-object balancing by issuing rpcs
1694 * for each object in turn */
1695 if (!list_empty(&loi->loi_cli_item))
1696 list_del_init(&loi->loi_cli_item);
1697 if (!list_empty(&loi->loi_write_item))
1698 list_del_init(&loi->loi_write_item);
1699 if (!list_empty(&loi->loi_read_item))
1700 list_del_init(&loi->loi_read_item);
1702 loi_list_maint(cli, loi);
1704 /* send_oap_rpc fails with 0 when make_ready tells it to
1705 * back off. llite's make_ready does this when it tries
1706 * to lock a page queued for write that is already locked.
1707 * we want to try sending rpcs from many objects, but we
1708 * don't want to spin failing with 0. */
1709 if (race_counter == 10)
1715 /* we're trying to queue a page in the osc so we're subject to the
1716 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
1717 * If the osc's queued pages are already at that limit, then we want to sleep
1718 * until there is space in the osc's queue for us. We also may be waiting for
1719 * write credits from the OST if there are RPCs in flight that may return some
1720 * before we fall back to sync writes.
1722 * We need this to know whether our allocation was granted in the presence of signals */
1723 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1727 spin_lock(&cli->cl_loi_list_lock);
1728 rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
1729 spin_unlock(&cli->cl_loi_list_lock);
1733 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
1734 * grant or cache space. */
1735 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
1736 struct osc_async_page *oap)
1738 struct osc_cache_waiter ocw;
1739 struct l_wait_info lwi = { 0 };
1740 struct timeval start, stop;
1742 CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
1743 cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
1744 cli->cl_avail_grant);
1746 if (cli->cl_dirty_max < PAGE_SIZE)
1749 /* Hopefully normal case - cache space and write credits available */
1750 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1751 cli->cl_avail_grant >= PAGE_SIZE) {
1752 /* account for ourselves */
1753 osc_consume_write_grant(cli, oap);
1757 /* Make sure that there are write rpcs in flight to wait for. This
1758 * is a little silly as this object may not have any pending but
1759 * other objects sure might. */
1760 if (cli->cl_w_in_flight) {
1761 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1762 init_waitqueue_head(&ocw.ocw_waitq);
1766 loi_list_maint(cli, loi);
1767 osc_check_rpcs(cli);
1768 spin_unlock(&cli->cl_loi_list_lock);
1770 CDEBUG(0, "sleeping for cache space\n");
1771 do_gettimeofday(&start);
1772 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1773 do_gettimeofday(&stop);
1774 spin_lock(&cli->cl_loi_list_lock);
1775 lprocfs_stime_record(&cli->cl_enter_stime, &stop, &start);
1776 if (!list_empty(&ocw.ocw_entry)) {
1777 list_del(&ocw.ocw_entry);
1786 /* the companion to enter_cache, called when an oap is no longer part of the
1787 * dirty accounting.. so writeback completes or truncate happens before writing
1788 * starts. must be called with the loi lock held. */
1789 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1794 if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
1799 oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
1800 cli->cl_dirty -= PAGE_SIZE;
1802 cli->cl_lost_grant += PAGE_SIZE;
1803 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
1804 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
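/* Allocate and initialize the osc_async_page bookkeeping for a page; the
 * pointer handed back through *res becomes the caller's cookie for later
 * calls. */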
1810 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
1811 struct lov_oinfo *loi, struct page *page,
1812 obd_off offset, struct obd_async_page_ops *ops,
1813 void *data, void **res)
1815 struct osc_async_page *oap;
1818 OBD_ALLOC(oap, sizeof(*oap));
1822 oap->oap_magic = OAP_MAGIC;
1823 oap->oap_cli = &exp->exp_obd->u.cli;
1826 oap->oap_caller_ops = ops;
1827 oap->oap_caller_data = data;
1829 oap->oap_page = page;
1830 oap->oap_obj_off = offset;
1832 INIT_LIST_HEAD(&oap->oap_pending_item);
1833 INIT_LIST_HEAD(&oap->oap_urgent_item);
1834 INIT_LIST_HEAD(&oap->oap_rpc_item);
1836 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
1838 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
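/* Queue a page for asynchronous i/o: writes must first get cache space and
 * grant, then the oap is put on the object's pending (and possibly urgent)
 * list and the rpc engine is poked. */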
1843 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1844 struct lov_oinfo *loi, void *cookie,
1845 int cmd, obd_off off, int count,
1846 obd_flags brw_flags, enum async_flags async_flags)
1848 struct client_obd *cli = &exp->exp_obd->u.cli;
1849 struct osc_async_page *oap;
1850 struct loi_oap_pages *lop;
1854 oap = OAP_FROM_COOKIE(cookie);
1856 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1859 if (!list_empty(&oap->oap_pending_item) ||
1860 !list_empty(&oap->oap_urgent_item) ||
1861 !list_empty(&oap->oap_rpc_item))
1865 loi = &lsm->lsm_oinfo[0];
1867 spin_lock(&cli->cl_loi_list_lock);
1870 oap->oap_async_flags = async_flags;
1871 oap->oap_page_off = off;
1872 oap->oap_count = count;
1873 oap->oap_brw_flags = brw_flags;
1875 if (cmd == OBD_BRW_WRITE) {
1876 rc = osc_enter_cache(cli, loi, oap);
1878 spin_unlock(&cli->cl_loi_list_lock);
1881 lop = &loi->loi_write_lop;
1883 lop = &loi->loi_read_lop;
1886 if (oap->oap_async_flags & ASYNC_URGENT)
1887 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1888 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1889 lop_update_pending(cli, lop, cmd, 1);
1891 loi_list_maint(cli, loi);
1893 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
1896 osc_check_rpcs(cli);
1897 spin_unlock(&cli->cl_loi_list_lock);
1902 /* aka (~was & now & flag), but this is more clear :) */
1903 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
1905 static int osc_set_async_flags(struct obd_export *exp,
1906 struct lov_stripe_md *lsm,
1907 struct lov_oinfo *loi, void *cookie,
1908 obd_flags async_flags)
1910 struct client_obd *cli = &exp->exp_obd->u.cli;
1911 struct loi_oap_pages *lop;
1912 struct osc_async_page *oap;
1916 oap = OAP_FROM_COOKIE(cookie);
1918 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1922 loi = &lsm->lsm_oinfo[0];
1924 if (oap->oap_cmd == OBD_BRW_WRITE) {
1925 lop = &loi->loi_write_lop;
1927 lop = &loi->loi_read_lop;
1930 spin_lock(&cli->cl_loi_list_lock);
1932 if (list_empty(&oap->oap_pending_item))
1933 GOTO(out, rc = -EINVAL);
1935 if ((oap->oap_async_flags & async_flags) == async_flags)
1938 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
1939 oap->oap_async_flags |= ASYNC_READY;
1941 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
1942 if (list_empty(&oap->oap_rpc_item)) {
1943 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1944 loi_list_maint(cli, loi);
1948 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
1949 oap->oap_async_flags);
1951 osc_check_rpcs(cli);
1952 spin_unlock(&cli->cl_loi_list_lock);
1956 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1957 struct lov_oinfo *loi,
1958 struct obd_io_group *oig, void *cookie,
1959 int cmd, obd_off off, int count,
1960 obd_flags brw_flags,
1961 obd_flags async_flags)
1963 struct client_obd *cli = &exp->exp_obd->u.cli;
1964 struct osc_async_page *oap;
1965 struct loi_oap_pages *lop;
1968 oap = OAP_FROM_COOKIE(cookie);
1970 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1973 if (!list_empty(&oap->oap_pending_item) ||
1974 !list_empty(&oap->oap_urgent_item) ||
1975 !list_empty(&oap->oap_rpc_item))
1979 loi = &lsm->lsm_oinfo[0];
1981 spin_lock(&cli->cl_loi_list_lock);
1984 oap->oap_page_off = off;
1985 oap->oap_count = count;
1986 oap->oap_brw_flags = brw_flags;
1987 oap->oap_async_flags = async_flags;
1989 if (cmd == OBD_BRW_WRITE)
1990 lop = &loi->loi_write_lop;
1992 lop = &loi->loi_read_lop;
1994 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
1995 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
1997 oig_add_one(oig, &oap->oap_occ);
2000 LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);
2002 spin_unlock(&cli->cl_loi_list_lock);
2007 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
2008 struct loi_oap_pages *lop, int cmd)
2010 struct list_head *pos, *tmp;
2011 struct osc_async_page *oap;
2013 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
2014 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
2015 list_del(&oap->oap_pending_item);
2016 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2017 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2018 lop_update_pending(cli, lop, cmd, 1);
2020 loi_list_maint(cli, loi);
2023 static int osc_trigger_group_io(struct obd_export *exp,
2024 struct lov_stripe_md *lsm,
2025 struct lov_oinfo *loi,
2026 struct obd_io_group *oig)
2028 struct client_obd *cli = &exp->exp_obd->u.cli;
2032 loi = &lsm->lsm_oinfo[0];
2034 spin_lock(&cli->cl_loi_list_lock);
2036 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2037 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2039 osc_check_rpcs(cli);
2040 spin_unlock(&cli->cl_loi_list_lock);
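/* Undo osc_prep_async_page: drop the page from the pending/urgent lists and
 * free the oap; fails with -EBUSY if the page is already part of an rpc. */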
2045 static int osc_teardown_async_page(struct obd_export *exp,
2046 struct lov_stripe_md *lsm,
2047 struct lov_oinfo *loi, void *cookie)
2049 struct client_obd *cli = &exp->exp_obd->u.cli;
2050 struct loi_oap_pages *lop;
2051 struct osc_async_page *oap;
2055 oap = OAP_FROM_COOKIE(cookie);
2058 loi = &lsm->lsm_oinfo[0];
2060 if (oap->oap_cmd == OBD_BRW_WRITE) {
2061 lop = &loi->loi_write_lop;
2063 lop = &loi->loi_read_lop;
2066 spin_lock(&cli->cl_loi_list_lock);
2068 if (!list_empty(&oap->oap_rpc_item))
2069 GOTO(out, rc = -EBUSY);
2071 osc_exit_cache(cli, oap, 0);
2072 osc_wake_cache_waiters(cli);
2074 if (!list_empty(&oap->oap_urgent_item)) {
2075 list_del_init(&oap->oap_urgent_item);
2076 oap->oap_async_flags &= ~ASYNC_URGENT;
2078 if (!list_empty(&oap->oap_pending_item)) {
2079 list_del_init(&oap->oap_pending_item);
2080 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2082 loi_list_maint(cli, loi);
2084 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2086 spin_unlock(&cli->cl_loi_list_lock);
2088 OBD_FREE(oap, sizeof(*oap));
2093 /* Note: caller will lock/unlock, and set uptodate on the pages */
2094 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2095 static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
2096 struct lov_stripe_md *lsm, obd_count page_count,
2097 struct brw_page *pga)
2099 struct ptlrpc_request *request = NULL;
2100 struct ost_body *body;
2101 struct niobuf_remote *nioptr;
2102 struct obd_ioobj *iooptr;
2103 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2107 /* XXX does not handle 'new' brw protocol */
2109 size[1] = sizeof(struct obd_ioobj);
2110 size[2] = page_count * sizeof(*nioptr);
2112 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2113 OST_SAN_READ, 3, size, NULL);
2117 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2118 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2119 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2120 sizeof(*nioptr) * page_count);
2122 memcpy(&body->oa, oa, sizeof(body->oa));
2124 obdo_to_ioobj(oa, iooptr);
2125 iooptr->ioo_bufcnt = page_count;
2127 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2128 LASSERT(PageLocked(pga[mapped].pg));
2129 LASSERT(mapped == 0 ||
2130 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2132 nioptr->offset = pga[mapped].disk_offset;
2133 nioptr->len = pga[mapped].count;
2134 nioptr->flags = pga[mapped].flag;
2137 size[1] = page_count * sizeof(*nioptr);
2138 request->rq_replen = lustre_msg_size(2, size);
2140 rc = ptlrpc_queue_wait(request);
2144 body = lustre_swab_repbuf(request, 0, sizeof(*body),
2145 lustre_swab_ost_body);
2147 CERROR("Can't unpack body\n");
2148 GOTO(out_req, rc = -EPROTO);
2151 memcpy(oa, &body->oa, sizeof(*oa));
2153 swab = lustre_msg_swabbed(request->rq_repmsg);
2154 LASSERT_REPSWAB(request, 1);
2155 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2157 /* nioptr missing or short */
2158 GOTO(out_req, rc = -EPROTO);
2162 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2163 struct page *page = pga[mapped].pg;
2164 struct buffer_head *bh;
2168 lustre_swab_niobuf_remote (nioptr);
2170 /* get the SAN device associated with this export */
2171 LASSERT(exp->exp_obd != NULL);
2172 dev = exp->exp_obd->u.cli.cl_sandev;
2175 if (!nioptr->offset) {
2176 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
2177 page->mapping->host->i_ino,
2179 memset(page_address(page), 0, PAGE_SIZE);
2183 if (!page->buffers) {
2184 create_empty_buffers(page, dev, PAGE_SIZE);
2187 clear_bit(BH_New, &bh->b_state);
2188 set_bit(BH_Mapped, &bh->b_state);
2189 bh->b_blocknr = (unsigned long)nioptr->offset;
2191 clear_bit(BH_Uptodate, &bh->b_state);
2193 ll_rw_block(READ, 1, &bh);
2197 /* if buffer already existed, it must be the
2198 * one we mapped before, check it */
2199 LASSERT(!test_bit(BH_New, &bh->b_state));
2200 LASSERT(test_bit(BH_Mapped, &bh->b_state));
2201 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
2203 /* wait for its io completion */
2204 if (test_bit(BH_Lock, &bh->b_state))
2207 if (!test_bit(BH_Uptodate, &bh->b_state))
2208 ll_rw_block(READ, 1, &bh);
2212 /* must do synchronous write here */
2214 if (!buffer_uptodate(bh)) {
2222 ptlrpc_req_finished(request);
2226 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2227 struct lov_stripe_md *lsm, obd_count page_count,
2228 struct brw_page *pga)
2230 struct ptlrpc_request *request = NULL;
2231 struct ost_body *body;
2232 struct niobuf_remote *nioptr;
2233 struct obd_ioobj *iooptr;
2234 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2238 size[1] = sizeof(struct obd_ioobj);
2239 size[2] = page_count * sizeof(*nioptr);
2241 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2242 OST_SAN_WRITE, 3, size, NULL);
2246 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
2247 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
2248 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2249 sizeof (*nioptr) * page_count);
2251 memcpy(&body->oa, oa, sizeof(body->oa));
2253 obdo_to_ioobj(oa, iooptr);
2254 iooptr->ioo_bufcnt = page_count;
2257 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2258 LASSERT(PageLocked(pga[mapped].pg));
2259 LASSERT(mapped == 0 ||
2260 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2262 nioptr->offset = pga[mapped].disk_offset;
2263 nioptr->len = pga[mapped].count;
2264 nioptr->flags = pga[mapped].flag;
2267 size[1] = page_count * sizeof(*nioptr);
2268 request->rq_replen = lustre_msg_size(2, size);
2270 rc = ptlrpc_queue_wait(request);
2274 swab = lustre_msg_swabbed (request->rq_repmsg);
2275 LASSERT_REPSWAB (request, 1);
2276 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2278 CERROR("absent/short niobuf array\n");
2279 GOTO(out_req, rc = -EPROTO);
2283 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2284 struct page *page = pga[mapped].pg;
2285 struct buffer_head *bh;
2289 lustre_swab_niobuf_remote (nioptr);
2291 /* get the SAN device associated with this export */
2292 LASSERT(exp->exp_obd != NULL);
2293 dev = exp->exp_obd->u.cli.cl_sandev;
2295 if (!page->buffers) {
2296 create_empty_buffers(page, dev, PAGE_SIZE);
2299 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2300 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2301 LASSERT(page->buffers->b_blocknr ==
2302 (unsigned long)nioptr->offset);
2308 /* if the buffer is locked, wait for its I/O completion */
2309 if (test_bit(BH_Lock, &bh->b_state))
2312 clear_bit(BH_New, &bh->b_state);
2313 set_bit(BH_Mapped, &bh->b_state);
2315 /* override the block nr */
2316 bh->b_blocknr = (unsigned long)nioptr->offset;
2318 /* we are about to write it, so set it
2320 * page lock should guarantee no race condition here */
2321 set_bit(BH_Uptodate, &bh->b_state);
2322 set_bit(BH_Dirty, &bh->b_state);
2324 ll_rw_block(WRITE, 1, &bh);
2326 /* must do a synchronous write here */
2328 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2336 ptlrpc_req_finished(request);
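/* Split a SAN brw into chunks of at most PTLRPC_MAX_BRW_PAGES pages and hand
 * each chunk to sanosc_brw_read()/sanosc_brw_write(). */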
2340 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2341 struct lov_stripe_md *lsm, obd_count page_count,
2342 struct brw_page *pga, struct obd_trans_info *oti)
2346 while (page_count) {
2347 obd_count pages_per_brw;
2350 if (page_count > PTLRPC_MAX_BRW_PAGES)
2351 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
2353 pages_per_brw = page_count;
2355 if (cmd & OBD_BRW_WRITE)
2356 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw, pga);
2358 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2363 page_count -= pages_per_brw;
2364 pga += pages_per_brw;
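/* Attach @data (the inode) to the lock's l_ast_data, warning if a different,
 * still-live inode is already attached. */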
2371 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2373 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2376 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
2380 l_lock(&lock->l_resource->lr_namespace->ns_lock);
2382 if (lock->l_ast_data && lock->l_ast_data != data) {
2383 struct inode *new_inode = data;
2384 struct inode *old_inode = lock->l_ast_data;
2385 if (!(old_inode->i_state & I_FREEING))
2386 LDLM_ERROR(lock, "inconsistent l_ast_data found");
2387 LASSERTF(old_inode->i_state & I_FREEING,
2388 "Found existing inode %p/%lu/%u state %lu in lock: "
2389 "setting data to %p/%lu/%u\n", old_inode,
2390 old_inode->i_ino, old_inode->i_generation,
2392 new_inode, new_inode->i_ino, new_inode->i_generation);
2395 lock->l_ast_data = data;
2396 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
2397 LDLM_LOCK_PUT(lock);
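/* Apply @replace to every lock cached on this object's resource to update its
 * callback data. */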
2400 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2401 ldlm_iterator_t replace, void *data)
2403 struct ldlm_res_id res_id = { .name = {0} };
2404 struct obd_device *obd = class_exp2obd(exp);
2406 res_id.name[0] = lsm->lsm_object_id;
2407 res_id.name[2] = lsm->lsm_object_gr;
2408 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
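/* Acquire an extent lock on the stripe object: reuse or convert a matching
 * cached lock if possible, otherwise enqueue a new one on the OST. */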
2412 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2413 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2414 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2415 void *data, __u32 lvb_len, void *lvb_swabber,
2416 struct lustre_handle *lockh)
2418 struct obd_device *obd = exp->exp_obd;
2419 struct ldlm_res_id res_id = { .name = {0} };
2421 struct ldlm_reply *rep;
2422 struct ptlrpc_request *req = NULL;
2426 res_id.name[0] = lsm->lsm_object_id;
2427 res_id.name[2] = lsm->lsm_object_gr;
2429 /* Filesystem lock extents are extended to page boundaries so that
2430 * dealing with the page cache is a little smoother. */
2431 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2432 policy->l_extent.end |= ~PAGE_MASK;
2434 if (lsm->lsm_oinfo->loi_kms_valid == 0)
2437 /* Next, search for already existing extent locks that will cover us */
2438 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2441 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2442 /* return immediately if no credential held */
2443 ldlm_lock_decref(lockh, mode);
2447 osc_set_data_with_check(lockh, data);
2448 if (*flags & LDLM_FL_HAS_INTENT) {
2449 /* I would like to be able to ASSERT here that rss <=
2450 * kms, but I can't, for reasons which are explained in
2453 /* We already have a lock, and it's referenced */
2457 /* If we're trying to read, we also search for an existing PW lock. The
2458 * VFS and page cache already protect us locally, so lots of readers/
2459 * writers can share a single PW lock.
2461 * There are problems with conversion deadlocks, so instead of
2462 * converting a read lock to a write lock, we'll just enqueue a new
2465 * At some point we should cancel the read lock instead of making them
2466 * send us a blocking callback, but there are problems with canceling
2467 * locks out from other users right now, too. */
2469 if (mode == LCK_PR) {
2470 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2471 policy, LCK_PW, lockh);
2473 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2474 /* return immediately if no credential held */
2475 ldlm_lock_decref(lockh, LCK_PW);
2479 /* FIXME: This is not incredibly elegant, but it might
2480 * be more elegant than adding another parameter to
2481 * lock_match. I want a second opinion. */
2482 ldlm_lock_addref(lockh, LCK_PR);
2483 ldlm_lock_decref(lockh, LCK_PW);
2484 osc_set_data_with_check(lockh, data);
2488 if (mode == LCK_PW) {
2489 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2490 policy, LCK_PR, lockh);
2492 rc = ldlm_cli_convert(lockh, mode, flags);
2494 /* Update readers/writers accounting */
2495 ldlm_lock_addref(lockh, LCK_PW);
2496 ldlm_lock_decref(lockh, LCK_PR);
2497 osc_set_data_with_check(lockh, data);
2500 /* If the conversion failed, we need to drop the refcount
2501 on the matched lock before we get a new one */
2502 /* XXX Wouldn't it save us some effort if we cancelled the PR
2503 lock here? We are going to take a PW lock anyway, and it
2504 will invalidate the PR lock */
2505 ldlm_lock_decref(lockh, LCK_PR);
2506 if (rc != EDEADLOCK) {
2512 if (mode == LCK_PW) {
2513 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2514 policy, LCK_PR, lockh);
2516 rc = ldlm_cli_convert(lockh, mode, flags);
2518 /* Update readers/writers accounting */
2519 ldlm_lock_addref(lockh, LCK_PW);
2520 ldlm_lock_decref(lockh, LCK_PR);
2521 osc_set_data_with_check(lockh, data);
2524 /* If the conversion failed, we need to drop the refcount
2525 on the matched lock before we get a new one */
2526 /* XXX Wouldn't it save us some effort if we cancelled the PR
2527 lock here? We are going to take a PW lock anyway, and it
2528 will invalidate the PR lock */
2529 ldlm_lock_decref(lockh, LCK_PR);
2530 if (rc != EDEADLOCK) {
2537 if (*flags & LDLM_FL_HAS_INTENT) {
2538 int size[2] = {0, sizeof(struct ldlm_request)};
2540 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
2541 LDLM_ENQUEUE, 2, size, NULL);
2545 size[0] = sizeof(*rep);
2546 size[1] = sizeof(lvb);
2547 req->rq_replen = lustre_msg_size(2, size);
2549 rc = ldlm_cli_enqueue(exp, req, obd->obd_namespace, res_id, type,
2550 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2551 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
2553 if (rc == ELDLM_LOCK_ABORTED) {
2554 /* swabbed by ldlm_cli_enqueue() */
2555 LASSERT_REPSWABBED(req, 0);
2556 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
2557 LASSERT(rep != NULL);
2558 if (rep->lock_policy_res1)
2559 rc = rep->lock_policy_res1;
2561 ptlrpc_req_finished(req);
2564 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
2565 CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
2566 lvb.lvb_size, lvb.lvb_blocks);
2567 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2568 lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
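/* Check for an already-granted local lock covering the extent without
 * enqueueing a new one. */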
2574 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2575 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2576 int *flags, void *data, struct lustre_handle *lockh)
2578 struct ldlm_res_id res_id = { .name = {0} };
2579 struct obd_device *obd = exp->exp_obd;
2583 res_id.name[0] = lsm->lsm_object_id;
2584 res_id.name[2] = lsm->lsm_object_gr;
2586 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2588 /* Filesystem lock extents are extended to page boundaries so that
2589 * dealing with the page cache is a little smoother */
2590 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2591 policy->l_extent.end |= ~PAGE_MASK;
2593 /* Next, search for already existing extent locks that will cover us */
2594 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2595 policy, mode, lockh);
2597 // if (!(*flags & LDLM_FL_TEST_LOCK))
2598 osc_set_data_with_check(lockh, data);
2601 /* If we're trying to read, we also search for an existing PW lock. The
2602 * VFS and page cache already protect us locally, so lots of readers/
2603 * writers can share a single PW lock. */
2604 if (mode == LCK_PR) {
2605 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2606 policy, LCK_PW, lockh);
2607 if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
2608 /* FIXME: This is not incredibly elegant, but it might
2609 * be more elegant than adding another parameter to
2610 * lock_match. I want a second opinion. */
2611 osc_set_data_with_check(lockh, data);
2612 ldlm_lock_addref(lockh, LCK_PR);
2613 ldlm_lock_decref(lockh, LCK_PW);
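/* Release one lock reference; group locks are cancelled outright. */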
2619 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2620 __u32 mode, struct lustre_handle *lockh)
2624 if (mode == LCK_GROUP)
2625 ldlm_lock_decref_and_cancel(lockh, mode);
2627 ldlm_lock_decref(lockh, mode);
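/* Cancel unused cached locks for this object via ldlm_cli_cancel_unused(). */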
2632 static int osc_cancel_unused(struct obd_export *exp,
2633 struct lov_stripe_md *lsm,
2634 int flags, void *opaque)
2636 struct obd_device *obd = class_exp2obd(exp);
2637 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
2640 res_id.name[0] = lsm->lsm_object_id;
2641 res_id.name[2] = lsm->lsm_object_gr;
2645 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
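/* Query the OST for its current space usage (OST_STATFS). */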
2648 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2649 unsigned long max_age)
2651 struct obd_statfs *msfs;
2652 struct ptlrpc_request *request;
2653 int rc, size = sizeof(*osfs);
2656 /* We could possibly pass max_age in the request (as an absolute
2657 * timestamp or a "seconds.usec ago") so the target can avoid doing
2658 * extra calls into the filesystem if that isn't necessary (e.g.
2659 * during mount that would help a bit). Having relative timestamps
2660 * is not so great if request processing is slow, while absolute
2661 * timestamps are not ideal because they need time synchronization. */
2662 request = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
2663 OST_STATFS, 0, NULL, NULL);
2667 request->rq_replen = lustre_msg_size(1, &size);
2668 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2670 rc = ptlrpc_queue_wait(request);
2674 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2675 lustre_swab_obd_statfs);
2677 CERROR("Can't unpack obd_statfs\n");
2678 GOTO(out, rc = -EPROTO);
2681 memcpy(osfs, msfs, sizeof(*osfs));
2685 ptlrpc_req_finished(request);
2689 /* Retrieve object striping information.
2691 * @lump points to a userspace struct lov_user_md; lmm_stripe_count indicates
2692 * the maximum number of OST indices which will fit in the user buffer.
2693 * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here).
2695 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2697 struct lov_user_md lum, *lumk;
2704 rc = copy_from_user(&lum, lump, sizeof(lum));
2708 if (lum.lmm_magic != LOV_USER_MAGIC)
2711 if (lum.lmm_stripe_count > 0) {
2712 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2713 OBD_ALLOC(lumk, lum_size);
2717 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2718 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
2720 lum_size = sizeof(lum);
2724 lumk->lmm_object_id = lsm->lsm_object_id;
2725 lumk->lmm_object_gr = lsm->lsm_object_gr;
2726 lumk->lmm_stripe_count = 1;
2728 if (copy_to_user(lump, lumk, lum_size))
2732 OBD_FREE(lumk, lum_size);
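/* Handle OSC ioctls: LOV config queries, stripe get/set and recovery control. */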
2737 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2738 void *karg, void *uarg)
2740 struct obd_device *obd = exp->exp_obd;
2741 struct obd_ioctl_data *data = karg;
2745 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2748 if (!try_module_get(THIS_MODULE)) {
2749 CERROR("Can't get module. Is it alive?");
2754 case OBD_IOC_LOV_GET_CONFIG: {
2756 struct lov_desc *desc;
2757 struct obd_uuid uuid;
2761 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2762 GOTO(out, err = -EINVAL);
2764 data = (struct obd_ioctl_data *)buf;
2766 if (sizeof(*desc) > data->ioc_inllen1) {
2768 GOTO(out, err = -EINVAL);
2771 if (data->ioc_inllen2 < sizeof(uuid)) {
2773 GOTO(out, err = -EINVAL);
2776 if (data->ioc_inllen3 < sizeof(__u32)) {
2778 GOTO(out, err = -EINVAL);
2781 desc = (struct lov_desc *)data->ioc_inlbuf1;
2782 desc->ld_tgt_count = 1;
2783 desc->ld_active_tgt_count = 1;
2784 desc->ld_default_stripe_count = 1;
2785 desc->ld_default_stripe_size = 0;
2786 desc->ld_default_stripe_offset = 0;
2787 desc->ld_pattern = 0;
2788 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2789 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2790 *((__u32 *)data->ioc_inlbuf3) = 1;
2792 err = copy_to_user((void *)uarg, buf, len);
2795 obd_ioctl_freedata(buf, len);
2798 case LL_IOC_LOV_SETSTRIPE:
2799 err = obd_alloc_memmd(exp, karg);
2803 case LL_IOC_LOV_GETSTRIPE:
2804 err = osc_getstripe(karg, uarg);
2806 case OBD_IOC_CLIENT_RECOVER:
2807 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2812 case IOC_OSC_SET_ACTIVE:
2813 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2816 case IOC_OSC_CTL_RECOVERY:
2817 err = ptlrpc_import_control_recovery(obd->u.cli.cl_import,
2821 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
2822 GOTO(out, err = -ENOTTY);
2825 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2828 module_put(THIS_MODULE);
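/* obd_get_info handler: answer "lock_to_stripe" locally and fetch "last_id"
 * from the OST via OST_GET_INFO. */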
2833 static int osc_get_info(struct obd_export *exp, __u32 keylen,
2834 void *key, __u32 *vallen, void *val)
2837 if (!vallen || !val)
2840 if (keylen > strlen("lock_to_stripe") &&
2841 strcmp(key, "lock_to_stripe") == 0) {
2842 __u32 *stripe = val;
2843 *vallen = sizeof(*stripe);
2846 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2847 struct ptlrpc_request *req;
2849 char *bufs[1] = {key};
2851 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2852 OST_GET_INFO, 1, (int *)&keylen, bufs);
2856 req->rq_replen = lustre_msg_size(1, (int *)vallen);
2857 rc = ptlrpc_queue_wait(req);
2861 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2862 lustre_swab_ost_last_id);
2863 if (reply == NULL) {
2864 CERROR("Can't unpack OST last ID\n");
2865 GOTO(out, rc = -EPROTO);
2867 *((obd_id *)val) = *reply;
2869 ptlrpc_req_finished(req);
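/* obd_set_info handler: tweak object-creator and recovery flags, security
 * settings, and for "mds_conn" connect the unlink llog to the OST. */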
2875 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2876 void *key, obd_count vallen, void *val)
2878 struct obd_device *obd = exp->exp_obd;
2879 struct obd_import *imp = class_exp2cliimp(exp);
2880 struct llog_ctxt *ctxt;
2884 if (keylen == strlen("unlinked") &&
2885 memcmp(key, "unlinked", keylen) == 0) {
2886 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2887 spin_lock(&oscc->oscc_lock);
2888 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2889 spin_unlock(&oscc->oscc_lock);
2892 if (keylen == strlen("unrecovery") &&
2893 memcmp(key, "unrecovery", keylen) == 0) {
2894 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2895 spin_lock(&oscc->oscc_lock);
2896 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
2897 spin_unlock(&oscc->oscc_lock);
2900 if (keylen == strlen("initial_recov") &&
2901 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
2902 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2903 if (vallen != sizeof(int))
2905 imp->imp_initial_recov = *(int *)val;
2906 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
2907 exp->exp_obd->obd_name,
2908 imp->imp_initial_recov);
2912 if (keylen == strlen("async") &&
2913 memcmp(key, "async", keylen) == 0) {
2914 struct client_obd *cl = &obd->u.cli;
2915 if (vallen != sizeof(int))
2917 cl->cl_async = *(int *)val;
2918 CDEBUG(D_HA, "%s: set async = %d\n",
2919 obd->obd_name, cl->cl_async);
2923 if (keylen == strlen("sec") &&
2924 memcmp(key, "sec", keylen) == 0) {
2925 struct client_obd *cli = &exp->exp_obd->u.cli;
2927 cli->cl_sec_flavor = ptlrpcs_name2flavor(val);
2928 if (cli->cl_sec_flavor == PTLRPCS_FLVR_INVALID) {
2929 CERROR("unrecognized security flavor %s\n", (char*) val);
2936 if (keylen == strlen("sec_flags") &&
2937 memcmp(key, "sec_flags", keylen) == 0) {
2938 struct client_obd *cli = &exp->exp_obd->u.cli;
2940 cli->cl_sec_flags = *((unsigned long *) val);
2944 if (keylen == strlen("flush_cred") &&
2945 memcmp(key, "flush_cred", keylen) == 0) {
2946 struct client_obd *cli = &exp->exp_obd->u.cli;
2949 ptlrpcs_import_flush_creds(cli->cl_import,
2954 if (keylen < strlen("mds_conn") ||
2955 memcmp(key, "mds_conn", keylen) != 0)
2958 ctxt = llog_get_context(&exp->exp_obd->obd_llogs,
2959 LLOG_UNLINK_ORIG_CTXT);
2962 rc = llog_initiator_connect(ctxt);
2964 CERROR("cannot establish the connect for "
2965 "ctxt %p: %d\n", ctxt, rc);
2968 imp->imp_server_timeout = 1;
2969 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
2970 imp->imp_pingable = 1;
2976 static struct llog_operations osc_size_repl_logops = {
2977 lop_cancel: llog_obd_repl_cancel
2980 static struct llog_operations osc_unlink_orig_logops;
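/* Set up the unlink-origin and size-replicator llog contexts for this OSC. */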
2982 static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
2983 struct obd_device *tgt, int count,
2984 struct llog_catid *catid)
2989 osc_unlink_orig_logops = llog_lvfs_ops;
2990 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
2991 osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
2992 osc_unlink_orig_logops.lop_add = llog_catalog_add;
2993 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
2995 rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
2996 &catid->lci_logid, &osc_unlink_orig_logops);
3000 rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
3001 &osc_size_repl_logops);
3005 static int osc_llog_finish(struct obd_device *obd,
3006 struct obd_llogs *llogs, int count)
3011 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
3015 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
3019 static int osc_connect(struct lustre_handle *exph,
3020 struct obd_device *obd, struct obd_uuid *cluuid,
3021 struct obd_connect_data *data,
3022 unsigned long connect_flags)
3026 rc = client_connect_import(exph, obd, cluuid, data, connect_flags);
3030 static int osc_disconnect(struct obd_export *exp, unsigned long flags)
3032 struct obd_device *obd = class_exp2obd(exp);
3033 struct llog_ctxt *ctxt;
3037 ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
3038 if (obd->u.cli.cl_conn_count == 1)
3039 /* flush any remaining cancel messages out to the target */
3040 llog_sync(ctxt, exp);
3042 rc = client_disconnect_export(exp, flags);
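/* React to import state changes: flag recovery on the MDS's OSCs, drop grants
 * and locks on invalidation, and notify the observer on (in)activation. */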
3046 static int osc_import_event(struct obd_device *obd,
3047 struct obd_import *imp,
3048 enum obd_import_event event)
3050 struct client_obd *cli;
3053 LASSERT(imp->imp_obd == obd);
3056 case IMP_EVENT_DISCON: {
3057 /* Only do this for the OSCs on the MDS */
3058 if (imp->imp_server_timeout) {
3059 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3061 spin_lock(&oscc->oscc_lock);
3062 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3063 spin_unlock(&oscc->oscc_lock);
3067 case IMP_EVENT_INACTIVE: {
3068 if (obd->obd_observer)
3069 rc = obd_notify(obd->obd_observer, obd, 0, 0);
3072 case IMP_EVENT_INVALIDATE: {
3073 struct ldlm_namespace *ns = obd->obd_namespace;
3077 spin_lock(&cli->cl_loi_list_lock);
3078 cli->cl_avail_grant = 0;
3079 cli->cl_lost_grant = 0;
3080 /* all pages go to failing rpcs due to the invalid import */
3081 osc_check_rpcs(cli);
3082 spin_unlock(&cli->cl_loi_list_lock);
3084 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3088 case IMP_EVENT_ACTIVE: {
3089 /* Only do this for the OSCs on the MDS */
3090 if (imp->imp_server_timeout) {
3091 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3093 spin_lock(&oscc->oscc_lock);
3094 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3095 spin_unlock(&oscc->oscc_lock);
3098 if (obd->obd_observer)
3099 rc = obd_notify(obd->obd_observer, obd, 1, 0);
3103 CERROR("Unknown import event %d\n", event);
3109 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
3111 struct lprocfs_static_vars lvars;
3115 lprocfs_init_vars(osc, &lvars);
3116 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3120 rc = lproc_osc_attach_seqstat(dev);
3122 lprocfs_obd_detach(dev);
3126 ptlrpc_lprocfs_register_obd(dev);
3130 static int osc_detach(struct obd_device *dev)
3132 ptlrpc_lprocfs_unregister_obd(dev);
3133 return lprocfs_obd_detach(dev);
3136 static int osc_setup(struct obd_device *obd, obd_count len, void *buf)
3140 rc = ptlrpcd_addref();
3144 rc = client_obd_setup(obd, len, buf);
3153 static int osc_cleanup(struct obd_device *obd, int flags)
3155 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3158 rc = ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
3159 LDLM_FL_CONFIG_CHANGE, NULL);
3163 spin_lock(&oscc->oscc_lock);
3164 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
3165 oscc->oscc_flags |= OSCC_FLAG_EXITING;
3166 spin_unlock(&oscc->oscc_lock);
3168 rc = client_obd_cleanup(obd, flags);
3173 struct obd_ops osc_obd_ops = {
3174 .o_owner = THIS_MODULE,
3175 .o_attach = osc_attach,
3176 .o_detach = osc_detach,
3177 .o_setup = osc_setup,
3178 .o_cleanup = osc_cleanup,
3179 .o_add_conn = client_import_add_conn,
3180 .o_del_conn = client_import_del_conn,
3181 .o_connect = osc_connect,
3182 .o_disconnect = osc_disconnect,
3183 .o_statfs = osc_statfs,
3184 .o_packmd = osc_packmd,
3185 .o_unpackmd = osc_unpackmd,
3186 .o_create = osc_create,
3187 .o_destroy = osc_destroy,
3188 .o_getattr = osc_getattr,
3189 .o_getattr_async = osc_getattr_async,
3190 .o_setattr = osc_setattr,
3192 .o_brw_async = osc_brw_async,
3193 .o_prep_async_page = osc_prep_async_page,
3194 .o_queue_async_io = osc_queue_async_io,
3195 .o_set_async_flags = osc_set_async_flags,
3196 .o_queue_group_io = osc_queue_group_io,
3197 .o_trigger_group_io = osc_trigger_group_io,
3198 .o_teardown_async_page = osc_teardown_async_page,
3199 .o_punch = osc_punch,
3201 .o_enqueue = osc_enqueue,
3202 .o_match = osc_match,
3203 .o_change_cbdata = osc_change_cbdata,
3204 .o_cancel = osc_cancel,
3205 .o_cancel_unused = osc_cancel_unused,
3206 .o_iocontrol = osc_iocontrol,
3207 .o_get_info = osc_get_info,
3208 .o_set_info = osc_set_info,
3209 .o_import_event = osc_import_event,
3210 .o_llog_init = osc_llog_init,
3211 .o_llog_finish = osc_llog_finish,
3214 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3215 struct obd_ops sanosc_obd_ops = {
3216 .o_owner = THIS_MODULE,
3217 .o_attach = osc_attach,
3218 .o_detach = osc_detach,
3219 .o_cleanup = client_obd_cleanup,
3220 .o_add_conn = client_import_add_conn,
3221 .o_del_conn = client_import_del_conn,
3222 .o_connect = osc_connect,
3223 .o_disconnect = client_disconnect_export,
3224 .o_statfs = osc_statfs,
3225 .o_packmd = osc_packmd,
3226 .o_unpackmd = osc_unpackmd,
3227 .o_create = osc_real_create,
3228 .o_destroy = osc_destroy,
3229 .o_getattr = osc_getattr,
3230 .o_getattr_async = osc_getattr_async,
3231 .o_setattr = osc_setattr,
3232 .o_setup = client_sanobd_setup,
3233 .o_brw = sanosc_brw,
3234 .o_punch = osc_punch,
3236 .o_enqueue = osc_enqueue,
3237 .o_match = osc_match,
3238 .o_change_cbdata = osc_change_cbdata,
3239 .o_cancel = osc_cancel,
3240 .o_cancel_unused = osc_cancel_unused,
3241 .o_iocontrol = osc_iocontrol,
3242 .o_import_event = osc_import_event,
3243 .o_llog_init = osc_llog_init,
3244 .o_llog_finish = osc_llog_finish,
3248 int __init osc_init(void)
3250 struct lprocfs_static_vars lvars;
3251 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3252 struct lprocfs_static_vars sanlvars;
3257 lprocfs_init_vars(osc, &lvars);
3258 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3259 lprocfs_init_vars(osc, &sanlvars);
3262 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
3267 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3268 rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
3269 LUSTRE_SANOSC_NAME);
3271 class_unregister_type(LUSTRE_OSC_NAME);
3278 static void /*__exit*/ osc_exit(void)
3280 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3281 class_unregister_type(LUSTRE_SANOSC_NAME);
3283 class_unregister_type(LUSTRE_OSC_NAME);
3286 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3287 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3288 MODULE_LICENSE("GPL");
3290 module_init(osc_init);
3291 module_exit(osc_exit);