/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2001-2003 Cluster File Systems, Inc.
 *   Author Peter Braam <braam@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  For testing and management it is treated as an obd_device,
 *  although it does not export a full OBD method table (the
 *  requests are coming in over the wire, so object target modules
 *  do not have a full method table.)
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC

#ifdef __KERNEL__
# include <linux/version.h>
# include <linux/module.h>
# include <linux/mm.h>
# include <linux/highmem.h>
# include <linux/ctype.h>
# include <linux/init.h>
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#  include <linux/workqueue.h>
#  include <linux/smp_lock.h>
# else
#  include <linux/locks.h>
# endif
#else /* __KERNEL__ */
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <libcfs/kp30.h>
#include <linux/lustre_net.h>
#include <linux/lustre_sec.h>
#include <lustre/lustre_user.h>
#include <linux/obd_ost.h>
#include <linux/obd_lov.h>

#ifdef __CYGWIN__
# include <ctype.h>
#endif

#include <linux/lustre_ha.h>
#include <linux/lprocfs_status.h>
#include <linux/lustre_log.h>
#include "osc_internal.h"

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;
        ENTRY;

        lmm_size = sizeof(**lmmp);
        if (!lmmp)
                RETURN(lmm_size);

        if (*lmmp && !lsm) {
                OBD_FREE(*lmmp, lmm_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (!*lmmp) {
                OBD_ALLOC(*lmmp, lmm_size);
                if (!*lmmp)
                        RETURN(-ENOMEM);
        }

        if (lsm) {
                LASSERT(lsm->lsm_object_id);
                LASSERT(lsm->lsm_object_gr);
                (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
                (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
        }

        RETURN(lmm_size);
}

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;
        ENTRY;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof (*lmm)) {
                        CERROR("lov_mds_md too small: %d, need %d\n",
                               lmm_bytes, (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (lmm->lmm_object_id == 0) {
                        CERROR("lov_mds_md: zero lmm_object_id\n");
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (*lsmp == NULL)
                        RETURN(-ENOMEM);
                loi_init((*lsmp)->lsm_oinfo);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
                (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
                LASSERT((*lsmp)->lsm_object_id);
                LASSERT((*lsmp)->lsm_object_gr);
        }

        (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}

static int osc_getattr_interpret(struct ptlrpc_request *req,
                                 struct osc_getattr_async_args *aa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                RETURN(rc);

        body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));

                /* This should really be sent by the OST */
                aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
                aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CERROR("can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oa->o_valid = 0;
        }

        RETURN(rc);
}

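/* getattr without waiting: queue the request on @set and let
 * osc_getattr_interpret() above copy the reply into aa->aa_oa when the
 * set is reaped. */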
static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
                             struct lov_stripe_md *md,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int size = sizeof(*body);
        struct osc_getattr_async_args *aa;
        ENTRY;

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_GETATTR, 1, &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        request->rq_replen = lustre_msg_size(1, &size);
        request->rq_interpret_reply = osc_getattr_interpret;

        LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
        aa = (struct osc_getattr_async_args *)&request->rq_async_args;
        aa->aa_oa = oa;

        ptlrpc_set_add_req (set, request);
        RETURN (0);
}

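/* synchronous getattr: same request as the async version above, but waits
 * in place for the reply. */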
static int osc_getattr(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *md)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);
        ENTRY;

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_GETATTR, 1, &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc) {
                CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
                GOTO(out, rc);
        }

        body = lustre_swab_repbuf(request, 0, sizeof (*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out, rc = -EPROTO);
        }

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        memcpy(oa, &body->oa, sizeof(*oa));

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(request);
        return rc;
}

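/* push the attributes in oa to the OST.  with OBD_MODE_ASYNC in oti_flags
 * the request is handed to ptlrpcd and no reply is unpacked. */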
static int osc_setattr(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *md, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);
        ENTRY;

        LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_SETATTR, 1, &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        request->rq_replen = lustre_msg_size(1, &size);

        if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
                ptlrpcd_add_req(request);
                rc = 0;
        } else {
                rc = ptlrpc_queue_wait(request);
                if (rc)
                        GOTO(out, rc);

                body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                          lustre_swab_ost_body);
                if (body == NULL)
                        GOTO(out, rc = -EPROTO);

                memcpy(oa, &body->oa, sizeof(*oa));
        }

        EXIT;
out:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

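/* create an object on the OST.  if *ea is NULL a temporary lov_stripe_md
 * is allocated; on success the object id/group from the reply are copied
 * into the lsm and any unlink llog cookie is saved in oti. */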
int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        struct lov_stripe_md *lsm;
        int rc, size = sizeof(*body);
        ENTRY;

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        RETURN(rc);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_CREATE, 1, &size, NULL);
        if (!request)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(body->oa));

        request->rq_replen = lustre_msg_size(1, &size);
        if (oa->o_valid & OBD_MD_FLINLINE) {
                LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
                        oa->o_flags == OBD_FL_DELORPHAN);
                DEBUG_REQ(D_HA, request,
                          "delorphan from OST integration");
                /* Don't resend the delorphan request */
                request->rq_no_resend = request->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out_req, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out_req, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way. */
        lsm->lsm_object_id = oa->o_id;
        lsm->lsm_object_gr = oa->o_gr;
        *ea = lsm;

        if (oti != NULL) {
                oti->oti_transno = request->rq_repmsg->transno;

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        memcpy(oti->oti_logcookies, obdo_logcookie(oa),
                               sizeof(oti->oti_onecookie));
                }
        }

        CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
        EXIT;
out_req:
        ptlrpc_req_finished(request);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        return rc;
}

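/* truncate an object on the OST; the [start, end] range travels in the
 * otherwise unused size/blocks fields of the obdo (see below). */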
static int osc_punch(struct obd_export *exp, struct obdo *oa,
                     struct lov_stripe_md *md, obd_size start,
                     obd_size end, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);
        ENTRY;

        if (!oa) {
                CERROR("oa NULL\n");
                RETURN(-EINVAL);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_PUNCH, 1, &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        /* overload the size and blocks fields in the oa with start/end */
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out, rc);

        body = lustre_swab_repbuf (request, 0, sizeof (*body),
                                   lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

        EXIT;
out:
        ptlrpc_req_finished(request);
        return rc;
}

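/* flush dirty data for an object on the OST; the range is packed into the
 * obdo exactly as osc_punch() does it. */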
static int osc_sync(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md *md, obd_size start,
                    obd_size end)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);
        ENTRY;

        if (!oa) {
                CERROR("oa NULL\n");
                RETURN(-EINVAL);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_SYNC, 1, &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        /* overload the size and blocks fields in the oa with start/end */
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

        EXIT;
out:
        ptlrpc_req_finished(request);
        return rc;
}

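/* destroy an object on the OST.  a caller-supplied unlink llog cookie is
 * packed into the obdo so the OST can cancel the matching llog record. */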
static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);
        ENTRY;

        if (!oa) {
                CERROR("oa NULL\n");
                RETURN(-EINVAL);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_DESTROY, 1, &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
                memcpy(obdo_logcookie(oa), oti->oti_logcookies,
                       sizeof(*oti->oti_logcookies));
                oti->oti_logcookies++;
        }

        memcpy(&body->oa, oa, sizeof(*oa));
        request->rq_replen = lustre_msg_size(1, &size);

        if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
                ptlrpcd_add_req(request);
                rc = 0;
        } else {
                rc = ptlrpc_queue_wait(request);

                if (rc == -ENOENT)
                        rc = 0;

                if (rc) {
                        ptlrpc_req_finished(request);
                        RETURN(rc);
                }

                body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                          lustre_swab_ost_body);
                if (body == NULL) {
                        CERROR ("Can't unpack body\n");
                        ptlrpc_req_finished(request);
                        RETURN(-EPROTO);
                }

                memcpy(oa, &body->oa, sizeof(*oa));
                ptlrpc_req_finished(request);
        }
        RETURN(rc);
}

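/* piggy-back our cache accounting on an outgoing obdo so the OST can manage
 * grant: o_dirty is what we have cached, o_undirty how much more we could
 * cache, o_grant what we still hold and o_dropped what we lost track of
 * (e.g. truncated before writeback).  the OST returns fresh grant in its
 * reply, added back in osc_update_grant(). */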
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_valid bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct osc_async_page *oap)
{
        cli->cl_dirty += PAGE_SIZE;
        cli->cl_avail_grant -= PAGE_SIZE;
        oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
        LASSERT(cli->cl_avail_grant >= 0);
}

static unsigned long rpcs_in_flight(struct client_obd *cli)
{
        return cli->cl_r_in_flight + cli->cl_w_in_flight;
}

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
        struct list_head *l, *tmp;
        struct osc_cache_waiter *ocw;
        ENTRY;

        list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
                               cli->cl_dirty, cli->cl_dirty_max);
                        return;
                }

                /* if still dirty cache but no grant wait for pending RPCs that
                 * may yet return us some grant before doing sync writes */
                if (cli->cl_w_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
                               cli->cl_w_in_flight);
                        return;
                }

                ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
                list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                } else {
                        osc_consume_write_grant(cli, ocw->ocw_oap);
                }

                wake_up(&ocw->ocw_waitq);
        }

        EXIT;
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        spin_lock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
        cli->cl_avail_grant += body->oa.o_grant;
        /* waiters are woken in brw_interpret_oap */
        spin_unlock(&cli->cl_loi_list_lock);
}

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page *pga)
{
        char *ptr;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT (page_count > 0);

                if (pga->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga->count - nob_read);
                        kunmap(pga->pg);
                        page_count--;
                        pga++;
                        break;
                }

                nob_read -= pga->count;
                page_count--;
                pga++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
                memset(ptr, 0, pga->count);
                kunmap(pga->pg);
                pga++;
        }
}

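/* a BRW write reply carries one rc per niobuf: fail the whole write if any
 * niobuf failed, and sanity check the byte count the bulk layer moved. */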
static int check_write_rcs(struct ptlrpc_request *request,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page *pga)
{
        int *remote_rcs, i;

        /* return error if any niobuf was in error */
        remote_rcs = lustre_swab_repbuf(request, 1,
                                        sizeof(*remote_rcs) * niocount, NULL);
        if (remote_rcs == NULL) {
                CERROR("Missing/short RC vector on BRW_WRITE reply\n");
                return(-EPROTO);
        }
        if (lustre_msg_swabbed(request->rq_repmsg))
                for (i = 0; i < niocount; i++)
                        __swab32s((__u32 *)&remote_rcs[i]);

        for (i = 0; i < niocount; i++) {
                if (remote_rcs[i] < 0)
                        return(remote_rcs[i]);

                if (remote_rcs[i] != 0) {
                        CERROR("rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], request);
                        return(-EPROTO);
                }
        }

        if (request->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       request->rq_bulk->bd_nob_transferred, requested_nob);
                return(-EPROTO);
        }

        return (0);
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~OBD_BRW_FROM_GRANT;

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if ((p1->flag & mask) != (p2->flag & mask))
                        CERROR("is it ok to have flags 0x%x and 0x%x in the "
                               "same brw?\n", p1->flag, p2->flag);
                return 0;
        }

        return (p1->disk_offset + p1->count == p2->disk_offset);
}

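/* fold the first nob bytes of the page array into a simple checksum; only
 * used when CHECKSUM_BULK is enabled. */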
#if CHECKSUM_BULK
static obd_count cksum_pages(int nob, obd_count page_count,
                             struct brw_page *pga)
{
        obd_count cksum = 0;
        char *ptr;

        while (nob > 0) {
                LASSERT (page_count > 0);

                ptr = kmap(pga->pg);
                ost_checksum(&cksum, ptr + (pga->page_offset & (PAGE_SIZE - 1)),
                             pga->count > nob ? nob : pga->count);
                kunmap(pga->pg);

                nob -= pga->count;
                page_count--;
                pga++;
        }

        return (cksum);
}
#endif

static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page *pga, int *requested_nobp,
                                int *niocountp, struct ptlrpc_request **reqp)
{
        struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
        struct client_obd       *cli = &imp->imp_obd->u.cli;
        struct ost_body         *body;
        struct obd_ioobj        *ioobj;
        struct niobuf_remote    *niobuf;
        int                      niocount;
        int                      size[3];
        int                      i;
        int                      requested_nob;
        int                      opc;
        int                      rc;

        opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;

        for (niocount = i = 1; i < page_count; i++)
                if (!can_merge_pages(&pga[i - 1], &pga[i]))
                        niocount++;

        size[0] = sizeof(*body);
        size[1] = sizeof(*ioobj);
        size[2] = niocount * sizeof(*niobuf);

        req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opc, 3, size, NULL);
        if (req == NULL)
                return (-ENOMEM);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp (req, page_count,
                                             BULK_GET_SOURCE, OST_BULK_PORTAL);
        else
                desc = ptlrpc_prep_bulk_imp (req, page_count,
                                             BULK_PUT_SINK, OST_BULK_PORTAL);
        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
        niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));

        memcpy(&body->oa, oa, sizeof(*oa));

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;

        LASSERT (page_count > 0);

        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = &pga[i];
                struct brw_page *pg_prev = pg - 1;

                LASSERT(pg->count > 0);
                LASSERTF((pg->page_offset & ~PAGE_MASK)+ pg->count <= PAGE_SIZE,
                         "i: %d pg: %p pg_off: "LPU64", count: %u\n", i, pg,
                         pg->page_offset, pg->count);
                LASSERTF(i == 0 || pg->disk_offset > pg_prev->disk_offset,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, pg->pg->private, pg->pg->index, pg->disk_offset,
                         pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
                         pg_prev->disk_offset);

                ptlrpc_prep_bulk_page(desc, pg->pg,
                                      pg->page_offset & ~PAGE_MASK, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->disk_offset;
                        niobuf->len    = pg->count;
                        niobuf->flags  = pg->flag;
                }
        }

        LASSERT((void *)(niobuf - niocount) ==
                lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);

        /* size[0] still sizeof (*body) */
        if (opc == OST_WRITE) {
#if CHECKSUM_BULK
                body->oa.o_valid |= OBD_MD_FLCKSUM;
                body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
#endif
                /* 1 RC per niobuf */
                size[1] = sizeof(__u32) * niocount;
                req->rq_replen = lustre_msg_size(2, size);
        } else {
                /* 1 RC for the whole I/O */
                req->rq_replen = lustre_msg_size(1, size);
        }

        *niocountp = niocount;
        *requested_nobp = requested_nob;
        *reqp = req;
        return (0);

 out:
        ptlrpc_req_finished (req);
        return (rc);
}

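/* common reply handling for sync and async BRWs: update our grant, copy the
 * obdo back, then check per-niobuf rcs for writes or the transferred byte
 * count (and possible short read) for reads. */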
static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
                                int requested_nob, int niocount,
                                obd_count page_count, struct brw_page *pga,
                                int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;
        ENTRY;

        if (rc < 0)
                RETURN(rc);

        body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("Can't unpack body\n");
                RETURN(-EPROTO);
        }

        osc_update_grant(cli, body);
        memcpy(oa, &body->oa, sizeof(*oa));

        if (req->rq_reqmsg->opc == OST_WRITE) {
                if (rc > 0) {
                        CERROR ("Unexpected +ve rc %d\n", rc);
                        RETURN(-EPROTO);
                }
                LASSERT (req->rq_bulk->bd_nob == requested_nob);

                RETURN(check_write_rcs(req, requested_nob, niocount,
                                       page_count, pga));
        }

        if (rc > requested_nob) {
                CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
                RETURN(-EPROTO);
        }

        if (rc != req->rq_bulk->bd_nob_transferred) {
                CERROR ("Unexpected rc %d (%d transferred)\n",
                        rc, req->rq_bulk->bd_nob_transferred);
                RETURN(-EPROTO);
        }

        if (rc < requested_nob)
                handle_short_read(rc, page_count, pga);

#if CHECKSUM_BULK
        if (oa->o_valid & OBD_MD_FLCKSUM) {
                const struct ptlrpc_peer *peer =
                        &req->rq_import->imp_connection->c_peer;
                static int cksum_counter;
                obd_count server_cksum = oa->o_cksum;
                obd_count cksum = cksum_pages(rc, page_count, pga);
                char str[PTL_NALFMT_SIZE];

                ptlrpc_peernid2str(peer, str);

                cksum_counter++;
                if (server_cksum != cksum) {
                        CERROR("Bad checksum: server %x, client %x, server NID "
                               LPX64" (%s)\n", server_cksum, cksum,
                               peer->peer_id.nid, str);
                        cksum_counter = 0;
                        oa->o_cksum = cksum;
                } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
                        CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
                              cksum_counter, peer->peer_id.nid, str, cksum);
                }
        } else {
                static int cksum_missed;

                cksum_missed++;
                if ((cksum_missed & (-cksum_missed)) == cksum_missed)
                        CERROR("Request checksum %u from "LPX64", no reply\n",
                               cksum_missed,
                               req->rq_import->imp_connection->c_peer.peer_id.nid);
        }
#endif

        RETURN(rc);
}

static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
                            struct lov_stripe_md *lsm,
                            obd_count page_count, struct brw_page *pga)
{
        int                    requested_nob;
        int                    niocount;
        struct ptlrpc_request *request;
        int                    rc;
        ENTRY;

restart_bulk:
        rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
                                  page_count, pga, &requested_nob, &niocount,
                                  &request);
        if (rc != 0)
                RETURN (rc);

        rc = ptlrpc_queue_wait(request);

        if (rc == -ETIMEDOUT && request->rq_resend) {
                DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
                ptlrpc_req_finished(request);
                goto restart_bulk;
        }

        rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
                                  page_count, pga, rc);

        ptlrpc_req_finished(request);
        RETURN (rc);
}

static int brw_interpret(struct ptlrpc_request *request,
                         struct osc_brw_async_args *aa, int rc)
{
        struct obdo *oa      = aa->aa_oa;
        int requested_nob    = aa->aa_requested_nob;
        int niocount         = aa->aa_nio_count;
        obd_count page_count = aa->aa_page_count;
        struct brw_page *pga = aa->aa_pga;
        ENTRY;

        rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
                                  page_count, pga, rc);
        RETURN (rc);
}

static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
                          struct lov_stripe_md *lsm, obd_count page_count,
                          struct brw_page *pga, struct ptlrpc_request_set *set)
{
        struct ptlrpc_request     *request;
        int                        requested_nob;
        int                        nio_count;
        struct osc_brw_async_args *aa;
        int                        rc;
        ENTRY;

        rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
                                  page_count, pga, &requested_nob, &nio_count,
                                  &request);
        if (rc == 0) {
                LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
                aa = (struct osc_brw_async_args *)&request->rq_async_args;
                aa->aa_oa = oa;
                aa->aa_requested_nob = requested_nob;
                aa->aa_nio_count = nio_count;
                aa->aa_page_count = page_count;
                aa->aa_pga = pga;

                request->rq_interpret_reply = brw_interpret;
                ptlrpc_set_add_req(set, request);
        }
        RETURN (rc);
}

#define min_t(type,x,y) \
        ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })

/*
 * ugh, we want disk allocation on the target to happen in offset order.  we'll
 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
 * fine for our small page arrays and doesn't require allocation.  it's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's 1 and the array is sorted.
 */
static void sort_brw_pages(struct brw_page *array, int num)
{
        int stride, i, j;
        struct brw_page tmp;

        if (num == 1)
                return;
        for (stride = 1; stride < num ; stride = (stride * 3) + 1)
                ;

        do {
                stride /= 3;
                for (i = stride ; i < num ; i++) {
                        tmp = array[i];
                        j = i;
                        while (j >= stride && array[j - stride].disk_offset >
                                tmp.disk_offset) {
                                array[j] = array[j - stride];
                                j -= stride;
                        }
                        array[j] = tmp;
                }
        } while (stride > 1);
}

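/* (for example, with num = 100 the loop above leaves stride = 121 and the
 * sort then makes passes with strides 40, 13, 4 and finally 1.) */
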
/* make sure the regions we're passing to elan don't violate its '4
 * fragments' constraint.  portal headers are a fragment, all full
 * PAGE_SIZE long pages count as 1 fragment, and each partial page
 * counts as a fragment.  I think.  see bug 934. */
static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
{
        int frags_left = 3;
        int saw_whole_frag = 0;
        int i;

        for (i = 0 ; frags_left && i < pages ; pg++, i++) {
                if (pg->count == PAGE_SIZE) {
                        if (!saw_whole_frag) {
                                saw_whole_frag = 1;
                                frags_left--;
                        }
                } else {
                        frags_left--;
                }
        }

        return i;
}

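/* send BRWs synchronously, at most PTLRPC_MAX_BRW_PAGES pages at a time;
 * each chunk is sorted by disk offset and clipped to the elan fragment
 * limit before it goes out. */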
static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
                   struct lov_stripe_md *lsm, obd_count page_count,
                   struct brw_page *pga, struct obd_trans_info *oti)
{
        ENTRY;

        if (cmd == OBD_BRW_CHECK) {
                /* The caller just wants to know if there's a chance that this
                 * I/O can succeed */
                struct obd_import *imp = class_exp2cliimp(exp);

                if (imp == NULL || imp->imp_invalid)
                        RETURN(-EIO);
                RETURN(0);
        }

        while (page_count) {
                obd_count pages_per_brw;
                int rc;

                if (page_count > PTLRPC_MAX_BRW_PAGES)
                        pages_per_brw = PTLRPC_MAX_BRW_PAGES;
                else
                        pages_per_brw = page_count;

                sort_brw_pages(pga, pages_per_brw);
                pages_per_brw = check_elan_limit(pga, pages_per_brw);

                rc = osc_brw_internal(cmd, exp, oa, lsm, pages_per_brw, pga);

                if (rc != 0)
                        RETURN(rc);

                page_count -= pages_per_brw;
                pga += pages_per_brw;
        }
        RETURN(0);
}

static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
                         struct lov_stripe_md *lsm, obd_count page_count,
                         struct brw_page *pga, struct ptlrpc_request_set *set,
                         struct obd_trans_info *oti)
{
        ENTRY;

        if (cmd == OBD_BRW_CHECK) {
                /* The caller just wants to know if there's a chance that this
                 * I/O can succeed */
                struct obd_import *imp = class_exp2cliimp(exp);

                if (imp == NULL || imp->imp_invalid)
                        RETURN(-EIO);
                RETURN(0);
        }

        while (page_count) {
                obd_count pages_per_brw;
                int rc;

                if (page_count > PTLRPC_MAX_BRW_PAGES)
                        pages_per_brw = PTLRPC_MAX_BRW_PAGES;
                else
                        pages_per_brw = page_count;

                sort_brw_pages(pga, pages_per_brw);
                pages_per_brw = check_elan_limit(pga, pages_per_brw);

                rc = async_internal(cmd, exp, oa, lsm, pages_per_brw, pga, set);

                if (rc != 0)
                        RETURN(rc);

                page_count -= pages_per_brw;
                pga += pages_per_brw;
        }
        RETURN(0);
}

static void osc_check_rpcs(struct client_obd *cli);
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
                           int sent);
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
static void lop_update_pending(struct client_obd *cli,
                               struct loi_oap_pages *lop, int cmd, int delta);

/* this is called when a sync waiter receives an interruption.  Its job is to
 * get the caller woken as soon as possible.  If its page hasn't been put in an
 * rpc yet it can dequeue immediately.  Otherwise it has to mark the rpc as
 * desiring interruption which will forcefully complete the rpc once the rpc
 * completes. */
static void osc_occ_interrupted(struct oig_callback_context *occ)
{
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;
        struct lov_oinfo *loi;
        ENTRY;

        /* XXX member_of() */
        oap = list_entry(occ, struct osc_async_page, oap_occ);

        spin_lock(&oap->oap_cli->cl_loi_list_lock);

        oap->oap_interrupted = 1;

        /* ok, it's been put in an rpc. */
        if (oap->oap_request != NULL) {
                ptlrpc_mark_interrupted(oap->oap_request);
                ptlrpcd_wake(oap->oap_request);
                GOTO(unlock, 0);
        }

        /* we don't get interruption callbacks until osc_trigger_sync_io()
         * has been called and put the sync oaps in the pending/urgent lists.*/
        if (!list_empty(&oap->oap_pending_item)) {
                list_del_init(&oap->oap_pending_item);
                if (oap->oap_async_flags & ASYNC_URGENT)
                        list_del_init(&oap->oap_urgent_item);

                loi = oap->oap_loi;
                lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
                        &loi->loi_write_lop : &loi->loi_read_lop;
                lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
                loi_list_maint(oap->oap_cli, oap->oap_loi);

                oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
                oap->oap_oig = NULL;
        }

unlock:
        spin_unlock(&oap->oap_cli->cl_loi_list_lock);
}

/* this must be called holding the loi list lock to give coverage to exit_cache,
 * async_flag maintenance, and oap_request */
static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
                              struct osc_async_page *oap, int sent, int rc)
{
        osc_exit_cache(cli, oap, sent);
        oap->oap_async_flags = 0;
        oap->oap_interrupted = 0;

        if (oap->oap_request != NULL) {
                ptlrpc_req_finished(oap->oap_request);
                oap->oap_request = NULL;
        }

        if (rc == 0 && oa != NULL)
                oap->oap_loi->loi_blocks = oa->o_blocks;

        if (oap->oap_oig) {
                oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
                oap->oap_oig = NULL;
                return;
        }

        oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
                                           oa, rc);
}

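/* completion callback for an async BRW built from cached pages: fold the
 * rpc result into each oap, drop the in-flight count, and kick the cache
 * waiters and rpc machinery that may have been blocked behind this rpc. */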
static int brw_interpret_oap(struct ptlrpc_request *request,
                             struct osc_brw_async_args *aa, int rc)
{
        struct osc_async_page *oap;
        struct client_obd *cli;
        struct list_head *pos, *n;
        struct timeval now;
        ENTRY;

        do_gettimeofday(&now);
        rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
                                  aa->aa_nio_count, aa->aa_page_count,
                                  aa->aa_pga, rc);

        CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);

        cli = aa->aa_cli;
        /* in failout recovery we ignore writeback failure and want
         * to just tell llite to unlock the page and continue */
        if (request->rq_reqmsg->opc == OST_WRITE &&
            (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
                CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
                       cli->cl_import,
                       cli->cl_import ? cli->cl_import->imp_invalid : -1);
                rc = 0;
        }

        spin_lock(&cli->cl_loi_list_lock);

        if (request->rq_reqmsg->opc == OST_WRITE)
                lprocfs_stime_record(&cli->cl_write_stime, &now,
                                     &request->rq_rpcd_start);
        else
                lprocfs_stime_record(&cli->cl_read_stime, &now,
                                     &request->rq_rpcd_start);

        /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
         * is called so we know whether to go to sync BRWs or wait for more
         * RPCs to complete */
        if (request->rq_reqmsg->opc == OST_WRITE)
                cli->cl_w_in_flight--;
        else
                cli->cl_r_in_flight--;

        /* the caller may re-use the oap after the completion call so
         * we need to clean it up a little */
        list_for_each_safe(pos, n, &aa->aa_oaps) {
                oap = list_entry(pos, struct osc_async_page, oap_rpc_item);

                //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
                //       oap->oap_page, oap->oap_page->index, oap);

                list_del_init(&oap->oap_rpc_item);
                osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
        }

        osc_wake_cache_waiters(cli);
        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);

        obdo_free(aa->aa_oa);
        OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));

        RETURN(rc);
}

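/* turn a list of oaps into a BRW rpc: copy the pages into a brw_page
 * array, let the caller's ap_fill_obdo() fill in the obdo, and stash the
 * async args in the request for brw_interpret_oap(). */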
static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
                                            struct list_head *rpc_list,
                                            int page_count, int cmd)
{
        struct ptlrpc_request *req;
        struct brw_page *pga = NULL;
        int requested_nob, nio_count;
        struct osc_brw_async_args *aa;
        struct obdo *oa = NULL;
        struct obd_async_page_ops *ops = NULL;
        void *caller_data = NULL;
        struct list_head *pos;
        int i, rc;

        LASSERT(!list_empty(rpc_list));

        OBD_ALLOC(pga, sizeof(*pga) * page_count);
        if (pga == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        oa = obdo_alloc();
        if (oa == NULL)
                GOTO(out, req = ERR_PTR(-ENOMEM));

        i = 0;
        list_for_each(pos, rpc_list) {
                struct osc_async_page *oap;

                oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
                if (ops == NULL) {
                        ops = oap->oap_caller_ops;
                        caller_data = oap->oap_caller_data;
                }
                pga[i].disk_offset = oap->oap_obj_off + oap->oap_page_off;
                pga[i].page_offset = pga[i].disk_offset;
                pga[i].pg = oap->oap_page;
                pga[i].count = oap->oap_count;
                pga[i].flag = oap->oap_brw_flags;
                CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
                       pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
                i++;
        }

        /* always get the data for the obdo for the rpc */
        LASSERT(ops != NULL);
        ops->ap_fill_obdo(caller_data, cmd, oa);

        sort_brw_pages(pga, page_count);
        rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
                                  pga, &requested_nob, &nio_count, &req);
        if (rc != 0) {
                CERROR("prep_req failed: %d\n", rc);
                GOTO(out, req = ERR_PTR(rc));
        }

        LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_brw_async_args *)&req->rq_async_args;
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = nio_count;
        aa->aa_page_count = page_count;
        aa->aa_pga = pga;
        aa->aa_cli = cli;

        return req;

out:
        if (oa != NULL)
                obdo_free(oa);
        if (pga != NULL)
                OBD_FREE(pga, sizeof(*pga) * page_count);
        return req;
}

static void lop_update_pending(struct client_obd *cli,
                               struct loi_oap_pages *lop, int cmd, int delta)
{
        lop->lop_num_pending += delta;
        if (cmd == OBD_BRW_WRITE)
                cli->cl_pending_w_pages += delta;
        else
                cli->cl_pending_r_pages += delta;
}

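/* returns 1 if an rpc was sent, 0 if make_ready told us to back off, and a
 * negative errno if building the rpc failed. */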
/* the loi lock is held across this function but it's allowed to release
 * and reacquire it during its work */
static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
                            int cmd, struct loi_oap_pages *lop)
{
        struct ptlrpc_request *request;
        obd_count page_count = 0;
        struct list_head *tmp, *pos;
        struct osc_async_page *oap = NULL;
        struct osc_brw_async_args *aa;
        struct obd_async_page_ops *ops;
        LIST_HEAD(rpc_list);
        ENTRY;

        /* first we find the pages we're allowed to work with */
        list_for_each_safe(pos, tmp, &lop->lop_pending) {
                oap = list_entry(pos, struct osc_async_page, oap_pending_item);
                ops = oap->oap_caller_ops;

                LASSERT(oap->oap_magic == OAP_MAGIC);

                /* in llite being 'ready' equates to the page being locked
                 * until completion unlocks it.  commit_write submits a page
                 * as not ready because its unlock will happen unconditionally
                 * as the call returns.  if we race with commit_write giving
                 * us that page we don't want to create a hole in the page
                 * stream, so we stop and leave the rpc to be fired by
                 * another dirtier or kupdated interval (the not ready page
                 * will still be on the dirty list).  we could call in
                 * at the end of ll_file_write to process the queue again. */
                if (!(oap->oap_async_flags & ASYNC_READY)) {
                        int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
                        if (rc < 0)
                                CDEBUG(D_INODE, "oap %p page %p returned %d "
                                       "instead of ready\n", oap,
                                       oap->oap_page, rc);
                        switch (rc) {
                        case -EAGAIN:
                                /* llite is telling us that the page is still
                                 * in commit_write and that we should try
                                 * and put it in an rpc again later.  we
                                 * break out of the loop so we don't create
                                 * a hole in the sequence of pages in the rpc
                                 * stream.*/
                                pos = NULL;
                                break;
                        case -EINTR:
                                /* the io isn't needed.. tell the checks
                                 * below to complete the rpc with EINTR */
                                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                                oap->oap_count = -EINTR;
                                break;
                        case 0:
                                oap->oap_async_flags |= ASYNC_READY;
                                break;
                        default:
                                LASSERTF(0, "oap %p page %p returned %d "
                                            "from make_ready\n", oap,
                                            oap->oap_page, rc);
                                break;
                        }
                }
                if (pos == NULL)
                        break;

                /* take the page out of our book-keeping */
                list_del_init(&oap->oap_pending_item);
                lop_update_pending(cli, lop, cmd, -1);
                list_del_init(&oap->oap_urgent_item);

                /* ask the caller for the size of the io as the rpc leaves. */
                if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
                        oap->oap_count =
                                ops->ap_refresh_count(oap->oap_caller_data,cmd);
                if (oap->oap_count <= 0) {
                        CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
                               oap->oap_count);
                        osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
                        continue;
                }

                /* now put the page back in our accounting */
                list_add_tail(&oap->oap_rpc_item, &rpc_list);
                if (++page_count >= cli->cl_max_pages_per_rpc)
                        break;
        }

        osc_wake_cache_waiters(cli);

        if (page_count == 0)
                RETURN(0);

        loi_list_maint(cli, loi);
        spin_unlock(&cli->cl_loi_list_lock);

        request = osc_build_req(cli, &rpc_list, page_count, cmd);
        if (IS_ERR(request)) {
                /* this should happen rarely and is pretty bad, it makes the
                 * pending list not follow the dirty order */
                spin_lock(&cli->cl_loi_list_lock);
                list_for_each_safe(pos, tmp, &rpc_list) {
                        oap = list_entry(pos, struct osc_async_page,
                                         oap_rpc_item);
                        list_del_init(&oap->oap_rpc_item);

                        /* queued sync pages can be torn down while the pages
                         * were between the pending list and the rpc */
                        if (oap->oap_interrupted) {
                                CDEBUG(D_INODE, "oap %p interrupted\n", oap);
                                osc_ap_completion(cli, NULL, oap, 0,
                                                  oap->oap_count);
                                continue;
                        }

                        /* put the page back in the loi/lop lists */
                        list_add_tail(&oap->oap_pending_item,
                                      &lop->lop_pending);
                        lop_update_pending(cli, lop, cmd, 1);
                        if (oap->oap_async_flags & ASYNC_URGENT)
                                list_add(&oap->oap_urgent_item,
                                         &lop->lop_urgent);
                }
                loi_list_maint(cli, loi);
                RETURN(PTR_ERR(request));
        }

        LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
        aa = (struct osc_brw_async_args *)&request->rq_async_args;
        INIT_LIST_HEAD(&aa->aa_oaps);
        list_splice(&rpc_list, &aa->aa_oaps);
        INIT_LIST_HEAD(&rpc_list);

#ifdef __KERNEL__
        if (cmd == OBD_BRW_READ) {
                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
        } else {
                lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_write_rpc_hist,
                                 cli->cl_w_in_flight);
        }
#endif

        spin_lock(&cli->cl_loi_list_lock);

        if (cmd == OBD_BRW_READ)
                cli->cl_r_in_flight++;
        else
                cli->cl_w_in_flight++;
        /* queued sync pages can be torn down while the pages
         * were between the pending list and the rpc */
        list_for_each(pos, &aa->aa_oaps) {
                oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
                if (oap->oap_interrupted) {
                        CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
                               oap, request);
                        ptlrpc_mark_interrupted(request);
                        break;
                }
        }

        CDEBUG(D_INODE, "req %p: %d pages, aa %p.  now %dr/%dw in flight\n",
               request, page_count, aa, cli->cl_r_in_flight,
               cli->cl_w_in_flight);

        oap->oap_request = ptlrpc_request_addref(request);
        request->rq_interpret_reply = brw_interpret_oap;
        ptlrpcd_add_req(request);
        RETURN(1);
}

static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
                         int cmd)
{
        int optimal;
        ENTRY;

        if (lop->lop_num_pending == 0)
                RETURN(0);

        /* if we have an invalid import we want to drain the queued pages
         * by forcing them through rpcs that immediately fail and complete
         * the pages.  recovery relies on this to empty the queued pages
         * before canceling the locks and evicting down the llite pages */
        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(1);

        /* stream rpcs in queue order as long as there is an urgent page
         * queued.  this is our cheap solution for good batching in the case
         * where writepage marks some random page in the middle of the file as
         * urgent because of, say, memory pressure */
        if (!list_empty(&lop->lop_urgent))
                RETURN(1);

        /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
        optimal = cli->cl_max_pages_per_rpc;
        if (cmd == OBD_BRW_WRITE) {
                /* trigger a write rpc stream as long as there are dirtiers
                 * waiting for space.  as they're waiting, they're not going to
                 * create more pages to coalesce with what's waiting.. */
                if (!list_empty(&cli->cl_cache_waiters))
                        RETURN(1);

                /* *2 to avoid triggering rpcs that would want to include pages
                 * that are being queued but which can't be made ready until
                 * the queuer finishes with the page. this is a wart for
                 * llite::commit_write() */
                optimal <<= 1;
        }
        if (lop->lop_num_pending >= optimal)
                RETURN(1);

        RETURN(0);
}

static void on_list(struct list_head *item, struct list_head *list,
                    int should_be_on)
{
        if (list_empty(item) && should_be_on)
                list_add_tail(item, list);
        else if (!list_empty(item) && !should_be_on)
                list_del_init(item);
}

/* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
 * can find pages to build into rpcs quickly */
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
{
        on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
                lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
                lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));

        on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
                loi->loi_write_lop.lop_num_pending);

        on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
                loi->loi_read_lop.lop_num_pending);
}

#define LOI_DEBUG(LOI, STR, args...)                                    \
        CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,          \
               !list_empty(&(LOI)->loi_cli_item),                       \
               (LOI)->loi_write_lop.lop_num_pending,                    \
               !list_empty(&(LOI)->loi_write_lop.lop_urgent),           \
               (LOI)->loi_read_lop.lop_num_pending,                     \
               !list_empty(&(LOI)->loi_read_lop.lop_urgent),            \
               args)

struct lov_oinfo *osc_next_loi(struct client_obd *cli)
{
        ENTRY;
        /* first return all objects which we already know to have
         * pages ready to be stuffed into rpcs */
        if (!list_empty(&cli->cl_loi_ready_list))
                RETURN(list_entry(cli->cl_loi_ready_list.next,
                                  struct lov_oinfo, loi_cli_item));

        /* then if we have cache waiters, return all objects with queued
         * writes.  This is especially important when many small files
         * have filled up the cache and not been fired into rpcs because
         * they don't pass the nr_pending/object threshold */
        if (!list_empty(&cli->cl_cache_waiters) &&
            !list_empty(&cli->cl_loi_write_list))
                RETURN(list_entry(cli->cl_loi_write_list.next,
                                  struct lov_oinfo, loi_write_item));

        /* then return all queued objects when we have an invalid import
         * so that they get flushed */
        if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
                if (!list_empty(&cli->cl_loi_write_list))
                        RETURN(list_entry(cli->cl_loi_write_list.next,
                                          struct lov_oinfo, loi_write_item));
                if (!list_empty(&cli->cl_loi_read_list))
                        RETURN(list_entry(cli->cl_loi_read_list.next,
                                          struct lov_oinfo, loi_read_item));
        }
        RETURN(NULL);
}

/* called with the loi list lock held */
static void osc_check_rpcs(struct client_obd *cli)
{
        struct lov_oinfo *loi;
        int rc = 0, race_counter = 0;
        ENTRY;

        while ((loi = osc_next_loi(cli)) != NULL) {
                LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));

                if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
                        break;

                /* attempt some read/write balancing by alternating between
                 * reads and writes in an object.  The makes_rpc checks here
                 * would be redundant if we were getting read/write work items
                 * instead of objects.  we don't want send_oap_rpc to drain a
                 * partial read pending queue when we're given this object to
                 * do io on writes while there are cache waiters */
                if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
                        rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
                                              &loi->loi_write_lop);
                        if (rc < 0)
                                break;
                        if (rc > 0)
                                race_counter = 0;
                        else
                                race_counter++;
                }
                if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
                        rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
                                              &loi->loi_read_lop);
                        if (rc < 0)
                                break;
                        if (rc > 0)
                                race_counter = 0;
                        else
                                race_counter++;
                }

                /* attempt some inter-object balancing by issuing rpcs
                 * for each object in turn */
                if (!list_empty(&loi->loi_cli_item))
                        list_del_init(&loi->loi_cli_item);
                if (!list_empty(&loi->loi_write_item))
                        list_del_init(&loi->loi_write_item);
                if (!list_empty(&loi->loi_read_item))
                        list_del_init(&loi->loi_read_item);

                loi_list_maint(cli, loi);

                /* send_oap_rpc fails with 0 when make_ready tells it to
                 * back off.  llite's make_ready does this when it tries
                 * to lock a page queued for write that is already locked.
                 * we want to try sending rpcs from many objects, but we
                 * don't want to spin failing with 0. */
                if (race_counter == 10)
                        break;
        }
        EXIT;
}

/* we're trying to queue a page in the osc so we're subject to the
 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
 * If the osc's queued pages are already at that limit, then we want to sleep
 * until there is space in the osc's queue for us.  We also may be waiting for
 * write credits from the OST if there are RPCs in flight that may return some
 * before we fall back to sync writes.
 *
 * We need this to know whether our allocation was granted in the presence of
 * signals */
static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
        int rc;
        ENTRY;
        spin_lock(&cli->cl_loi_list_lock);
        rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
        spin_unlock(&cli->cl_loi_list_lock);
        RETURN(rc);
}

/* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
 * grant or cache space. */
static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
                           struct osc_async_page *oap)
{
        struct osc_cache_waiter ocw;
        struct l_wait_info lwi = { 0 };
        struct timeval start, stop;
        ENTRY;

        CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
               cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
               cli->cl_avail_grant);

        if (cli->cl_dirty_max < PAGE_SIZE)
                RETURN(-EDQUOT);

        /* Hopefully normal case - cache space and write credits available */
        if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
            cli->cl_avail_grant >= PAGE_SIZE) {
                /* account for ourselves */
                osc_consume_write_grant(cli, oap);
                RETURN(0);
        }

        /* Make sure that there are write rpcs in flight to wait for. This
         * is a little silly as this object may not have any pending but
         * other objects sure might. */
        if (cli->cl_w_in_flight) {
                list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
                init_waitqueue_head(&ocw.ocw_waitq);
                ocw.ocw_oap = oap;
                ocw.ocw_rc = 0;

                loi_list_maint(cli, loi);
                osc_check_rpcs(cli);
                spin_unlock(&cli->cl_loi_list_lock);

                CDEBUG(0, "sleeping for cache space\n");
                do_gettimeofday(&start);
                l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
                do_gettimeofday(&stop);

                spin_lock(&cli->cl_loi_list_lock);
                lprocfs_stime_record(&cli->cl_enter_stime, &stop, &start);
                if (!list_empty(&ocw.ocw_entry)) {
                        list_del(&ocw.ocw_entry);
                        RETURN(-EINTR);
                }
                RETURN(ocw.ocw_rc);
        }

        RETURN(-EDQUOT);
}

/* the companion to enter_cache, called when an oap is no longer part of the
 * dirty accounting.. so writeback completes or truncate happens before writing
 * starts.  must be called with the loi lock held. */
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
                           int sent)
{
        ENTRY;

        if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
                EXIT;
                return;
        }

        oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
        cli->cl_dirty -= PAGE_SIZE;
        if (!sent) {
                cli->cl_lost_grant += PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        }

        EXIT;
}

int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
                        struct lov_oinfo *loi, struct page *page,
                        obd_off offset, struct obd_async_page_ops *ops,
                        void *data, void **res)
{
        struct osc_async_page *oap;
        ENTRY;

        OBD_ALLOC(oap, sizeof(*oap));
        if (oap == NULL)
                RETURN(-ENOMEM);

        oap->oap_magic = OAP_MAGIC;
        oap->oap_cli = &exp->exp_obd->u.cli;
        oap->oap_lsm = lsm;
        oap->oap_loi = loi;

        oap->oap_caller_ops = ops;
        oap->oap_caller_data = data;

        oap->oap_page = page;
        oap->oap_obj_off = offset;

        INIT_LIST_HEAD(&oap->oap_pending_item);
        INIT_LIST_HEAD(&oap->oap_urgent_item);
        INIT_LIST_HEAD(&oap->oap_rpc_item);

        oap->oap_occ.occ_interrupted = osc_occ_interrupted;

        CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
        *res = oap;
        RETURN(0);
}

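/* queue a prepared oap for async io.  writes must first get cache space
 * and grant via osc_enter_cache(), which may block; reads are queued
 * directly. */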
static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
                              struct lov_oinfo *loi, void *cookie,
                              int cmd, obd_off off, int count,
                              obd_flags brw_flags, enum async_flags async_flags)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;
        int rc = 0;
        ENTRY;

        oap = OAP_FROM_COOKIE(cookie);

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (!list_empty(&oap->oap_pending_item) ||
            !list_empty(&oap->oap_urgent_item) ||
            !list_empty(&oap->oap_rpc_item))
                RETURN(-EBUSY);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        spin_lock(&cli->cl_loi_list_lock);

        oap->oap_cmd = cmd;
        oap->oap_async_flags = async_flags;
        oap->oap_page_off = off;
        oap->oap_count = count;
        oap->oap_brw_flags = brw_flags;

        if (cmd == OBD_BRW_WRITE) {
                rc = osc_enter_cache(cli, loi, oap);
                if (rc) {
                        spin_unlock(&cli->cl_loi_list_lock);
                        RETURN(rc);
                }
                lop = &loi->loi_write_lop;
        } else {
                lop = &loi->loi_read_lop;
        }

        if (oap->oap_async_flags & ASYNC_URGENT)
                list_add(&oap->oap_urgent_item, &lop->lop_urgent);
        list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
        lop_update_pending(cli, lop, cmd, 1);

        loi_list_maint(cli, loi);

        LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
                  cmd);

        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);

        RETURN(0);
}

/* aka (~was & now & flag), but this is more clear :) */
#define SETTING(was, now, flag) (!(was & flag) && (now & flag))

static int osc_set_async_flags(struct obd_export *exp,
                               struct lov_stripe_md *lsm,
                               struct lov_oinfo *loi, void *cookie,
                               obd_flags async_flags)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct loi_oap_pages *lop;
        struct osc_async_page *oap;
        int rc = 0;
        ENTRY;

        oap = OAP_FROM_COOKIE(cookie);

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        if (oap->oap_cmd == OBD_BRW_WRITE) {
                lop = &loi->loi_write_lop;
        } else {
                lop = &loi->loi_read_lop;
        }

        spin_lock(&cli->cl_loi_list_lock);

        if (list_empty(&oap->oap_pending_item))
                GOTO(out, rc = -EINVAL);

        if ((oap->oap_async_flags & async_flags) == async_flags)
                GOTO(out, rc = 0);

        if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
                oap->oap_async_flags |= ASYNC_READY;

        if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
                if (list_empty(&oap->oap_rpc_item)) {
                        list_add(&oap->oap_urgent_item, &lop->lop_urgent);
                        loi_list_maint(cli, loi);
                }
        }

        LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
                  oap->oap_async_flags);
out:
        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);
        RETURN(rc);
}

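/* queue an oap on a group's pending list; nothing is sent until
 * osc_trigger_group_io() below moves the group onto the ordinary
 * pending/urgent lists. */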
static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
                              struct lov_oinfo *loi,
                              struct obd_io_group *oig, void *cookie,
                              int cmd, obd_off off, int count,
                              obd_flags brw_flags,
                              obd_flags async_flags)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;
        ENTRY;

        oap = OAP_FROM_COOKIE(cookie);

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (!list_empty(&oap->oap_pending_item) ||
            !list_empty(&oap->oap_urgent_item) ||
            !list_empty(&oap->oap_rpc_item))
                RETURN(-EBUSY);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        spin_lock(&cli->cl_loi_list_lock);

        oap->oap_cmd = cmd;
        oap->oap_page_off = off;
        oap->oap_count = count;
        oap->oap_brw_flags = brw_flags;
        oap->oap_async_flags = async_flags;

        if (cmd == OBD_BRW_WRITE)
                lop = &loi->loi_write_lop;
        else
                lop = &loi->loi_read_lop;

        list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
        if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
                oap->oap_oig = oig;
                oig_add_one(oig, &oap->oap_occ);
        }

        LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);

        spin_unlock(&cli->cl_loi_list_lock);

        RETURN(0);
}

static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
                                 struct loi_oap_pages *lop, int cmd)
{
        struct list_head *pos, *tmp;
        struct osc_async_page *oap;

        list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
                oap = list_entry(pos, struct osc_async_page, oap_pending_item);
                list_del(&oap->oap_pending_item);
                list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
                list_add(&oap->oap_urgent_item, &lop->lop_urgent);
                lop_update_pending(cli, lop, cmd, 1);
        }
        loi_list_maint(cli, loi);
}

static int osc_trigger_group_io(struct obd_export *exp,
                                struct lov_stripe_md *lsm,
                                struct lov_oinfo *loi,
                                struct obd_io_group *oig)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        ENTRY;

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        spin_lock(&cli->cl_loi_list_lock);

        osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
        osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);

        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);

        RETURN(0);
}

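/* undo osc_prep_async_page(): remove the oap from the pending/urgent lists
 * and free it, unless it is currently part of an rpc (-EBUSY). */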
static int osc_teardown_async_page(struct obd_export *exp,
                                   struct lov_stripe_md *lsm,
                                   struct lov_oinfo *loi, void *cookie)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct loi_oap_pages *lop;
        struct osc_async_page *oap;
        int rc = 0;
        ENTRY;

        oap = OAP_FROM_COOKIE(cookie);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        if (oap->oap_cmd == OBD_BRW_WRITE) {
                lop = &loi->loi_write_lop;
        } else {
                lop = &loi->loi_read_lop;
        }

        spin_lock(&cli->cl_loi_list_lock);

        if (!list_empty(&oap->oap_rpc_item))
                GOTO(out, rc = -EBUSY);

        osc_exit_cache(cli, oap, 0);
        osc_wake_cache_waiters(cli);

        if (!list_empty(&oap->oap_urgent_item)) {
                list_del_init(&oap->oap_urgent_item);
                oap->oap_async_flags &= ~ASYNC_URGENT;
        }
        if (!list_empty(&oap->oap_pending_item)) {
                list_del_init(&oap->oap_pending_item);
                lop_update_pending(cli, lop, oap->oap_cmd, -1);
        }
        loi_list_maint(cli, loi);

        LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
out:
        spin_unlock(&cli->cl_loi_list_lock);
        if (rc == 0)
                OBD_FREE(oap, sizeof(*oap));
        RETURN(rc);
}

/* Note: caller will lock/unlock, and set uptodate on the pages */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
                           struct lov_stripe_md *lsm, obd_count page_count,
                           struct brw_page *pga)
{
        struct ptlrpc_request *request = NULL;
        struct ost_body *body;
        struct niobuf_remote *nioptr;
        struct obd_ioobj *iooptr;
        int rc, size[3] = {sizeof(*body)}, mapped = 0;
        int swab;
        ENTRY;

        /* XXX does not handle 'new' brw protocol */

        size[1] = sizeof(struct obd_ioobj);
        size[2] = page_count * sizeof(*nioptr);

        request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
                                  OST_SAN_READ, 3, size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
        iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
        nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
                                sizeof(*nioptr) * page_count);

        memcpy(&body->oa, oa, sizeof(body->oa));

        obdo_to_ioobj(oa, iooptr);
        iooptr->ioo_bufcnt = page_count;

        for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
                LASSERT(PageLocked(pga[mapped].pg));
                LASSERT(mapped == 0 ||
                        pga[mapped].disk_offset > pga[mapped - 1].disk_offset);

                nioptr->offset = pga[mapped].disk_offset;
                nioptr->len    = pga[mapped].count;
                nioptr->flags  = pga[mapped].flag;
        }

        size[1] = page_count * sizeof(*nioptr);
        request->rq_replen = lustre_msg_size(2, size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out_req, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR("Can't unpack body\n");
                GOTO(out_req, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

        swab = lustre_msg_swabbed(request->rq_repmsg);
        LASSERT_REPSWAB(request, 1);
        nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
        if (!nioptr) {
                /* nioptr missing or short */
                GOTO(out_req, rc = -EPROTO);
        }

        /* actual read */
        for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
                struct page *page = pga[mapped].pg;
                struct buffer_head *bh;
                kdev_t dev;

                if (swab)
                        lustre_swab_niobuf_remote (nioptr);

                /* got san device associated */
                LASSERT(exp->exp_obd != NULL);
                dev = exp->exp_obd->u.cli.cl_sandev;

                /* hole */
                if (!nioptr->offset) {
                        CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
                               page->mapping->host->i_ino,
                               page->index);
                        memset(page_address(page), 0, PAGE_SIZE);
                        continue;
                }

                if (!page->buffers) {
                        create_empty_buffers(page, dev, PAGE_SIZE);
                        bh = page->buffers;

                        clear_bit(BH_New, &bh->b_state);
                        set_bit(BH_Mapped, &bh->b_state);
                        bh->b_blocknr = (unsigned long)nioptr->offset;
                        bh->b_dev = dev;
                        clear_bit(BH_Uptodate, &bh->b_state);

                        ll_rw_block(READ, 1, &bh);
                } else {
                        bh = page->buffers;

                        /* if buffer already existed, it must be the
                         * one we mapped before, check it */
                        LASSERT(!test_bit(BH_New, &bh->b_state));
                        LASSERT(test_bit(BH_Mapped, &bh->b_state));
                        LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);

                        /* wait for its io completion */
                        if (test_bit(BH_Lock, &bh->b_state))
                                wait_on_buffer(bh);

                        if (!test_bit(BH_Uptodate, &bh->b_state))
                                ll_rw_block(READ, 1, &bh);
                }

                /* must do synchronous wait here */
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        /* I/O error */
                        rc = -EIO;
                        goto out_req;
                }
        }

out_req:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

2215 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2216 struct lov_stripe_md *lsm, obd_count page_count,
2217 struct brw_page *pga)
2219 struct ptlrpc_request *request = NULL;
2220 struct ost_body *body;
2221 struct niobuf_remote *nioptr;
2222 struct obd_ioobj *iooptr;
2223 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2227 size[1] = sizeof(struct obd_ioobj);
2228 size[2] = page_count * sizeof(*nioptr);
2230 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2231 OST_SAN_WRITE, 3, size, NULL);
2235 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
2236 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
2237 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2238 sizeof (*nioptr) * page_count);
2240 memcpy(&body->oa, oa, sizeof(body->oa));
2242 obdo_to_ioobj(oa, iooptr);
2243 iooptr->ioo_bufcnt = page_count;
2246 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2247 LASSERT(PageLocked(pga[mapped].pg));
2248 LASSERT(mapped == 0 ||
2249 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2251 nioptr->offset = pga[mapped].disk_offset;
2252 nioptr->len = pga[mapped].count;
2253 nioptr->flags = pga[mapped].flag;
2256 size[1] = page_count * sizeof(*nioptr);
2257 request->rq_replen = lustre_msg_size(2, size);
2259 rc = ptlrpc_queue_wait(request);
2263 swab = lustre_msg_swabbed (request->rq_repmsg);
2264 LASSERT_REPSWAB (request, 1);
2265 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2267 CERROR("absent/short niobuf array\n");
2268 GOTO(out_req, rc = -EPROTO);
2272 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2273 struct page *page = pga[mapped].pg;
2274 struct buffer_head *bh;
2278 lustre_swab_niobuf_remote (nioptr);
2281 /* get the SAN device associated with this export */
2281 LASSERT(exp->exp_obd != NULL);
2282 dev = exp->exp_obd->u.cli.cl_sandev;
2284 if (!page->buffers) {
2285 create_empty_buffers(page, dev, PAGE_SIZE);
2288 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2289 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2290 LASSERT(page->buffers->b_blocknr ==
2291 (unsigned long)nioptr->offset);
2297 /* if the buffer is locked, wait for its I/O completion */
2298 if (test_bit(BH_Lock, &bh->b_state))
2301 clear_bit(BH_New, &bh->b_state);
2302 set_bit(BH_Mapped, &bh->b_state);
2304 /* override the block nr */
2305 bh->b_blocknr = (unsigned long)nioptr->offset;
2307 /* we are about to write it, so set it uptodate/dirty;
2309 * the page lock should guarantee no race condition here */
2310 set_bit(BH_Uptodate, &bh->b_state);
2311 set_bit(BH_Dirty, &bh->b_state);
2313 ll_rw_block(WRITE, 1, &bh);
2315 /* must do synchronous write here */
2317 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2325 ptlrpc_req_finished(request);
2329 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2330 struct lov_stripe_md *lsm, obd_count page_count,
2331 struct brw_page *pga, struct obd_trans_info *oti)
2335 while (page_count) {
2336 obd_count pages_per_brw;
2339 if (page_count > PTLRPC_MAX_BRW_PAGES)
2340 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
2342 pages_per_brw = page_count;
2344 if (cmd & OBD_BRW_WRITE)
2345 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw, pga);
2347 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2352 page_count -= pages_per_brw;
2353 pga += pages_per_brw;
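/* Illustrative arithmetic for the chunking above (the 1024 value is a
 * hypothetical PTLRPC_MAX_BRW_PAGES, not necessarily the configured one):
 * with page_count == 2500, the transfer is issued as three BRWs of 1024,
 * 1024 and 452 pages, with pga advanced past each completed chunk. */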
2360 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2362 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2365 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
2369 l_lock(&lock->l_resource->lr_namespace->ns_lock);
2371 if (lock->l_ast_data && lock->l_ast_data != data) {
2372 struct inode *new_inode = data;
2373 struct inode *old_inode = lock->l_ast_data;
2374 if (!(old_inode->i_state & I_FREEING))
2375 LDLM_ERROR(lock, "inconsistent l_ast_data found");
2376 LASSERTF(old_inode->i_state & I_FREEING,
2377 "Found existing inode %p/%lu/%u state %lu in lock: "
2378 "setting data to %p/%lu/%u\n", old_inode,
2379 old_inode->i_ino, old_inode->i_generation,
2381 new_inode, new_inode->i_ino, new_inode->i_generation);
2384 lock->l_ast_data = data;
2385 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
2386 LDLM_LOCK_PUT(lock);
2389 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2390 ldlm_iterator_t replace, void *data)
2392 struct ldlm_res_id res_id = { .name = {0} };
2393 struct obd_device *obd = class_exp2obd(exp);
2395 res_id.name[0] = lsm->lsm_object_id;
2396 res_id.name[2] = lsm->lsm_object_gr;
2397 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
2401 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2402 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2403 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2404 void *data, __u32 lvb_len, void *lvb_swabber,
2405 struct lustre_handle *lockh)
2407 struct obd_device *obd = exp->exp_obd;
2408 struct ldlm_res_id res_id = { .name = {0} };
2410 struct ldlm_reply *rep;
2411 struct ptlrpc_request *req = NULL;
2415 res_id.name[0] = lsm->lsm_object_id;
2416 res_id.name[2] = lsm->lsm_object_gr;
2418 /* Filesystem lock extents are extended to page boundaries so that
2419 * dealing with the page cache is a little smoother. */
2420 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2421 policy->l_extent.end |= ~PAGE_MASK;
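/* Worked example of the masking above (illustrative; assumes a 4096-byte
 * PAGE_SIZE, i.e. PAGE_MASK == ~0xfff):
 *   start 0x1234:  0x1234 - (0x1234 & 0xfff) == 0x1000  (rounded down)
 *   end   0x5678:  0x5678 | 0xfff            == 0x5fff  (rounded up)
 * so the lock extent always covers whole pages. */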
2423 if (lsm->lsm_oinfo->loi_kms_valid == 0)
2426 /* Next, search for already existing extent locks that will cover us */
2427 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2430 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2431 /* return immediately if no credential held */
2432 ldlm_lock_decref(lockh, mode);
2436 osc_set_data_with_check(lockh, data);
2437 if (*flags & LDLM_FL_HAS_INTENT) {
2438 /* I would like to be able to ASSERT here that rss <=
2439 * kms, but I can't, for reasons which are explained in
2442 /* We already have a lock, and it's referenced */
2446 /* If we're trying to read, we also search for an existing PW lock. The
2447 * VFS and page cache already protect us locally, so lots of readers/
2448 * writers can share a single PW lock.
2450 * There are problems with conversion deadlocks (two clients that each
2451 * hold a PR lock and try to convert it to PW will block one another),
* so instead of converting a read lock to a write lock, we'll just
* enqueue a new one.
2454 * At some point we should cancel the read lock instead of making them
2455 * send us a blocking callback, but there are problems with canceling
2456 * locks out from other users right now, too. */
2458 if (mode == LCK_PR) {
2459 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2460 policy, LCK_PW, lockh);
2462 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2463 /* return immediately if no credential held */
2464 ldlm_lock_decref(lockh, LCK_PW);
2468 /* FIXME: This is not incredibly elegant, but it might
2469 * be more elegant than adding another parameter to
2470 * lock_match. I want a second opinion. */
2471 ldlm_lock_addref(lockh, LCK_PR);
2472 ldlm_lock_decref(lockh, LCK_PW);
2473 osc_set_data_with_check(lockh, data);
2477 if (mode == LCK_PW) {
2478 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2479 policy, LCK_PR, lockh);
2481 rc = ldlm_cli_convert(lockh, mode, flags);
2483 /* Update readers/writers accounting */
2484 ldlm_lock_addref(lockh, LCK_PW);
2485 ldlm_lock_decref(lockh, LCK_PR);
2486 osc_set_data_with_check(lockh, data);
2489 /* If the conversion failed, we need to drop the refcount
2490 on the matched lock before we get a new one */
2491 /* XXX Wouldn't it save us some effort to cancel the PR
2492 lock here? We are going to take a PW lock anyway, and it
2493 will invalidate the PR lock */
2494 ldlm_lock_decref(lockh, LCK_PR);
2495 if (rc != EDEADLOCK) {
2526 if (*flags & LDLM_FL_HAS_INTENT) {
2527 int size[2] = {0, sizeof(struct ldlm_request)};
2529 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
2530 LDLM_ENQUEUE, 2, size, NULL);
2534 size[0] = sizeof(*rep);
2535 size[1] = sizeof(lvb);
2536 req->rq_replen = lustre_msg_size(2, size);
2538 rc = ldlm_cli_enqueue(exp, req, obd->obd_namespace, res_id, type,
2539 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2540 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
2542 if (rc == ELDLM_LOCK_ABORTED) {
2543 /* swabbed by ldlm_cli_enqueue() */
2544 LASSERT_REPSWABBED(req, 0);
2545 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
2546 LASSERT(rep != NULL);
2547 if (rep->lock_policy_res1)
2548 rc = rep->lock_policy_res1;
2550 ptlrpc_req_finished(req);
2553 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
2554 CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
2555 lvb.lvb_size, lvb.lvb_blocks);
2556 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2557 lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
2563 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2564 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2565 int *flags, void *data, struct lustre_handle *lockh)
2567 struct ldlm_res_id res_id = { .name = {0} };
2568 struct obd_device *obd = exp->exp_obd;
2572 res_id.name[0] = lsm->lsm_object_id;
2573 res_id.name[2] = lsm->lsm_object_gr;
2575 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2577 /* Filesystem lock extents are extended to page boundaries so that
2578 * dealing with the page cache is a little smoother */
2579 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2580 policy->l_extent.end |= ~PAGE_MASK;
2582 /* Next, search for already existing extent locks that will cover us */
2583 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2584 policy, mode, lockh);
2586 // if (!(*flags & LDLM_FL_TEST_LOCK))
2587 osc_set_data_with_check(lockh, data);
2590 /* If we're trying to read, we also search for an existing PW lock. The
2591 * VFS and page cache already protect us locally, so lots of readers/
2592 * writers can share a single PW lock. */
2593 if (mode == LCK_PR) {
2594 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2595 policy, LCK_PW, lockh);
2596 if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
2597 /* FIXME: This is not incredibly elegant, but it might
2598 * be more elegant than adding another parameter to
2599 * lock_match. I want a second opinion. */
2600 osc_set_data_with_check(lockh, data);
2601 ldlm_lock_addref(lockh, LCK_PR);
2602 ldlm_lock_decref(lockh, LCK_PW);
2608 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2609 __u32 mode, struct lustre_handle *lockh)
2613 if (mode == LCK_GROUP)
2614 ldlm_lock_decref_and_cancel(lockh, mode);
2616 ldlm_lock_decref(lockh, mode);
2621 static int osc_cancel_unused(struct obd_export *exp,
2622 struct lov_stripe_md *lsm,
2623 int flags, void *opaque)
2625 struct obd_device *obd = class_exp2obd(exp);
2626 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
2629 res_id.name[0] = lsm->lsm_object_id;
2630 res_id.name[2] = lsm->lsm_object_gr;
2634 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
2637 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2638 unsigned long max_age)
2640 struct obd_statfs *msfs;
2641 struct ptlrpc_request *request;
2642 int rc, size = sizeof(*osfs);
2645 /* We could possibly pass max_age in the request (as an absolute
2646 * timestamp or a "seconds.usec ago") so the target can avoid doing
2647 * extra calls into the filesystem if that isn't necessary (e.g.
2648 * during mount that would help a bit). Having relative timestamps
2649 * is not so great if request processing is slow, while absolute
2650 * timestamps are not ideal because they need time synchronization. */
2651 request = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
2652 OST_STATFS, 0, NULL, NULL);
2656 request->rq_replen = lustre_msg_size(1, &size);
2657 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2659 rc = ptlrpc_queue_wait(request);
2663 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2664 lustre_swab_obd_statfs);
2666 CERROR("Can't unpack obd_statfs\n");
2667 GOTO(out, rc = -EPROTO);
2670 memcpy(osfs, msfs, sizeof(*osfs));
2674 ptlrpc_req_finished(request);
2678 /* Retrieve object striping information.
2680 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
2681 * the maximum number of OST indices which will fit in the user buffer.
2682 * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here).
2684 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2686 struct lov_user_md lum, *lumk;
2693 rc = copy_from_user(&lum, lump, sizeof(lum));
2697 if (lum.lmm_magic != LOV_USER_MAGIC)
2700 if (lum.lmm_stripe_count > 0) {
2701 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2702 OBD_ALLOC(lumk, lum_size);
2706 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2707 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
2709 lum_size = sizeof(lum);
2713 lumk->lmm_object_id = lsm->lsm_object_id;
2714 lumk->lmm_object_gr = lsm->lsm_object_gr;
2715 lumk->lmm_stripe_count = 1;
2717 if (copy_to_user(lump, lumk, lum_size))
2721 OBD_FREE(lumk, lum_size);
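#if 0
/* Illustrative userspace sketch (not compiled in): how an application might
 * drive osc_getstripe() through the LL_IOC_LOV_GETSTRIPE ioctl. The fd,
 * includes and error handling are assumptions; the caller only needs to
 * fill in lmm_magic and lmm_stripe_count. */
static int example_getstripe(int fd)
{
        struct lov_user_md *lump;
        int rc;

        /* room for one lov_user_ost_data entry; the OSC reports exactly
         * one stripe */
        lump = malloc(sizeof(*lump) + sizeof(lump->lmm_objects[0]));
        if (lump == NULL)
                return -ENOMEM;

        memset(lump, 0, sizeof(*lump) + sizeof(lump->lmm_objects[0]));
        lump->lmm_magic = LOV_USER_MAGIC;
        lump->lmm_stripe_count = 1;

        rc = ioctl(fd, LL_IOC_LOV_GETSTRIPE, lump);
        if (rc == 0)
                printf("object id %llu\n", (unsigned long long)
                       lump->lmm_objects[0].l_object_id);

        free(lump);
        return rc;
}
#endif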
2726 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2727 void *karg, void *uarg)
2729 struct obd_device *obd = exp->exp_obd;
2730 struct obd_ioctl_data *data = karg;
2734 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2737 if (!try_module_get(THIS_MODULE)) {
2738 CERROR("Can't get module. Is it alive?");
2743 case OBD_IOC_LOV_GET_CONFIG: {
2745 struct lov_desc *desc;
2746 struct obd_uuid uuid;
2750 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2751 GOTO(out, err = -EINVAL);
2753 data = (struct obd_ioctl_data *)buf;
2755 if (sizeof(*desc) > data->ioc_inllen1) {
2757 GOTO(out, err = -EINVAL);
2760 if (data->ioc_inllen2 < sizeof(uuid)) {
2762 GOTO(out, err = -EINVAL);
2765 if (data->ioc_inllen3 < sizeof(__u32)) {
2767 GOTO(out, err = -EINVAL);
2770 desc = (struct lov_desc *)data->ioc_inlbuf1;
2771 desc->ld_tgt_count = 1;
2772 desc->ld_active_tgt_count = 1;
2773 desc->ld_default_stripe_count = 1;
2774 desc->ld_default_stripe_size = 0;
2775 desc->ld_default_stripe_offset = 0;
2776 desc->ld_pattern = 0;
2777 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2778 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2779 *((__u32 *)data->ioc_inlbuf3) = 1;
2781 err = copy_to_user((void *)uarg, buf, len);
2784 obd_ioctl_freedata(buf, len);
2787 case LL_IOC_LOV_SETSTRIPE:
2788 err = obd_alloc_memmd(exp, karg);
2792 case LL_IOC_LOV_GETSTRIPE:
2793 err = osc_getstripe(karg, uarg);
2795 case OBD_IOC_CLIENT_RECOVER:
2796 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2801 case IOC_OSC_SET_ACTIVE:
2802 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2805 case IOC_OSC_CTL_RECOVERY:
2806 err = ptlrpc_import_control_recovery(obd->u.cli.cl_import,
2810 CDEBUG(D_INODE, "unrecognized ioctl %#x by %s\n", cmd, current->comm);
2811 GOTO(out, err = -ENOTTY);
2814 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2817 module_put(THIS_MODULE);
2822 static int osc_get_info(struct obd_export *exp, __u32 keylen,
2823 void *key, __u32 *vallen, void *val)
2826 if (!vallen || !val)
2829 if (keylen > strlen("lock_to_stripe") &&
2830 strcmp(key, "lock_to_stripe") == 0) {
2831 __u32 *stripe = val;
2832 *vallen = sizeof(*stripe);
2835 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2836 struct ptlrpc_request *req;
2838 char *bufs[1] = {key};
2840 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2841 OST_GET_INFO, 1, (int *)&keylen, bufs);
2845 req->rq_replen = lustre_msg_size(1, (int *)vallen);
2846 rc = ptlrpc_queue_wait(req);
2850 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2851 lustre_swab_ost_last_id);
2852 if (reply == NULL) {
2853 CERROR("Can't unpack OST last ID\n");
2854 GOTO(out, rc = -EPROTO);
2856 *((obd_id *)val) = *reply;
2858 ptlrpc_req_finished(req);
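#if 0
/* Illustrative sketch (not compiled in): how a caller such as the MDS might
 * query the last allocated object id through the method above. The exp
 * argument and the helper name are assumptions. */
static int example_get_last_id(struct obd_export *exp, obd_id *last_id)
{
        __u32 vallen = sizeof(*last_id);

        /* keylen includes the terminating NUL so that the strcmp() in
         * osc_get_info() matches */
        return obd_get_info(exp, strlen("last_id") + 1, "last_id",
                            &vallen, last_id);
}
#endif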
2864 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2865 void *key, obd_count vallen, void *val)
2867 struct obd_device *obd = exp->exp_obd;
2868 struct obd_import *imp = class_exp2cliimp(exp);
2869 struct llog_ctxt *ctxt;
2873 if (keylen == strlen("unlinked") &&
2874 memcmp(key, "unlinked", keylen) == 0) {
2875 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2876 spin_lock(&oscc->oscc_lock);
2877 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2878 spin_unlock(&oscc->oscc_lock);
2881 if (keylen == strlen("unrecovery") &&
2882 memcmp(key, "unrecovery", keylen) == 0) {
2883 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2884 spin_lock(&oscc->oscc_lock);
2885 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
2886 spin_unlock(&oscc->oscc_lock);
2889 if (keylen == strlen("initial_recov") &&
2890 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
2891 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2892 if (vallen != sizeof(int))
2894 imp->imp_initial_recov = *(int *)val;
2895 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
2896 exp->exp_obd->obd_name,
2897 imp->imp_initial_recov);
2901 if (keylen == strlen("async") &&
2902 memcmp(key, "async", keylen) == 0) {
2903 struct client_obd *cl = &obd->u.cli;
2904 if (vallen != sizeof(int))
2906 cl->cl_async = *(int *)val;
2907 CDEBUG(D_HA, "%s: set async = %d\n",
2908 obd->obd_name, cl->cl_async);
2912 if (keylen == strlen("sec") &&
2913 memcmp(key, "sec", keylen) == 0) {
2914 struct client_obd *cli = &exp->exp_obd->u.cli;
2916 if (vallen == strlen("null") &&
2917 memcmp(val, "null", vallen) == 0) {
2918 cli->cl_sec_flavor = PTLRPC_SEC_NULL;
2919 cli->cl_sec_subflavor = 0;
2922 if (vallen == strlen("krb5i") &&
2923 memcmp(val, "krb5i", vallen) == 0) {
2924 cli->cl_sec_flavor = PTLRPC_SEC_GSS;
2925 cli->cl_sec_subflavor = PTLRPC_SEC_GSS_KRB5I;
2928 if (vallen == strlen("krb5p") &&
2929 memcmp(val, "krb5p", vallen) == 0) {
2930 cli->cl_sec_flavor = PTLRPC_SEC_GSS;
2931 cli->cl_sec_subflavor = PTLRPC_SEC_GSS_KRB5P;
2934 CERROR("unrecognized security type %s\n", (char*) val);
2938 if (keylen == strlen("flush_cred") &&
2939 memcmp(key, "flush_cred", keylen) == 0) {
2940 struct client_obd *cli = &exp->exp_obd->u.cli;
2943 ptlrpcs_import_flush_creds(cli->cl_import,
2948 if (keylen < strlen("mds_conn") ||
2949 memcmp(key, "mds_conn", keylen) != 0)
2952 ctxt = llog_get_context(&exp->exp_obd->obd_llogs,
2953 LLOG_UNLINK_ORIG_CTXT);
2956 rc = llog_initiator_connect(ctxt);
2958 CERROR("cannot establish the connect for "
2959 "ctxt %p: %d\n", ctxt, rc);
2962 imp->imp_server_timeout = 1;
2963 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
2964 imp->imp_pingable = 1;
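#if 0
/* Illustrative sketch (not compiled in): selecting the krb5i security flavor
 * through the "sec" key handled above, as a config handler might do. The
 * exp argument and the helper name are assumptions. */
static int example_set_sec_flavor(struct obd_export *exp)
{
        /* key/value lengths deliberately exclude the NUL, matching the
         * strlen()-based comparisons in osc_set_info() */
        return obd_set_info(exp, strlen("sec"), "sec",
                            strlen("krb5i"), "krb5i");
}
#endif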
2970 static struct llog_operations osc_size_repl_logops = {
2971 lop_cancel: llog_obd_repl_cancel
2974 static struct llog_operations osc_unlink_orig_logops;
2976 static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
2977 struct obd_device *tgt, int count,
2978 struct llog_catid *catid)
2983 osc_unlink_orig_logops = llog_lvfs_ops;
2984 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
2985 osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
2986 osc_unlink_orig_logops.lop_add = llog_catalog_add;
2987 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
2989 rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
2990 &catid->lci_logid, &osc_unlink_orig_logops);
2994 rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
2995 &osc_size_repl_logops);
2999 static int osc_llog_finish(struct obd_device *obd,
3000 struct obd_llogs *llogs, int count)
3005 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
3009 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
3013 static int osc_connect(struct lustre_handle *exph,
3014 struct obd_device *obd, struct obd_uuid *cluuid,
3015 struct obd_connect_data *data,
3016 unsigned long connect_flags)
3020 rc = client_connect_import(exph, obd, cluuid, data, connect_flags);
3024 static int osc_disconnect(struct obd_export *exp, unsigned long flags)
3026 struct obd_device *obd = class_exp2obd(exp);
3027 struct llog_ctxt *ctxt;
3031 ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
3032 if (obd->u.cli.cl_conn_count == 1)
3033 /* flush any remaining cancel messages out to the target */
3034 llog_sync(ctxt, exp);
3036 rc = client_disconnect_export(exp, flags);
3040 static int osc_import_event(struct obd_device *obd,
3041 struct obd_import *imp,
3042 enum obd_import_event event)
3044 struct client_obd *cli;
3047 LASSERT(imp->imp_obd == obd);
3050 case IMP_EVENT_DISCON: {
3051 /* Only do this on the MDS OSCs */
3052 if (imp->imp_server_timeout) {
3053 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3055 spin_lock(&oscc->oscc_lock);
3056 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3057 spin_unlock(&oscc->oscc_lock);
3061 case IMP_EVENT_INACTIVE: {
3062 if (obd->obd_observer)
3063 rc = obd_notify(obd->obd_observer, obd, 0, 0);
3066 case IMP_EVENT_INVALIDATE: {
3067 struct ldlm_namespace *ns = obd->obd_namespace;
3071 spin_lock(&cli->cl_loi_list_lock);
3072 cli->cl_avail_grant = 0;
3073 cli->cl_lost_grant = 0;
3074 /* all pages go to failing RPCs due to the invalid import */
3075 osc_check_rpcs(cli);
3076 spin_unlock(&cli->cl_loi_list_lock);
3078 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3082 case IMP_EVENT_ACTIVE: {
3083 /* Only do this on the MDS OSCs */
3084 if (imp->imp_server_timeout) {
3085 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3087 spin_lock(&oscc->oscc_lock);
3088 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3089 spin_unlock(&oscc->oscc_lock);
3092 if (obd->obd_observer)
3093 rc = obd_notify(obd->obd_observer, obd, 1, 0);
3097 CERROR("Unknown import event %d\n", event);
3103 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
3105 struct lprocfs_static_vars lvars;
3109 lprocfs_init_vars(osc, &lvars);
3110 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3114 rc = lproc_osc_attach_seqstat(dev);
3116 lprocfs_obd_detach(dev);
3120 ptlrpc_lprocfs_register_obd(dev);
3124 static int osc_detach(struct obd_device *dev)
3126 ptlrpc_lprocfs_unregister_obd(dev);
3127 return lprocfs_obd_detach(dev);
3130 static int osc_setup(struct obd_device *obd, obd_count len, void *buf)
3134 rc = ptlrpcd_addref();
3138 rc = client_obd_setup(obd, len, buf);
3147 static int osc_cleanup(struct obd_device *obd, int flags)
3149 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3152 rc = ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
3153 LDLM_FL_CONFIG_CHANGE, NULL);
3157 spin_lock(&oscc->oscc_lock);
3158 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
3159 oscc->oscc_flags |= OSCC_FLAG_EXITING;
3160 spin_unlock(&oscc->oscc_lock);
3162 rc = client_obd_cleanup(obd, flags);
3167 struct obd_ops osc_obd_ops = {
3168 .o_owner = THIS_MODULE,
3169 .o_attach = osc_attach,
3170 .o_detach = osc_detach,
3171 .o_setup = osc_setup,
3172 .o_cleanup = osc_cleanup,
3173 .o_add_conn = client_import_add_conn,
3174 .o_del_conn = client_import_del_conn,
3175 .o_connect = osc_connect,
3176 .o_disconnect = osc_disconnect,
3177 .o_statfs = osc_statfs,
3178 .o_packmd = osc_packmd,
3179 .o_unpackmd = osc_unpackmd,
3180 .o_create = osc_create,
3181 .o_destroy = osc_destroy,
3182 .o_getattr = osc_getattr,
3183 .o_getattr_async = osc_getattr_async,
3184 .o_setattr = osc_setattr,
3186 .o_brw_async = osc_brw_async,
3187 .o_prep_async_page = osc_prep_async_page,
3188 .o_queue_async_io = osc_queue_async_io,
3189 .o_set_async_flags = osc_set_async_flags,
3190 .o_queue_group_io = osc_queue_group_io,
3191 .o_trigger_group_io = osc_trigger_group_io,
3192 .o_teardown_async_page = osc_teardown_async_page,
3193 .o_punch = osc_punch,
3195 .o_enqueue = osc_enqueue,
3196 .o_match = osc_match,
3197 .o_change_cbdata = osc_change_cbdata,
3198 .o_cancel = osc_cancel,
3199 .o_cancel_unused = osc_cancel_unused,
3200 .o_iocontrol = osc_iocontrol,
3201 .o_get_info = osc_get_info,
3202 .o_set_info = osc_set_info,
3203 .o_import_event = osc_import_event,
3204 .o_llog_init = osc_llog_init,
3205 .o_llog_finish = osc_llog_finish,
3208 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3209 struct obd_ops sanosc_obd_ops = {
3210 .o_owner = THIS_MODULE,
3211 .o_attach = osc_attach,
3212 .o_detach = osc_detach,
3213 .o_cleanup = client_obd_cleanup,
3214 .o_add_conn = client_import_add_conn,
3215 .o_del_conn = client_import_del_conn,
3216 .o_connect = osc_connect,
3217 .o_disconnect = client_disconnect_export,
3218 .o_statfs = osc_statfs,
3219 .o_packmd = osc_packmd,
3220 .o_unpackmd = osc_unpackmd,
3221 .o_create = osc_real_create,
3222 .o_destroy = osc_destroy,
3223 .o_getattr = osc_getattr,
3224 .o_getattr_async = osc_getattr_async,
3225 .o_setattr = osc_setattr,
3226 .o_setup = client_sanobd_setup,
3227 .o_brw = sanosc_brw,
3228 .o_punch = osc_punch,
3230 .o_enqueue = osc_enqueue,
3231 .o_match = osc_match,
3232 .o_change_cbdata = osc_change_cbdata,
3233 .o_cancel = osc_cancel,
3234 .o_cancel_unused = osc_cancel_unused,
3235 .o_iocontrol = osc_iocontrol,
3236 .o_import_event = osc_import_event,
3237 .o_llog_init = osc_llog_init,
3238 .o_llog_finish = osc_llog_finish,
3242 int __init osc_init(void)
3244 struct lprocfs_static_vars lvars;
3245 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3246 struct lprocfs_static_vars sanlvars;
3251 lprocfs_init_vars(osc, &lvars);
3252 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3253 lprocfs_init_vars(osc, &sanlvars);
3256 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
3261 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3262 rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
3263 LUSTRE_SANOSC_NAME);
3265 class_unregister_type(LUSTRE_OSC_NAME);
3272 static void /*__exit*/ osc_exit(void)
3274 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3275 class_unregister_type(LUSTRE_SANOSC_NAME);
3277 class_unregister_type(LUSTRE_OSC_NAME);
3280 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3281 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3282 MODULE_LICENSE("GPL");
3284 module_init(osc_init);
3285 module_exit(osc_exit);