1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table.)
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_OSC
35 # include <linux/version.h>
36 # include <linux/module.h>
37 # include <linux/mm.h>
38 # include <linux/highmem.h>
39 # include <linux/ctype.h>
40 # include <linux/init.h>
41 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/workqueue.h>
43 # include <linux/smp_lock.h>
45 # include <linux/locks.h>
47 #else /* __KERNEL__ */
48 # include <liblustre.h>
51 # include <linux/lustre_dlm.h>
52 #include <linux/kp30.h>
53 #include <linux/lustre_net.h>
54 #include <lustre/lustre_user.h>
55 #include <linux/obd_ost.h>
56 #include <linux/obd_lov.h>
62 #include <linux/lustre_ha.h>
63 #include <linux/lprocfs_status.h>
64 #include <linux/lustre_log.h>
65 #include "osc_internal.h"
67 /* Pack OSC object metadata for disk storage (LE byte order). */
68 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
69 struct lov_stripe_md *lsm)
74 lmm_size = sizeof(**lmmp);
79 OBD_FREE(*lmmp, lmm_size);
85 OBD_ALLOC(*lmmp, lmm_size);
91 LASSERT(lsm->lsm_object_id);
92 LASSERT(lsm->lsm_object_gr);
93 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
94 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
100 /* Unpack OSC object metadata from disk storage (LE byte order). */
101 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
102 struct lov_mds_md *lmm, int lmm_bytes)
108 if (lmm_bytes < sizeof (*lmm)) {
109 CERROR("lov_mds_md too small: %d, need %d\n",
110 lmm_bytes, (int)sizeof(*lmm));
113 /* XXX LOV_MAGIC etc check? */
115 if (lmm->lmm_object_id == 0) {
116 CERROR("lov_mds_md: zero lmm_object_id\n");
121 lsm_size = lov_stripe_md_size(1);
125 if (*lsmp != NULL && lmm == NULL) {
126 OBD_FREE(*lsmp, lsm_size);
132 OBD_ALLOC(*lsmp, lsm_size);
135 loi_init((*lsmp)->lsm_oinfo);
139 /* XXX zero *lsmp? */
140 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
141 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
142 LASSERT((*lsmp)->lsm_object_id);
143 LASSERT((*lsmp)->lsm_object_gr);
146 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
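/*
 * Illustrative sketch (not part of the build): the lov_mds_md fields above
 * are stored little-endian on disk, so packmd/unpackmd must round-trip the
 * object id through cpu_to_le64()/le64_to_cpu().  A minimal standalone
 * equivalent, assuming nothing beyond standard C, looks like:
 *
 *     static void put_le64(unsigned char *buf, unsigned long long v)
 *     {
 *             int i;
 *             for (i = 0; i < 8; i++)         // least significant byte first
 *                     buf[i] = (v >> (8 * i)) & 0xff;
 *     }
 *
 *     static unsigned long long get_le64(const unsigned char *buf)
 *     {
 *             unsigned long long v = 0;
 *             int i;
 *             for (i = 0; i < 8; i++)
 *                     v |= (unsigned long long)buf[i] << (8 * i);
 *             return v;
 *     }
 *
 * put_le64() followed by get_le64() returns the original value on any host
 * byte order, which is the property osc_packmd()/osc_unpackmd() rely on for
 * lsm_object_id and lsm_object_gr.
 */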
151 static int osc_getattr_interpret(struct ptlrpc_request *req,
152 struct osc_getattr_async_args *aa, int rc)
154 struct ost_body *body;
160 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
162 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
163 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
165 /* This should really be sent by the OST */
166 aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
167 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
169 CERROR("can't unpack ost_body\n");
171 aa->aa_oa->o_valid = 0;
177 static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
178 struct lov_stripe_md *md,
179 struct ptlrpc_request_set *set)
181 struct ptlrpc_request *request;
182 struct ost_body *body;
183 int size = sizeof(*body);
184 struct osc_getattr_async_args *aa;
187 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
192 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
193 memcpy(&body->oa, oa, sizeof(*oa));
195 request->rq_replen = lustre_msg_size(1, &size);
196 request->rq_interpret_reply = osc_getattr_interpret;
198 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
199 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
202 ptlrpc_set_add_req (set, request);
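/*
 * Illustrative sketch (not part of the build): async getattr keeps its
 * interpreter state in the request's fixed-size rq_async_args scratch area
 * instead of allocating a context, and the LASSERT above is the only
 * guard - the per-call args struct must fit.  A standalone model of the
 * pattern (names here are hypothetical):
 *
 *     struct fake_request {
 *             char rq_async_args[64];            // fixed scratch area
 *     };
 *
 *     struct getattr_args {
 *             void *aa_oa;                       // pointer the interpreter needs
 *     };
 *
 *     static void stash_args(struct fake_request *req, void *oa)
 *     {
 *             struct getattr_args *aa;
 *
 *             // same check as the LASSERT above: args must fit the scratch area
 *             if (sizeof(*aa) > sizeof(req->rq_async_args))
 *                     return;
 *             aa = (struct getattr_args *)req->rq_async_args;
 *             aa->aa_oa = oa;
 *     }
 *
 * The reply interpreter later casts the same scratch area back to the args
 * struct, so nothing has to be allocated or freed for the async state.
 */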
206 static int osc_getattr(struct obd_export *exp, struct obdo *oa,
207 struct lov_stripe_md *md)
209 struct ptlrpc_request *request;
210 struct ost_body *body;
211 int rc, size = sizeof(*body);
214 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
219 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
220 memcpy(&body->oa, oa, sizeof(*oa));
222 request->rq_replen = lustre_msg_size(1, &size);
224 rc = ptlrpc_queue_wait(request);
226 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
230 body = lustre_swab_repbuf(request, 0, sizeof (*body),
231 lustre_swab_ost_body);
233 CERROR ("can't unpack ost_body\n");
234 GOTO (out, rc = -EPROTO);
237 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
238 memcpy(oa, &body->oa, sizeof(*oa));
240 /* This should really be sent by the OST */
241 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
242 oa->o_valid |= OBD_MD_FLBLKSZ;
246 ptlrpc_req_finished(request);
250 static int osc_setattr(struct obd_export *exp, struct obdo *oa,
251 struct lov_stripe_md *md, struct obd_trans_info *oti)
253 struct ptlrpc_request *request;
254 struct ost_body *body;
255 int rc, size = sizeof(*body);
258 LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);
260 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SETATTR, 1, &size,
265 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
266 memcpy(&body->oa, oa, sizeof(*oa));
268 request->rq_replen = lustre_msg_size(1, &size);
270 rc = ptlrpc_queue_wait(request);
274 body = lustre_swab_repbuf(request, 0, sizeof(*body),
275 lustre_swab_ost_body);
277 GOTO(out, rc = -EPROTO);
279 memcpy(oa, &body->oa, sizeof(*oa));
283 ptlrpc_req_finished(request);
287 int osc_real_create(struct obd_export *exp, struct obdo *oa,
288 struct lov_stripe_md **ea, struct obd_trans_info *oti)
290 struct ptlrpc_request *request;
291 struct ost_body *body;
292 struct lov_stripe_md *lsm;
293 int rc, size = sizeof(*body);
301 rc = obd_alloc_memmd(exp, &lsm);
306 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_CREATE, 1, &size,
309 GOTO(out, rc = -ENOMEM);
311 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
312 memcpy(&body->oa, oa, sizeof(body->oa));
314 request->rq_replen = lustre_msg_size(1, &size);
315 if (oa->o_valid & OBD_MD_FLINLINE) {
316 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
317 oa->o_flags == OBD_FL_DELORPHAN);
318 DEBUG_REQ(D_HA, request,
319 "delorphan from OST integration");
322 rc = ptlrpc_queue_wait(request);
326 body = lustre_swab_repbuf(request, 0, sizeof(*body),
327 lustre_swab_ost_body);
329 CERROR ("can't unpack ost_body\n");
330 GOTO (out_req, rc = -EPROTO);
333 memcpy(oa, &body->oa, sizeof(*oa));
335 /* This should really be sent by the OST */
336 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
337 oa->o_valid |= OBD_MD_FLBLKSZ;
339 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
340 * have valid lsm_oinfo data structs, so don't go touching that.
341 * This needs to be fixed in a big way.
343 lsm->lsm_object_id = oa->o_id;
344 lsm->lsm_object_gr = oa->o_gr;
348 oti->oti_transno = request->rq_repmsg->transno;
350 if (oa->o_valid & OBD_MD_FLCOOKIE) {
351 if (!oti->oti_logcookies)
352 oti_alloc_cookies(oti, 1);
353 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
354 sizeof(oti->oti_onecookie));
358 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
361 ptlrpc_req_finished(request);
364 obd_free_memmd(exp, &lsm);
368 static int osc_punch(struct obd_export *exp, struct obdo *oa,
369 struct lov_stripe_md *md, obd_size start,
370 obd_size end, struct obd_trans_info *oti)
372 struct ptlrpc_request *request;
373 struct ost_body *body;
374 int rc, size = sizeof(*body);
382 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_PUNCH, 1, &size,
387 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
388 memcpy(&body->oa, oa, sizeof(*oa));
390 /* overload the size and blocks fields in the oa with start/end */
391 body->oa.o_size = start;
392 body->oa.o_blocks = end;
393 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
395 request->rq_replen = lustre_msg_size(1, &size);
397 rc = ptlrpc_queue_wait(request);
401 body = lustre_swab_repbuf (request, 0, sizeof (*body),
402 lustre_swab_ost_body);
404 CERROR ("can't unpack ost_body\n");
405 GOTO (out, rc = -EPROTO);
408 memcpy(oa, &body->oa, sizeof(*oa));
412 ptlrpc_req_finished(request);
416 static int osc_sync(struct obd_export *exp, struct obdo *oa,
417 struct lov_stripe_md *md, obd_size start, obd_size end)
419 struct ptlrpc_request *request;
420 struct ost_body *body;
421 int rc, size = sizeof(*body);
429 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SYNC, 1, &size,
434 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
435 memcpy(&body->oa, oa, sizeof(*oa));
437 /* overload the size and blocks fields in the oa with start/end */
438 body->oa.o_size = start;
439 body->oa.o_blocks = end;
440 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
442 request->rq_replen = lustre_msg_size(1, &size);
444 rc = ptlrpc_queue_wait(request);
448 body = lustre_swab_repbuf(request, 0, sizeof(*body),
449 lustre_swab_ost_body);
451 CERROR ("can't unpack ost_body\n");
452 GOTO (out, rc = -EPROTO);
455 memcpy(oa, &body->oa, sizeof(*oa));
459 ptlrpc_req_finished(request);
463 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
464 struct lov_stripe_md *ea, struct obd_trans_info *oti)
466 struct ptlrpc_request *request;
467 struct ost_body *body;
468 int rc, size = sizeof(*body);
476 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_DESTROY, 1,
481 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
483 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
484 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
485 sizeof(*oti->oti_logcookies));
486 oti->oti_logcookies++;
489 memcpy(&body->oa, oa, sizeof(*oa));
490 request->rq_replen = lustre_msg_size(1, &size);
492 rc = ptlrpc_queue_wait(request);
496 body = lustre_swab_repbuf(request, 0, sizeof(*body),
497 lustre_swab_ost_body);
499 CERROR ("Can't unpack body\n");
500 GOTO (out, rc = -EPROTO);
503 memcpy(oa, &body->oa, sizeof(*oa));
507 ptlrpc_req_finished(request);
511 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
514 obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
516 LASSERT(!(oa->o_valid & bits));
519 spin_lock(&cli->cl_loi_list_lock);
520 oa->o_dirty = cli->cl_dirty;
521 oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
522 oa->o_grant = cli->cl_avail_grant;
523 oa->o_dropped = cli->cl_lost_grant;
524 cli->cl_lost_grant = 0;
525 spin_unlock(&cli->cl_loi_list_lock);
526 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
527 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
530 /* caller must hold loi_list_lock */
531 static void osc_consume_write_grant(struct client_obd *cli,
532 struct osc_async_page *oap)
534 cli->cl_dirty += PAGE_SIZE;
535 cli->cl_avail_grant -= PAGE_SIZE;
536 oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
537 CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
538 LASSERT(cli->cl_avail_grant >= 0);
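/*
 * Illustrative sketch (not part of the build): every cached dirty page
 * consumes one page of write grant and one page of the dirty limit, and the
 * two counters are always updated together under cl_loi_list_lock.  A
 * standalone model of the accounting (field names are hypothetical):
 *
 *     struct grant_acct {
 *             long dirty;              // like cl_dirty
 *             long dirty_max;          // like cl_dirty_max
 *             long avail_grant;        // like cl_avail_grant
 *     };
 *
 *     static int acct_consume(struct grant_acct *a, long page_size)
 *     {
 *             if (a->dirty + page_size > a->dirty_max ||
 *                 a->avail_grant < page_size)
 *                     return -1;       // caller must wait or fall back to sync IO
 *             a->dirty += page_size;
 *             a->avail_grant -= page_size;
 *             return 0;
 *     }
 *
 *     static void acct_release(struct grant_acct *a, long page_size)
 *     {
 *             // the dirty limit is given back; the grant itself stays spent,
 *             // either consumed by the write RPC or counted as lost
 *             a->dirty -= page_size;
 *     }
 *
 * osc_enter_cache() below is the real consume-side check and osc_exit_cache()
 * the release side; grant that was consumed but never used by an RPC is
 * accumulated in cl_lost_grant and reported back to the OST as o_dropped in
 * osc_announce_cached() above.
 */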
541 /* caller must hold loi_list_lock */
542 void osc_wake_cache_waiters(struct client_obd *cli)
544 struct list_head *l, *tmp;
545 struct osc_cache_waiter *ocw;
547 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
548 /* if we can't dirty more, we must wait until some is written */
549 if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
550 CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
551 cli->cl_dirty, cli->cl_dirty_max);
555 /* if there is still dirty cache but no grant, wait for pending RPCs that
556 * may yet return us some grant before doing sync writes */
557 if (cli->cl_brw_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
558 CDEBUG(D_CACHE, "%d BRWs in flight, no grant\n",
559 cli->cl_brw_in_flight);
563 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
564 list_del_init(&ocw->ocw_entry);
565 if (cli->cl_avail_grant < PAGE_SIZE) {
566 /* no more RPCs in flight to return grant, do sync IO */
567 ocw->ocw_rc = -EDQUOT;
568 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
570 osc_consume_write_grant(cli, ocw->ocw_oap);
573 wake_up(&ocw->ocw_waitq);
579 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
581 spin_lock(&cli->cl_loi_list_lock);
582 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
583 cli->cl_avail_grant += body->oa.o_grant;
584 /* waiters are woken in brw_interpret_oap */
585 spin_unlock(&cli->cl_loi_list_lock);
588 /* We assume that the reason this OSC got a short read is because it read
589 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
590 * via the LOV, and it _knows_ it's reading inside the file, it's just that
591 * this stripe never got written at or beyond this stripe offset yet. */
592 static void handle_short_read(int nob_read, obd_count page_count,
593 struct brw_page *pga)
597 /* skip bytes read OK */
598 while (nob_read > 0) {
599 LASSERT (page_count > 0);
601 if (pga->count > nob_read) {
602 /* EOF inside this page */
603 ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
604 memset(ptr + nob_read, 0, pga->count - nob_read);
611 nob_read -= pga->count;
616 /* zero remaining pages */
617 while (page_count-- > 0) {
618 ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
619 memset(ptr, 0, pga->count);
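/*
 * Illustrative sketch (not part of the build): the loop above walks the page
 * array, leaves the bytes the OST actually returned alone, and zero-fills
 * everything after the short-read point.  The same idea on a single flat
 * buffer, assuming only <string.h>:
 *
 *     static void zero_fill_short_read(char *buf, int requested, int nob_read)
 *     {
 *             if (nob_read < 0)
 *                     nob_read = 0;
 *             if (nob_read < requested)
 *                     memset(buf + nob_read, 0, requested - nob_read);
 *     }
 *
 * The per-page version is only more involved because each brw_page has to be
 * kmap()ed and may be partially covered by the returned bytes.
 */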
625 static int check_write_rcs(struct ptlrpc_request *request,
626 int requested_nob, int niocount,
627 obd_count page_count, struct brw_page *pga)
631 /* return error if any niobuf was in error */
632 remote_rcs = lustre_swab_repbuf(request, 1,
633 sizeof(*remote_rcs) * niocount, NULL);
634 if (remote_rcs == NULL) {
635 CERROR("Missing/short RC vector on BRW_WRITE reply\n");
638 if (lustre_msg_swabbed(request->rq_repmsg))
639 for (i = 0; i < niocount; i++)
640 __swab32s(&remote_rcs[i]);
642 for (i = 0; i < niocount; i++) {
643 if (remote_rcs[i] < 0)
644 return(remote_rcs[i]);
646 if (remote_rcs[i] != 0) {
647 CERROR("rc[%d] invalid (%d) req %p\n",
648 i, remote_rcs[i], request);
653 if (request->rq_bulk->bd_nob_transferred != requested_nob) {
654 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
655 request->rq_bulk->bd_nob_transferred, requested_nob);
662 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
664 if (p1->flag != p2->flag) {
665 unsigned mask = ~OBD_BRW_FROM_GRANT;
667 /* warn if we try to combine flags that we don't know to be
669 if ((p1->flag & mask) != (p2->flag & mask))
670 CERROR("is it ok to have flags 0x%x and 0x%x in the "
671 "same brw?\n", p1->flag, p2->flag);
675 return (p1->off + p1->count == p2->off);
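/*
 * Illustrative sketch (not part of the build): two brw_pages collapse into a
 * single remote niobuf exactly when their flags agree and the first ends
 * where the second begins.  Coalescing a sorted extent list with that rule
 * (types below are hypothetical stand-ins for brw_page/niobuf_remote):
 *
 *     struct ext { unsigned long long off; unsigned count; unsigned flag; };
 *
 *     // writes the coalesced runs into 'run', returns how many there are
 *     static int coalesce(const struct ext *e, int n, struct ext *run)
 *     {
 *             int i, nruns = 0;
 *
 *             for (i = 0; i < n; i++) {
 *                     if (nruns > 0 &&
 *                         run[nruns - 1].flag == e[i].flag &&
 *                         run[nruns - 1].off + run[nruns - 1].count == e[i].off) {
 *                             run[nruns - 1].count += e[i].count;   // extend run
 *                             continue;
 *                     }
 *                     run[nruns++] = e[i];                          // start new run
 *             }
 *             return nruns;
 *     }
 *
 * osc_brw_prep_request() below does the same thing in place as it adds pages
 * to the bulk descriptor, which is why it first counts niocount with
 * can_merge_pages() and then fills exactly that many niobufs.
 */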
679 static obd_count cksum_pages(int nob, obd_count page_count,
680 struct brw_page *pga)
686 LASSERT (page_count > 0);
689 ost_checksum(&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
690 pga->count > nob ? nob : pga->count);
702 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
703 struct lov_stripe_md *lsm, obd_count page_count,
704 struct brw_page *pga, int *requested_nobp,
705 int *niocountp, struct ptlrpc_request **reqp)
707 struct ptlrpc_request *req;
708 struct ptlrpc_bulk_desc *desc;
709 struct client_obd *cli = &imp->imp_obd->u.cli;
710 struct ost_body *body;
711 struct obd_ioobj *ioobj;
712 struct niobuf_remote *niobuf;
721 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
723 for (niocount = i = 1; i < page_count; i++)
724 if (!can_merge_pages(&pga[i - 1], &pga[i]))
727 size[0] = sizeof(*body);
728 size[1] = sizeof(*ioobj);
729 size[2] = niocount * sizeof(*niobuf);
731 req = ptlrpc_prep_req(imp, opc, 3, size, NULL);
735 if (opc == OST_WRITE)
736 desc = ptlrpc_prep_bulk_imp (req, page_count,
737 BULK_GET_SOURCE, OST_BULK_PORTAL);
739 desc = ptlrpc_prep_bulk_imp (req, page_count,
740 BULK_PUT_SINK, OST_BULK_PORTAL);
742 GOTO(out, rc = -ENOMEM);
743 /* NB request now owns desc and will free it when it gets freed */
745 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
746 ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
747 niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));
749 memcpy(&body->oa, oa, sizeof(*oa));
751 obdo_to_ioobj(oa, ioobj);
752 ioobj->ioo_bufcnt = niocount;
754 LASSERT (page_count > 0);
755 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
756 struct brw_page *pg = &pga[i];
757 struct brw_page *pg_prev = pg - 1;
759 LASSERT(pg->count > 0);
760 LASSERT((pg->off & ~PAGE_MASK) + pg->count <= PAGE_SIZE);
761 LASSERTF(i == 0 || pg->off > pg_prev->off,
762 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
763 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
765 pg->pg, pg->pg->private, pg->pg->index, pg->off,
766 pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
769 ptlrpc_prep_bulk_page(desc, pg->pg,
770 pg->off & ~PAGE_MASK, pg->count);
771 requested_nob += pg->count;
773 if (i > 0 && can_merge_pages(pg_prev, pg)) {
775 niobuf->len += pg->count;
777 niobuf->offset = pg->off;
778 niobuf->len = pg->count;
779 niobuf->flags = pg->flag;
783 LASSERT((void *)(niobuf - niocount) ==
784 lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
785 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
786 spin_lock_irqsave(&req->rq_lock, flags);
787 req->rq_no_resend = 1;
788 spin_unlock_irqrestore(&req->rq_lock, flags);
790 /* size[0] still sizeof (*body) */
791 if (opc == OST_WRITE) {
793 body->oa.o_valid |= OBD_MD_FLCKSUM;
794 body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
796 /* 1 RC per niobuf */
797 size[1] = sizeof(__u32) * niocount;
798 req->rq_replen = lustre_msg_size(2, size);
800 /* 1 RC for the whole I/O */
801 req->rq_replen = lustre_msg_size(1, size);
804 *niocountp = niocount;
805 *requested_nobp = requested_nob;
810 ptlrpc_req_finished (req);
814 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
815 int requested_nob, int niocount,
816 obd_count page_count, struct brw_page *pga,
819 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
820 struct ost_body *body;
826 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
828 CERROR ("Can't unpack body\n");
832 osc_update_grant(cli, body);
833 memcpy(oa, &body->oa, sizeof(*oa));
835 if (req->rq_reqmsg->opc == OST_WRITE) {
837 CERROR ("Unexpected +ve rc %d\n", rc);
840 LASSERT (req->rq_bulk->bd_nob == requested_nob);
842 RETURN(check_write_rcs(req, requested_nob, niocount,
846 if (rc > requested_nob) {
847 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
851 if (rc != req->rq_bulk->bd_nob_transferred) {
852 CERROR ("Unexpected rc %d (%d transferred)\n",
853 rc, req->rq_bulk->bd_nob_transferred);
857 if (rc < requested_nob)
858 handle_short_read(rc, page_count, pga);
861 if (oa->o_valid & OBD_MD_FLCKSUM) {
862 const struct ptlrpc_peer *peer =
863 &req->rq_import->imp_connection->c_peer;
864 static int cksum_counter;
865 obd_count server_cksum = oa->o_cksum;
866 obd_count cksum = cksum_pages(rc, page_count, pga);
867 char str[PTL_NALFMT_SIZE];
869 ptlrpc_peernid2str(peer, str);
872 if (server_cksum != cksum) {
873 CERROR("Bad checksum: server %x, client %x, server NID "
874 LPX64" (%s)\n", server_cksum, cksum,
875 peer->peer_nid, str);
878 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
879 CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
880 cksum_counter, peer->peer_nid, str, cksum);
883 static int cksum_missed;
886 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
887 CERROR("Request checksum %u from "LPX64", no reply\n",
889 req->rq_import->imp_connection->c_peer.peer_nid);
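/*
 * Illustrative note (not part of the build): the two "(x & (-x)) == x" tests
 * above are a cheap exponential-backoff filter.  The expression is true only
 * when the counter is 0 or a power of two, so the message is emitted for
 * counts 1, 2, 4, 8, ... rather than on every checksummed reply:
 *
 *     static int should_log(unsigned int counter)
 *     {
 *             // x & -x isolates the lowest set bit; it equals x only when
 *             // at most one bit is set (0, 1, 2, 4, 8, ...)
 *             return (counter & -counter) == counter;
 *     }
 *
 * Calling should_log() on an incrementing counter keeps the log volume
 * logarithmic in the number of events.
 */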
895 static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
896 struct lov_stripe_md *lsm,
897 obd_count page_count, struct brw_page *pga)
901 struct ptlrpc_request *request;
906 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
907 page_count, pga, &requested_nob, &niocount,
909 /* NB ^ sets rq_no_resend */
914 rc = ptlrpc_queue_wait(request);
916 if (rc == -ETIMEDOUT && request->rq_resend) {
917 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
918 ptlrpc_req_finished(request);
922 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
923 page_count, pga, rc);
925 ptlrpc_req_finished(request);
929 static int brw_interpret(struct ptlrpc_request *request,
930 struct osc_brw_async_args *aa, int rc)
932 struct obdo *oa = aa->aa_oa;
933 int requested_nob = aa->aa_requested_nob;
934 int niocount = aa->aa_nio_count;
935 obd_count page_count = aa->aa_page_count;
936 struct brw_page *pga = aa->aa_pga;
939 /* XXX bug 937 here */
940 if (rc == -ETIMEDOUT && request->rq_resend) {
941 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
942 LBUG(); /* re-send. later. */
946 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
947 page_count, pga, rc);
951 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
952 struct lov_stripe_md *lsm, obd_count page_count,
953 struct brw_page *pga, struct ptlrpc_request_set *set)
955 struct ptlrpc_request *request;
958 struct osc_brw_async_args *aa;
962 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
963 page_count, pga, &requested_nob, &nio_count,
965 /* NB ^ sets rq_no_resend */
968 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
969 aa = (struct osc_brw_async_args *)&request->rq_async_args;
971 aa->aa_requested_nob = requested_nob;
972 aa->aa_nio_count = nio_count;
973 aa->aa_page_count = page_count;
976 request->rq_interpret_reply = brw_interpret;
977 ptlrpc_set_add_req(set, request);
983 #define min_t(type,x,y) \
984 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
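/*
 * Usage note (illustrative): min_t(obd_count, page_count, PTLRPC_MAX_BRW_PAGES)
 * evaluates each argument exactly once, converts both to obd_count and yields
 * the smaller value, avoiding the double-evaluation hazard of a naive
 * MIN(x, y) macro.
 */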
988 * ugh, we want disk allocation on the target to happen in offset order. we'll
989 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
990 * fine for our small page arrays and doesn't require allocation. it's an
991 * insertion sort that swaps elements that are strides apart, shrinking the
992 * stride down until it's '1' and the array is sorted.
994 static void sort_brw_pages(struct brw_page *array, int num)
1001 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1006 for (i = stride ; i < num ; i++) {
1009 while (j >= stride && array[j - stride].off >
1011 array[j] = array[j - stride];
1016 } while (stride > 1);
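/*
 * Illustrative sketch (not part of the build): the same shellsort on a bare
 * array of offsets, showing the 1, 4, 13, 40, ... stride sequence and the
 * inner insertion step that the body above applies to struct brw_page:
 *
 *     static void shellsort_offsets(unsigned long long *off, int num)
 *     {
 *             unsigned long long tmp;
 *             int stride, i, j;
 *
 *             if (num < 2)
 *                     return;
 *             // find the largest stride in the 1, 4, 13, 40, ... sequence
 *             for (stride = 1; stride < num; stride = (stride * 3) + 1)
 *                     ;
 *             do {
 *                     stride /= 3;
 *                     // insertion sort over elements 'stride' apart
 *                     for (i = stride; i < num; i++) {
 *                             tmp = off[i];
 *                             j = i;
 *                             while (j >= stride && off[j - stride] > tmp) {
 *                                     off[j] = off[j - stride];
 *                                     j -= stride;
 *                             }
 *                             off[j] = tmp;
 *                     }
 *             } while (stride > 1);
 *     }
 *
 * Each pass is an insertion sort over elements 'stride' apart; the final pass
 * with stride 1 leaves the array fully sorted by offset.
 */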
1019 /* make sure the regions we're passing to elan don't violate its '4
1020 * fragments' constraint. portal headers are a fragment, all full
1021 * PAGE_SIZE long pages count as 1 fragment, and each partial page
1022 * counts as a fragment. I think. see bug 934. */
1023 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
1026 int saw_whole_frag = 0;
1029 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
1030 if (pg->count == PAGE_SIZE) {
1031 if (!saw_whole_frag) {
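/*
 * Illustrative sketch (not part of the build), following the reading of the
 * comment above: with a budget of 4 fragments where the portals header takes
 * one, all full PAGE_SIZE pages share a single fragment and each partial page
 * costs its own, so the truncation amounts to:
 *
 *     static int elan_limit_pages(const unsigned *count, int pages,
 *                                 unsigned page_size)
 *     {
 *             int frags_left = 3;             // 4 minus the header fragment
 *             int saw_whole_frag = 0;
 *             int i;
 *
 *             for (i = 0; frags_left && i < pages; i++) {
 *                     if (count[i] == page_size) {
 *                             if (!saw_whole_frag) {
 *                                     saw_whole_frag = 1;
 *                                     frags_left--;
 *                             }
 *                     } else {
 *                             frags_left--;
 *                     }
 *             }
 *             return i;                       // pages that fit in this brw
 *     }
 *
 * The returned count simply truncates the brw; the remaining pages go out in
 * a later RPC.
 */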
1042 static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
1043 struct lov_stripe_md *md, obd_count page_count,
1044 struct brw_page *pga, struct obd_trans_info *oti)
1048 if (cmd == OBD_BRW_CHECK) {
1049 /* The caller just wants to know if there's a chance that this
1050 * I/O can succeed */
1051 struct obd_import *imp = class_exp2cliimp(exp);
1053 if (imp == NULL || imp->imp_invalid)
1058 while (page_count) {
1059 obd_count pages_per_brw;
1062 if (page_count > PTLRPC_MAX_BRW_PAGES)
1063 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1065 pages_per_brw = page_count;
1067 sort_brw_pages(pga, pages_per_brw);
1068 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1070 rc = osc_brw_internal(cmd, exp, oa, md, pages_per_brw, pga);
1075 page_count -= pages_per_brw;
1076 pga += pages_per_brw;
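/*
 * Illustrative sketch (not part of the build): the loop above (and the
 * identical one in osc_brw_async() below) walks a large page array in chunks
 * of at most PTLRPC_MAX_BRW_PAGES, letting the transport checks shrink a
 * chunk further before it is issued:
 *
 *     static void submit_in_chunks(int total, int max_per_rpc)
 *     {
 *             while (total > 0) {
 *                     int chunk = total > max_per_rpc ? max_per_rpc : total;
 *                     // sort the chunk, apply transport limits (which may
 *                     // shrink 'chunk'), then issue one bulk RPC for it
 *                     total -= chunk;
 *             }
 *     }
 *
 * so a single osc_brw() call can fan out into several bulk RPCs issued back
 * to back.
 */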
1081 static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
1082 struct lov_stripe_md *md, obd_count page_count,
1083 struct brw_page *pga, struct ptlrpc_request_set *set,
1084 struct obd_trans_info *oti)
1088 if (cmd == OBD_BRW_CHECK) {
1089 /* The caller just wants to know if there's a chance that this
1090 * I/O can succeed */
1091 struct obd_import *imp = class_exp2cliimp(exp);
1093 if (imp == NULL || imp->imp_invalid)
1098 while (page_count) {
1099 obd_count pages_per_brw;
1102 if (page_count > PTLRPC_MAX_BRW_PAGES)
1103 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1105 pages_per_brw = page_count;
1107 sort_brw_pages(pga, pages_per_brw);
1108 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1110 rc = async_internal(cmd, exp, oa, md, pages_per_brw, pga, set);
1115 page_count -= pages_per_brw;
1116 pga += pages_per_brw;
1121 static void osc_check_rpcs(struct client_obd *cli);
1122 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1124 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
1125 static void lop_update_pending(struct client_obd *cli,
1126 struct loi_oap_pages *lop, int cmd, int delta);
1128 /* this is called when a sync waiter receives an interruption. Its job is to
1129 * get the caller woken as soon as possible. If its page hasn't been put in an
1130 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1131 * desiring interruption which will forcefully complete the rpc once the rpc
1133 static void osc_occ_interrupted(struct oig_callback_context *occ)
1135 struct osc_async_page *oap;
1136 struct loi_oap_pages *lop;
1137 struct lov_oinfo *loi;
1140 /* XXX member_of() */
1141 oap = list_entry(occ, struct osc_async_page, oap_occ);
1143 spin_lock(&oap->oap_cli->cl_loi_list_lock);
1145 oap->oap_interrupted = 1;
1147 /* ok, it's been put in an rpc. */
1148 if (oap->oap_request != NULL) {
1149 ptlrpc_mark_interrupted(oap->oap_request);
1150 ptlrpcd_wake(oap->oap_request);
1154 /* we don't get interruption callbacks until osc_trigger_sync_io()
1155 * has been called and put the sync oaps in the pending/urgent lists.*/
1156 if (!list_empty(&oap->oap_pending_item)) {
1157 list_del_init(&oap->oap_pending_item);
1158 if (oap->oap_async_flags & ASYNC_URGENT)
1159 list_del_init(&oap->oap_urgent_item);
1162 lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
1163 &loi->loi_write_lop : &loi->loi_read_lop;
1164 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1165 loi_list_maint(oap->oap_cli, oap->oap_loi);
1167 oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
1168 oap->oap_oig = NULL;
1172 spin_unlock(&oap->oap_cli->cl_loi_list_lock);
1175 /* this must be called holding the loi list lock to give coverage to exit_cache,
1176 * async_flag maintenance, and oap_request */
1177 static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
1178 struct osc_async_page *oap, int sent, int rc)
1180 osc_exit_cache(cli, oap, sent);
1181 oap->oap_async_flags = 0;
1182 oap->oap_interrupted = 0;
1184 if (oap->oap_request != NULL) {
1185 ptlrpc_req_finished(oap->oap_request);
1186 oap->oap_request = NULL;
1189 if (rc == 0 && oa != NULL)
1190 oap->oap_loi->loi_blocks = oa->o_blocks;
1193 oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
1194 oap->oap_oig = NULL;
1199 oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
1203 static int brw_interpret_oap(struct ptlrpc_request *request,
1204 struct osc_brw_async_args *aa, int rc)
1206 struct osc_async_page *oap;
1207 struct client_obd *cli;
1208 struct list_head *pos, *n;
1212 rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
1213 aa->aa_nio_count, aa->aa_page_count,
1216 CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);
1219 /* in failout recovery we ignore writeback failure and want
1220 * to just tell llite to unlock the page and continue */
1221 if (request->rq_reqmsg->opc == OST_WRITE &&
1222 (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
1223 CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
1225 cli->cl_import ? cli->cl_import->imp_invalid : -1);
1229 spin_lock(&cli->cl_loi_list_lock);
1231 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1232 * is called so we know whether to go to sync BRWs or wait for more
1233 * RPCs to complete */
1234 cli->cl_brw_in_flight--;
1236 /* the caller may re-use the oap after the completion call so
1237 * we need to clean it up a little */
1238 list_for_each_safe(pos, n, &aa->aa_oaps) {
1239 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1241 //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
1242 //oap->oap_page, oap->oap_page->index, oap);
1244 list_del_init(&oap->oap_rpc_item);
1245 osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
1248 osc_wake_cache_waiters(cli);
1249 osc_check_rpcs(cli);
1251 spin_unlock(&cli->cl_loi_list_lock);
1253 obdo_free(aa->aa_oa);
1254 OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));
1259 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
1260 struct list_head *rpc_list,
1261 int page_count, int cmd)
1263 struct ptlrpc_request *req;
1264 struct brw_page *pga = NULL;
1265 int requested_nob, nio_count;
1266 struct osc_brw_async_args *aa;
1267 struct obdo *oa = NULL;
1268 struct obd_async_page_ops *ops = NULL;
1269 void *caller_data = NULL;
1270 struct list_head *pos;
1273 LASSERT(!list_empty(rpc_list));
1275 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1277 RETURN(ERR_PTR(-ENOMEM));
1281 GOTO(out, req = ERR_PTR(-ENOMEM));
1284 list_for_each(pos, rpc_list) {
1285 struct osc_async_page *oap;
1287 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1289 ops = oap->oap_caller_ops;
1290 caller_data = oap->oap_caller_data;
1292 pga[i].off = oap->oap_obj_off + oap->oap_page_off;
1293 pga[i].pg = oap->oap_page;
1294 pga[i].count = oap->oap_count;
1295 pga[i].flag = oap->oap_brw_flags;
1296 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1297 pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
1301 /* always get the data for the obdo for the rpc */
1302 LASSERT(ops != NULL);
1303 ops->ap_fill_obdo(caller_data, cmd, oa);
1305 sort_brw_pages(pga, page_count);
1306 rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
1307 pga, &requested_nob, &nio_count, &req);
1309 CERROR("prep_req failed: %d\n", rc);
1310 GOTO(out, req = ERR_PTR(rc));
1313 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1314 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1316 aa->aa_requested_nob = requested_nob;
1317 aa->aa_nio_count = nio_count;
1318 aa->aa_page_count = page_count;
1327 OBD_FREE(pga, sizeof(*pga) * page_count);
1332 static void lop_update_pending(struct client_obd *cli,
1333 struct loi_oap_pages *lop, int cmd, int delta)
1335 lop->lop_num_pending += delta;
1336 if (cmd == OBD_BRW_WRITE)
1337 cli->cl_pending_w_pages += delta;
1339 cli->cl_pending_r_pages += delta;
1342 /* the loi lock is held across this function but it's allowed to release
1343 * and reacquire it during its work */
1344 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
1345 int cmd, struct loi_oap_pages *lop)
1347 struct ptlrpc_request *request;
1348 obd_count page_count = 0;
1349 struct list_head *tmp, *pos;
1350 struct osc_async_page *oap = NULL;
1351 struct osc_brw_async_args *aa;
1352 struct obd_async_page_ops *ops;
1353 LIST_HEAD(rpc_list);
1356 /* first we find the pages we're allowed to work with */
1357 list_for_each_safe(pos, tmp, &lop->lop_pending) {
1358 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1359 ops = oap->oap_caller_ops;
1361 LASSERT(oap->oap_magic == OAP_MAGIC);
1363 /* in llite being 'ready' equates to the page being locked
1364 * until completion unlocks it. commit_write submits a page
1365 * as not ready because its unlock will happen unconditionally
1366 * as the call returns. if we race with commit_write giving
1367 * us that page we don't want to create a hole in the page
1368 * stream, so we stop and leave the rpc to be fired by
1369 * another dirtier or kupdated interval (the not ready page
1370 * will still be on the dirty list). we could call in
1371 * at the end of ll_file_write to process the queue again. */
1372 if (!(oap->oap_async_flags & ASYNC_READY)) {
1373 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
1375 CDEBUG(D_INODE, "oap %p page %p returned %d "
1376 "instead of ready\n", oap,
1380 /* llite is telling us that the page is still
1381 * in commit_write and that we should try
1382 * and put it in an rpc again later. we
1383 * break out of the loop so we don't create
1384 * a hole in the sequence of pages in the rpc
1389 /* the io isn't needed, tell the checks
1390 * below to complete the rpc with EINTR */
1391 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1392 oap->oap_count = -EINTR;
1395 oap->oap_async_flags |= ASYNC_READY;
1398 LASSERTF(0, "oap %p page %p returned %d "
1399 "from make_ready\n", oap,
1407 /* take the page out of our book-keeping */
1408 list_del_init(&oap->oap_pending_item);
1409 lop_update_pending(cli, lop, cmd, -1);
1410 list_del_init(&oap->oap_urgent_item);
1412 /* ask the caller for the size of the io as the rpc leaves. */
1413 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
1415 ops->ap_refresh_count(oap->oap_caller_data,cmd);
1416 if (oap->oap_count <= 0) {
1417 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
1419 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
1423 /* now put the page back in our accounting */
1424 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1425 if (++page_count >= cli->cl_max_pages_per_rpc)
1429 osc_wake_cache_waiters(cli);
1431 if (page_count == 0)
1434 loi_list_maint(cli, loi);
1435 spin_unlock(&cli->cl_loi_list_lock);
1437 request = osc_build_req(cli, &rpc_list, page_count, cmd);
1438 if (IS_ERR(request)) {
1439 /* this should happen rarely and is pretty bad, it makes the
1440 * pending list not follow the dirty order */
1441 spin_lock(&cli->cl_loi_list_lock);
1442 list_for_each_safe(pos, tmp, &rpc_list) {
1443 oap = list_entry(pos, struct osc_async_page,
1445 list_del_init(&oap->oap_rpc_item);
1447 /* queued sync pages can be torn down while the pages
1448 * were between the pending list and the rpc */
1449 if (oap->oap_interrupted) {
1450 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
1451 osc_ap_completion(cli, NULL, oap, 0,
1456 /* put the page back in the loi/lop lists */
1457 list_add_tail(&oap->oap_pending_item,
1459 lop_update_pending(cli, lop, cmd, 1);
1460 if (oap->oap_async_flags & ASYNC_URGENT)
1461 list_add(&oap->oap_urgent_item,
1464 loi_list_maint(cli, loi);
1465 RETURN(PTR_ERR(request));
1468 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1469 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1470 INIT_LIST_HEAD(&aa->aa_oaps);
1471 list_splice(&rpc_list, &aa->aa_oaps);
1472 INIT_LIST_HEAD(&rpc_list);
1475 if (cmd == OBD_BRW_READ) {
1476 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1477 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_brw_in_flight);
1479 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1480 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1481 cli->cl_brw_in_flight);
1485 spin_lock(&cli->cl_loi_list_lock);
1487 cli->cl_brw_in_flight++;
1488 /* queued sync pages can be torn down while the pages
1489 * were between the pending list and the rpc */
1490 list_for_each(pos, &aa->aa_oaps) {
1491 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1492 if (oap->oap_interrupted) {
1493 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1495 ptlrpc_mark_interrupted(request);
1500 CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %d in flight\n", request,
1501 page_count, aa, cli->cl_brw_in_flight);
1503 oap->oap_request = ptlrpc_request_addref(request);
1504 request->rq_interpret_reply = brw_interpret_oap;
1505 ptlrpcd_add_req(request);
1509 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1515 if (lop->lop_num_pending == 0)
1518 /* if we have an invalid import we want to drain the queued pages
1519 * by forcing them through rpcs that immediately fail and complete
1520 * the pages. recovery relies on this to empty the queued pages
1521 * before canceling the locks and evicting the llite pages */
1522 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1525 /* stream rpcs in queue order as long as there is an urgent page
1526 * queued. this is our cheap solution for good batching in the case
1527 * where writepage marks some random page in the middle of the file as
1528 * urgent because of, say, memory pressure */
1529 if (!list_empty(&lop->lop_urgent))
1532 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1533 optimal = cli->cl_max_pages_per_rpc;
1534 if (cmd == OBD_BRW_WRITE) {
1535 /* trigger a write rpc stream as long as there are dirtiers
1536 * waiting for space. as they're waiting, they're not going to
1537 * create more pages to coalesce with what's waiting. */
1538 if (!list_empty(&cli->cl_cache_waiters))
1541 /* *2 to avoid triggering rpcs that would want to include pages
1542 * that are being queued but which can't be made ready until
1543 * the queuer finishes with the page. this is a wart for
1544 * llite::commit_write() */
1547 if (lop->lop_num_pending >= optimal)
1553 static void on_list(struct list_head *item, struct list_head *list,
1556 if (list_empty(item) && should_be_on)
1557 list_add_tail(item, list);
1558 else if (!list_empty(item) && !should_be_on)
1559 list_del_init(item);
1562 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1563 * can find pages to build into rpcs quickly */
1564 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1566 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1567 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1568 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1570 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1571 loi->loi_write_lop.lop_num_pending);
1573 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
1574 loi->loi_read_lop.lop_num_pending);
1577 #define LOI_DEBUG(LOI, STR, args...) \
1578 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
1579 !list_empty(&(LOI)->loi_cli_item), \
1580 (LOI)->loi_write_lop.lop_num_pending, \
1581 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
1582 (LOI)->loi_read_lop.lop_num_pending, \
1583 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
1586 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
1589 /* first return all objects which we already know to have
1590 * pages ready to be stuffed into rpcs */
1591 if (!list_empty(&cli->cl_loi_ready_list))
1592 RETURN(list_entry(cli->cl_loi_ready_list.next,
1593 struct lov_oinfo, loi_cli_item));
1595 /* then if we have cache waiters, return all objects with queued
1596 * writes. This is especially important when many small files
1597 * have filled up the cache and not been fired into rpcs because
1598 * they don't pass the nr_pending/object threshold
1599 if (!list_empty(&cli->cl_cache_waiters) &&
1600 !list_empty(&cli->cl_loi_write_list))
1601 RETURN(list_entry(cli->cl_loi_write_list.next,
1602 struct lov_oinfo, loi_write_item));
1604 /* then return all queued objects when we have an invalid import
1605 * so that they get flushed */
1606 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
1607 if (!list_empty(&cli->cl_loi_write_list))
1608 RETURN(list_entry(cli->cl_loi_write_list.next,
1609 struct lov_oinfo, loi_write_item));
1610 if (!list_empty(&cli->cl_loi_read_list))
1611 RETURN(list_entry(cli->cl_loi_read_list.next,
1612 struct lov_oinfo, loi_read_item));
1617 /* called with the loi list lock held */
1618 static void osc_check_rpcs(struct client_obd *cli)
1620 struct lov_oinfo *loi;
1621 int rc = 0, race_counter = 0;
1624 while ((loi = osc_next_loi(cli)) != NULL) {
1625 LOI_DEBUG(loi, "%d in flight\n", cli->cl_brw_in_flight);
1627 if (cli->cl_brw_in_flight >= cli->cl_max_rpcs_in_flight)
1630 /* attempt some read/write balancing by alternating between
1631 * reads and writes in an object. The makes_rpc checks here
1632 * would be redundant if we were getting read/write work items
1633 * instead of objects. we don't want send_oap_rpc to drain a
1634 * partial read pending queue when we're given this object to
1635 * do io on writes while there are cache waiters */
1636 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
1637 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
1638 &loi->loi_write_lop);
1646 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
1647 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
1648 &loi->loi_read_lop);
1657 /* attempt some inter-object balancing by issuing rpcs
1658 * for each object in turn */
1659 if (!list_empty(&loi->loi_cli_item))
1660 list_del_init(&loi->loi_cli_item);
1661 if (!list_empty(&loi->loi_write_item))
1662 list_del_init(&loi->loi_write_item);
1663 if (!list_empty(&loi->loi_read_item))
1664 list_del_init(&loi->loi_read_item);
1666 loi_list_maint(cli, loi);
1668 /* send_oap_rpc fails with 0 when make_ready tells it to
1669 * back off. llite's make_ready does this when it tries
1670 * to lock a page queued for write that is already locked.
1671 * we want to try sending rpcs from many objects, but we
1672 * don't want to spin failing with 0. */
1673 if (race_counter == 10)
1679 /* we're trying to queue a page in the osc so we're subject to the
1680 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
1681 * If the osc's queued pages are already at that limit, then we want to sleep
1682 * until there is space in the osc's queue for us. We also may be waiting for
1683 * write credits from the OST if there are RPCs in flight that may return some
1684 * before we fall back to sync writes.
1686 * We need this to know whether our allocation was granted in the presence of signals */
1687 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1691 spin_lock(&cli->cl_loi_list_lock);
1692 rc = list_empty(&ocw->ocw_entry) || cli->cl_brw_in_flight == 0;
1693 spin_unlock(&cli->cl_loi_list_lock);
1697 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
1698 * grant or cache space. */
1699 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
1700 struct osc_async_page *oap)
1702 struct osc_cache_waiter ocw;
1703 struct l_wait_info lwi = { 0 };
1705 CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
1706 cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
1707 cli->cl_avail_grant);
1709 if (cli->cl_dirty_max < PAGE_SIZE)
1712 /* Hopefully normal case - cache space and write credits available */
1713 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1714 cli->cl_avail_grant >= PAGE_SIZE) {
1715 /* account for ourselves */
1716 osc_consume_write_grant(cli, oap);
1720 /* Make sure that there are write rpcs in flight to wait for. This
1721 * is a little silly as this object may not have any pending but
1722 * other objects sure might. */
1723 if (cli->cl_brw_in_flight) {
1724 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1725 init_waitqueue_head(&ocw.ocw_waitq);
1729 loi_list_maint(cli, loi);
1730 osc_check_rpcs(cli);
1731 spin_unlock(&cli->cl_loi_list_lock);
1733 CDEBUG(0, "sleeping for cache space\n");
1734 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1736 spin_lock(&cli->cl_loi_list_lock);
1737 if (!list_empty(&ocw.ocw_entry)) {
1738 list_del(&ocw.ocw_entry);
1747 /* the companion to enter_cache, called when an oap is no longer part of the
1748 * dirty accounting: either writeback completes or truncate happens before writing
1749 * starts. must be called with the loi lock held. */
1750 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1755 if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
1760 oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
1761 cli->cl_dirty -= PAGE_SIZE;
1763 cli->cl_lost_grant += PAGE_SIZE;
1764 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
1765 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
1771 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
1772 struct lov_oinfo *loi, struct page *page,
1773 obd_off offset, struct obd_async_page_ops *ops,
1774 void *data, void **res)
1776 struct osc_async_page *oap;
1779 OBD_ALLOC(oap, sizeof(*oap));
1783 oap->oap_magic = OAP_MAGIC;
1784 oap->oap_cli = &exp->exp_obd->u.cli;
1787 oap->oap_caller_ops = ops;
1788 oap->oap_caller_data = data;
1790 oap->oap_page = page;
1791 oap->oap_obj_off = offset;
1793 INIT_LIST_HEAD(&oap->oap_pending_item);
1794 INIT_LIST_HEAD(&oap->oap_urgent_item);
1795 INIT_LIST_HEAD(&oap->oap_rpc_item);
1797 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
1799 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
1804 struct osc_async_page *oap_from_cookie(void *cookie)
1806 struct osc_async_page *oap = cookie;
1807 if (oap->oap_magic != OAP_MAGIC)
1808 return ERR_PTR(-EINVAL);
1812 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1813 struct lov_oinfo *loi, void *cookie,
1814 int cmd, obd_off off, int count,
1815 obd_flag brw_flags, enum async_flags async_flags)
1817 struct client_obd *cli = &exp->exp_obd->u.cli;
1818 struct osc_async_page *oap;
1819 struct loi_oap_pages *lop;
1823 oap = oap_from_cookie(cookie);
1825 RETURN(PTR_ERR(oap));
1827 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1830 if (!list_empty(&oap->oap_pending_item) ||
1831 !list_empty(&oap->oap_urgent_item) ||
1832 !list_empty(&oap->oap_rpc_item))
1836 loi = &lsm->lsm_oinfo[0];
1838 spin_lock(&cli->cl_loi_list_lock);
1841 oap->oap_async_flags = async_flags;
1842 oap->oap_page_off = off;
1843 oap->oap_count = count;
1844 oap->oap_brw_flags = brw_flags;
1846 if (cmd == OBD_BRW_WRITE) {
1847 rc = osc_enter_cache(cli, loi, oap);
1849 spin_unlock(&cli->cl_loi_list_lock);
1852 lop = &loi->loi_write_lop;
1854 lop = &loi->loi_read_lop;
1857 if (oap->oap_async_flags & ASYNC_URGENT)
1858 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1859 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1860 lop_update_pending(cli, lop, cmd, 1);
1862 loi_list_maint(cli, loi);
1864 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
1867 osc_check_rpcs(cli);
1868 spin_unlock(&cli->cl_loi_list_lock);
1873 /* aka (~was & now & flag), but this is more clear :) */
1874 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
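/*
 * Illustrative example: SETTING() is true only on a 0 -> 1 transition of the
 * given flag bit.  With ASYNC_URGENT as the flag,
 *
 *     SETTING(0,            ASYNC_URGENT, ASYNC_URGENT) == 1   // newly set
 *     SETTING(ASYNC_URGENT, ASYNC_URGENT, ASYNC_URGENT) == 0   // already set
 *     SETTING(ASYNC_URGENT, 0,            ASYNC_URGENT) == 0   // being cleared
 *
 * which is why osc_set_async_flags() below only moves a page onto the urgent
 * list when ASYNC_URGENT is being set for the first time.
 */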
1876 static int osc_set_async_flags(struct obd_export *exp,
1877 struct lov_stripe_md *lsm,
1878 struct lov_oinfo *loi, void *cookie,
1879 obd_flag async_flags)
1881 struct client_obd *cli = &exp->exp_obd->u.cli;
1882 struct loi_oap_pages *lop;
1883 struct osc_async_page *oap;
1887 oap = oap_from_cookie(cookie);
1889 RETURN(PTR_ERR(oap));
1891 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1895 loi = &lsm->lsm_oinfo[0];
1897 if (oap->oap_cmd == OBD_BRW_WRITE) {
1898 lop = &loi->loi_write_lop;
1900 lop = &loi->loi_read_lop;
1903 spin_lock(&cli->cl_loi_list_lock);
1905 if (list_empty(&oap->oap_pending_item))
1906 GOTO(out, rc = -EINVAL);
1908 if ((oap->oap_async_flags & async_flags) == async_flags)
1911 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
1912 oap->oap_async_flags |= ASYNC_READY;
1914 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
1915 if (list_empty(&oap->oap_rpc_item)) {
1916 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1917 loi_list_maint(cli, loi);
1921 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
1922 oap->oap_async_flags);
1924 osc_check_rpcs(cli);
1925 spin_unlock(&cli->cl_loi_list_lock);
1929 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1930 struct lov_oinfo *loi,
1931 struct obd_io_group *oig, void *cookie,
1932 int cmd, obd_off off, int count,
1934 obd_flag async_flags)
1936 struct client_obd *cli = &exp->exp_obd->u.cli;
1937 struct osc_async_page *oap;
1938 struct loi_oap_pages *lop;
1941 oap = oap_from_cookie(cookie);
1943 RETURN(PTR_ERR(oap));
1945 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1948 if (!list_empty(&oap->oap_pending_item) ||
1949 !list_empty(&oap->oap_urgent_item) ||
1950 !list_empty(&oap->oap_rpc_item))
1954 loi = &lsm->lsm_oinfo[0];
1956 spin_lock(&cli->cl_loi_list_lock);
1959 oap->oap_page_off = off;
1960 oap->oap_count = count;
1961 oap->oap_brw_flags = brw_flags;
1962 oap->oap_async_flags = async_flags;
1964 if (cmd == OBD_BRW_WRITE)
1965 lop = &loi->loi_write_lop;
1967 lop = &loi->loi_read_lop;
1969 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
1970 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
1972 oig_add_one(oig, &oap->oap_occ);
1975 LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);
1977 spin_unlock(&cli->cl_loi_list_lock);
1982 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
1983 struct loi_oap_pages *lop, int cmd)
1985 struct list_head *pos, *tmp;
1986 struct osc_async_page *oap;
1988 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
1989 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1990 list_del(&oap->oap_pending_item);
1991 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1992 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1993 lop_update_pending(cli, lop, cmd, 1);
1995 loi_list_maint(cli, loi);
1998 static int osc_trigger_group_io(struct obd_export *exp,
1999 struct lov_stripe_md *lsm,
2000 struct lov_oinfo *loi,
2001 struct obd_io_group *oig)
2003 struct client_obd *cli = &exp->exp_obd->u.cli;
2007 loi = &lsm->lsm_oinfo[0];
2009 spin_lock(&cli->cl_loi_list_lock);
2011 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2012 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2014 osc_check_rpcs(cli);
2015 spin_unlock(&cli->cl_loi_list_lock);
2020 static int osc_teardown_async_page(struct obd_export *exp,
2021 struct lov_stripe_md *lsm,
2022 struct lov_oinfo *loi, void *cookie)
2024 struct client_obd *cli = &exp->exp_obd->u.cli;
2025 struct loi_oap_pages *lop;
2026 struct osc_async_page *oap;
2030 oap = oap_from_cookie(cookie);
2032 RETURN(PTR_ERR(oap));
2035 loi = &lsm->lsm_oinfo[0];
2037 if (oap->oap_cmd == OBD_BRW_WRITE) {
2038 lop = &loi->loi_write_lop;
2040 lop = &loi->loi_read_lop;
2043 spin_lock(&cli->cl_loi_list_lock);
2045 if (!list_empty(&oap->oap_rpc_item))
2046 GOTO(out, rc = -EBUSY);
2048 osc_exit_cache(cli, oap, 0);
2049 osc_wake_cache_waiters(cli);
2051 if (!list_empty(&oap->oap_urgent_item)) {
2052 list_del_init(&oap->oap_urgent_item);
2053 oap->oap_async_flags &= ~ASYNC_URGENT;
2055 if (!list_empty(&oap->oap_pending_item)) {
2056 list_del_init(&oap->oap_pending_item);
2057 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2059 loi_list_maint(cli, loi);
2061 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2063 spin_unlock(&cli->cl_loi_list_lock);
2065 OBD_FREE(oap, sizeof(*oap));
2070 /* Note: caller will lock/unlock, and set uptodate on the pages */
2071 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2072 static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
2073 struct lov_stripe_md *lsm, obd_count page_count,
2074 struct brw_page *pga)
2076 struct ptlrpc_request *request = NULL;
2077 struct ost_body *body;
2078 struct niobuf_remote *nioptr;
2079 struct obd_ioobj *iooptr;
2080 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2084 /* XXX does not handle 'new' brw protocol */
2086 size[1] = sizeof(struct obd_ioobj);
2087 size[2] = page_count * sizeof(*nioptr);
2089 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_READ, 3,
2094 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2095 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2096 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2097 sizeof(*nioptr) * page_count);
2099 memcpy(&body->oa, oa, sizeof(body->oa));
2101 obdo_to_ioobj(oa, iooptr);
2102 iooptr->ioo_bufcnt = page_count;
2104 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2105 LASSERT(PageLocked(pga[mapped].pg));
2106 LASSERT(mapped == 0 ||
2107 pga[mapped].off > pga[mapped - 1].off);
2109 nioptr->offset = pga[mapped].off;
2110 nioptr->len = pga[mapped].count;
2111 nioptr->flags = pga[mapped].flag;
2114 size[1] = page_count * sizeof(*nioptr);
2115 request->rq_replen = lustre_msg_size(2, size);
2117 rc = ptlrpc_queue_wait(request);
2121 body = lustre_swab_repbuf(request, 0, sizeof(*body),
2122 lustre_swab_ost_body);
2124 CERROR("Can't unpack body\n");
2125 GOTO(out_req, rc = -EPROTO);
2128 memcpy(oa, &body->oa, sizeof(*oa));
2130 swab = lustre_msg_swabbed(request->rq_repmsg);
2131 LASSERT_REPSWAB(request, 1);
2132 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2134 /* nioptr missing or short */
2135 GOTO(out_req, rc = -EPROTO);
2139 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2140 struct page *page = pga[mapped].pg;
2141 struct buffer_head *bh;
2145 lustre_swab_niobuf_remote (nioptr);
2147 /* got san device associated */
2148 LASSERT(exp->exp_obd != NULL);
2149 dev = exp->exp_obd->u.cli.cl_sandev;
2152 if (!nioptr->offset) {
2153 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
2154 page->mapping->host->i_ino,
2156 memset(page_address(page), 0, PAGE_SIZE);
2160 if (!page->buffers) {
2161 create_empty_buffers(page, dev, PAGE_SIZE);
2164 clear_bit(BH_New, &bh->b_state);
2165 set_bit(BH_Mapped, &bh->b_state);
2166 bh->b_blocknr = (unsigned long)nioptr->offset;
2168 clear_bit(BH_Uptodate, &bh->b_state);
2170 ll_rw_block(READ, 1, &bh);
2174 /* if buffer already existed, it must be the
2175 * one we mapped before, check it */
2176 LASSERT(!test_bit(BH_New, &bh->b_state));
2177 LASSERT(test_bit(BH_Mapped, &bh->b_state));
2178 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
2180 /* wait for its io completion */
2181 if (test_bit(BH_Lock, &bh->b_state))
2184 if (!test_bit(BH_Uptodate, &bh->b_state))
2185 ll_rw_block(READ, 1, &bh);
2189 /* must do synchronous write here */
2191 if (!buffer_uptodate(bh)) {
2199 ptlrpc_req_finished(request);
2203 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2204 struct lov_stripe_md *lsm, obd_count page_count,
2205 struct brw_page *pga)
2207 struct ptlrpc_request *request = NULL;
2208 struct ost_body *body;
2209 struct niobuf_remote *nioptr;
2210 struct obd_ioobj *iooptr;
2211 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2215 size[1] = sizeof(struct obd_ioobj);
2216 size[2] = page_count * sizeof(*nioptr);
2218 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_WRITE,
2223 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
2224 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
2225 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2226 sizeof (*nioptr) * page_count);
2228 memcpy(&body->oa, oa, sizeof(body->oa));
2230 obdo_to_ioobj(oa, iooptr);
2231 iooptr->ioo_bufcnt = page_count;
2234 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2235 LASSERT(PageLocked(pga[mapped].pg));
2236 LASSERT(mapped == 0 ||
2237 pga[mapped].off > pga[mapped - 1].off);
2239 nioptr->offset = pga[mapped].off;
2240 nioptr->len = pga[mapped].count;
2241 nioptr->flags = pga[mapped].flag;
2244 size[1] = page_count * sizeof(*nioptr);
2245 request->rq_replen = lustre_msg_size(2, size);
2247 rc = ptlrpc_queue_wait(request);
2251 swab = lustre_msg_swabbed (request->rq_repmsg);
2252 LASSERT_REPSWAB (request, 1);
2253 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2255 CERROR("absent/short niobuf array\n");
2256 GOTO(out_req, rc = -EPROTO);
2260 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2261 struct page *page = pga[mapped].pg;
2262 struct buffer_head *bh;
2266 lustre_swab_niobuf_remote (nioptr);
2268 /* got san device associated */
2269 LASSERT(exp->exp_obd != NULL);
2270 dev = exp->exp_obd->u.cli.cl_sandev;
2272 if (!page->buffers) {
2273 create_empty_buffers(page, dev, PAGE_SIZE);
2276 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2277 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2278 LASSERT(page->buffers->b_blocknr ==
2279 (unsigned long)nioptr->offset);
2285 /* if buffer locked, wait for its io completion */
2286 if (test_bit(BH_Lock, &bh->b_state))
2289 clear_bit(BH_New, &bh->b_state);
2290 set_bit(BH_Mapped, &bh->b_state);
2292 /* override the block nr */
2293 bh->b_blocknr = (unsigned long)nioptr->offset;
2295 /* we are about to write it, so set it
2297 * page lock should guarantee no race condition here */
2298 set_bit(BH_Uptodate, &bh->b_state);
2299 set_bit(BH_Dirty, &bh->b_state);
2301 ll_rw_block(WRITE, 1, &bh);
2303 /* must do synchronous write here */
2305 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2313 ptlrpc_req_finished(request);
2317 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2318 struct lov_stripe_md *lsm, obd_count page_count,
2319 struct brw_page *pga, struct obd_trans_info *oti)
2323 while (page_count) {
2324 obd_count pages_per_brw;
2327 if (page_count > PTLRPC_MAX_BRW_PAGES)
2328 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
2330 pages_per_brw = page_count;
2332 if (cmd & OBD_BRW_WRITE)
2333 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw,pga);
2335 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2340 page_count -= pages_per_brw;
2341 pga += pages_per_brw;
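/* Illustration (numbers hypothetical): if PTLRPC_MAX_BRW_PAGES were 256,
 * a 600-page request would go out as three consecutive RPCs of 256, 256
 * and 88 pages, with pga advanced past the pages already transferred on
 * each round. */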
2348 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2350 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2352 LASSERT(lock != NULL);
2353 l_lock(&lock->l_resource->lr_namespace->ns_lock);
2355 if (lock->l_ast_data && lock->l_ast_data != data) {
2356 struct inode *new_inode = data;
2357 struct inode *old_inode = lock->l_ast_data;
2358 LASSERTF(old_inode->i_state & I_FREEING,
2359 "Found existing inode %p/%lu/%u state %lu in lock: "
2360 "setting data to %p/%lu/%u\n", old_inode,
2361 old_inode->i_ino, old_inode->i_generation,
2363 new_inode, new_inode->i_ino, new_inode->i_generation);
2366 lock->l_ast_data = data;
2367 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
2368 LDLM_LOCK_PUT(lock);
2371 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2372 ldlm_iterator_t replace, void *data)
2374 struct ldlm_res_id res_id = { .name = {0} };
2375 struct obd_device *obd = class_exp2obd(exp);
2377 res_id.name[0] = lsm->lsm_object_id;
2378 res_id.name[2] = lsm->lsm_object_gr;
2379 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
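/* osc_enqueue(): take an extent lock on an object.  The extent is widened
 * to page boundaries, then we first try to match an already-granted lock
 * of the requested mode; a PR request may also piggy-back on an existing
 * PW lock (taking a PR reference and dropping the PW one).  Only when no
 * local lock covers the extent is a new enqueue sent to the server, whose
 * reply LVB updates the stripe's known size and block count. */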
2383 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2384 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2385 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2386 void *data, __u32 lvb_len, void *lvb_swabber,
2387 struct lustre_handle *lockh)
2389 struct obd_device *obd = exp->exp_obd;
2390 struct ldlm_res_id res_id = { .name = {0} };
2395 res_id.name[0] = lsm->lsm_object_id;
2396 res_id.name[2] = lsm->lsm_object_gr;
2398 /* Filesystem lock extents are extended to page boundaries so that
2399 * dealing with the page cache is a little smoother. */
2400 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2401 policy->l_extent.end |= ~PAGE_MASK;
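/* For example, with 4096-byte pages (PAGE_MASK == ~0xfff) a request for
 * bytes [5000, 6000] is widened to [4096, 8191]: the start is rounded
 * down and the end rounded up so that whole pages are covered. */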
2403 if (lsm->lsm_oinfo->loi_kms_valid == 0)
2406 /* Next, search for already existing extent locks that will cover us */
2407 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2410 osc_set_data_with_check(lockh, data);
2411 if (*flags & LDLM_FL_HAS_INTENT) {
2412 /* I would like to be able to ASSERT here that rss <=
2413 * kms, but I can't, for reasons which are explained in
2416 /* We already have a lock, and it's referenced */
2420 /* If we're trying to read, we also search for an existing PW lock. The
2421 * VFS and page cache already protect us locally, so lots of readers/
2422 * writers can share a single PW lock.
2424 * There are problems with conversion deadlocks, so instead of
2425 * converting a read lock to a write lock, we'll just enqueue a new
2428 * At some point we should cancel the read lock instead of making them
2429 * send us a blocking callback, but there are problems with canceling
2430 * locks out from other users right now, too. */
2432 if (mode == LCK_PR) {
2433 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2434 policy, LCK_PW, lockh);
2436 /* FIXME: This is not incredibly elegant, but it might
2437 * be more elegant than adding another parameter to
2438 * lock_match. I want a second opinion. */
2439 ldlm_lock_addref(lockh, LCK_PR);
2440 ldlm_lock_decref(lockh, LCK_PW);
2441 osc_set_data_with_check(lockh, data);
2447 rc = ldlm_cli_enqueue(exp, NULL, obd->obd_namespace, res_id, type,
2448 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2449 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
2451 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
2452 CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
2453 lvb.lvb_size, lvb.lvb_blocks);
2454 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2455 lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
2461 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2462 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2463 int *flags, void *data, struct lustre_handle *lockh)
2465 struct ldlm_res_id res_id = { .name = {0} };
2466 struct obd_device *obd = exp->exp_obd;
2470 res_id.name[0] = lsm->lsm_object_id;
2471 res_id.name[2] = lsm->lsm_object_gr;
2473 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2475 /* Filesystem lock extents are extended to page boundaries so that
2476 * dealing with the page cache is a little smoother */
2477 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2478 policy->l_extent.end |= ~PAGE_MASK;
2480 /* Next, search for already existing extent locks that will cover us */
2481 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2482 policy, mode, lockh);
2484 osc_set_data_with_check(lockh, data);
2487 /* If we're trying to read, we also search for an existing PW lock. The
2488 * VFS and page cache already protect us locally, so lots of readers/
2489 * writers can share a single PW lock. */
2490 if (mode == LCK_PR) {
2491 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2492 policy, LCK_PW, lockh);
2493 if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
2494 /* FIXME: This is not incredibly elegant, but it might
2495 * be more elegant than adding another parameter to
2496 * lock_match. I want a second opinion. */
2497 osc_set_data_with_check(lockh, data);
2498 ldlm_lock_addref(lockh, LCK_PR);
2499 ldlm_lock_decref(lockh, LCK_PW);
2505 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2506 __u32 mode, struct lustre_handle *lockh)
2510 if (mode == LCK_GROUP)
2511 ldlm_lock_decref_and_cancel(lockh, mode);
2513 ldlm_lock_decref(lockh, mode);
2518 static int osc_cancel_unused(struct obd_export *exp,
2519 struct lov_stripe_md *lsm, int flags, void *opaque)
2521 struct obd_device *obd = class_exp2obd(exp);
2522 struct ldlm_res_id res_id = { .name = {0} };
2524 res_id.name[0] = lsm->lsm_object_id;
2525 res_id.name[2] = lsm->lsm_object_gr;
2527 return ldlm_cli_cancel_unused(obd->obd_namespace, &res_id, flags,
2531 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2532 unsigned long max_age)
2534 struct obd_statfs *msfs;
2535 struct ptlrpc_request *request;
2536 int rc, size = sizeof(*osfs);
2539 /* We could possibly pass max_age in the request (as an absolute
2540 * timestamp or a "seconds.usec ago") so the target can avoid doing
2541 * extra calls into the filesystem if that isn't necessary (e.g.
2542 * during mount that would help a bit). Having relative timestamps
2543 * is not so great if request processing is slow, while absolute
2544 * timestamps are not ideal because they need time synchronization. */
2545 request = ptlrpc_prep_req(obd->u.cli.cl_import, OST_STATFS,0,NULL,NULL);
2549 request->rq_replen = lustre_msg_size(1, &size);
2550 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2552 rc = ptlrpc_queue_wait(request);
2556 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2557 lustre_swab_obd_statfs);
2559 CERROR("Can't unpack obd_statfs\n");
2560 GOTO(out, rc = -EPROTO);
2563 memcpy(osfs, msfs, sizeof(*osfs));
2567 ptlrpc_req_finished(request);
2571 /* Retrieve object striping information.
2573 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
2574 * the maximum number of OST entries which will fit in the user buffer.
2575 * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here). */
2577 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2579 struct lov_user_md lum, *lumk;
2586 rc = copy_from_user(&lum, lump, sizeof(lum));
2590 if (lum.lmm_magic != LOV_USER_MAGIC)
2593 if (lum.lmm_stripe_count > 0) {
2594 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2595 OBD_ALLOC(lumk, lum_size);
2599 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2600 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
2602 lum_size = sizeof(lum);
2606 lumk->lmm_object_id = lsm->lsm_object_id;
2607 lumk->lmm_object_gr = lsm->lsm_object_gr;
2608 lumk->lmm_stripe_count = 1;
2610 if (copy_to_user(lump, lumk, lum_size))
2614 OBD_FREE(lumk, lum_size);
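/* Illustrative userspace sketch (not compiled here; assumes an open fd on
 * a Lustre file and the usual <lustre/lustre_user.h> definitions): the
 * buffer handed to LL_IOC_LOV_GETSTRIPE must advertise room for one OST
 * entry via lmm_stripe_count, matching what osc_getstripe() copies back. */
#if 0
struct lov_user_md *lump;
int lum_size = sizeof(*lump) + sizeof(lump->lmm_objects[0]);

lump = malloc(lum_size);
memset(lump, 0, lum_size);
lump->lmm_magic = LOV_USER_MAGIC;
lump->lmm_stripe_count = 1;             /* room for one OST entry */

if (ioctl(fd, LL_IOC_LOV_GETSTRIPE, lump) == 0)
        printf("object id %llu\n",
               (unsigned long long)lump->lmm_objects[0].l_object_id);
free(lump);
#endif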
2619 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2620 void *karg, void *uarg)
2622 struct obd_device *obd = exp->exp_obd;
2623 struct obd_ioctl_data *data = karg;
2627 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2630 if (!try_module_get(THIS_MODULE)) {
2631 CERROR("Can't get module. Is it alive?");
2636 case OBD_IOC_LOV_GET_CONFIG: {
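/* The OSC has no real LOV configuration, so fabricate a single-target
 * lov_desc (one active OST, default stripe count 1) and hand back our own
 * UUID as the target UUID, letting LOV-aware tools run unchanged against
 * a lone OSC. */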
2638 struct lov_desc *desc;
2639 struct obd_uuid uuid;
2643 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2644 GOTO(out, err = -EINVAL);
2646 data = (struct obd_ioctl_data *)buf;
2648 if (sizeof(*desc) > data->ioc_inllen1) {
2650 GOTO(out, err = -EINVAL);
2653 if (data->ioc_inllen2 < sizeof(uuid)) {
2655 GOTO(out, err = -EINVAL);
2658 desc = (struct lov_desc *)data->ioc_inlbuf1;
2659 desc->ld_tgt_count = 1;
2660 desc->ld_active_tgt_count = 1;
2661 desc->ld_default_stripe_count = 1;
2662 desc->ld_default_stripe_size = 0;
2663 desc->ld_default_stripe_offset = 0;
2664 desc->ld_pattern = 0;
2665 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2667 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2669 err = copy_to_user((void *)uarg, buf, len);
2672 obd_ioctl_freedata(buf, len);
2675 case LL_IOC_LOV_SETSTRIPE:
2676 err = obd_alloc_memmd(exp, karg);
2680 case LL_IOC_LOV_GETSTRIPE:
2681 err = osc_getstripe(karg, uarg);
2683 case OBD_IOC_CLIENT_RECOVER:
2684 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2689 case IOC_OSC_SET_ACTIVE:
2690 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2694 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
2695 GOTO(out, err = -ENOTTY);
2698 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2701 module_put(THIS_MODULE);
2706 static int osc_get_info(struct obd_export *exp, obd_count keylen,
2707 void *key, __u32 *vallen, void *val)
2710 if (!vallen || !val)
2713 if (keylen > strlen("lock_to_stripe") &&
2714 strcmp(key, "lock_to_stripe") == 0) {
2715 __u32 *stripe = val;
2716 *vallen = sizeof(*stripe);
2719 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2720 struct ptlrpc_request *req;
2722 char *bufs[1] = {key};
2724 req = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GET_INFO, 1,
2729 req->rq_replen = lustre_msg_size(1, vallen);
2730 rc = ptlrpc_queue_wait(req);
2734 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2735 lustre_swab_ost_last_id);
2736 if (reply == NULL) {
2737 CERROR("Can't unpack OST last ID\n");
2738 GOTO(out, rc = -EPROTO);
2740 *((obd_id *)val) = *reply;
2742 ptlrpc_req_finished(req);
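/* Caller-side sketch (illustrative; assumes the generic obd_get_info()
 * wrapper from obd_class.h): the "last_id" key above round-trips an
 * OST_GET_INFO request and returns the last object id allocated on the
 * OST. */
#if 0
obd_id last_id = 0;
__u32 vallen = sizeof(last_id);

rc = obd_get_info(exp, strlen("last_id") + 1, "last_id",
                  &vallen, &last_id);
if (rc == 0)
        CDEBUG(D_HA, "OST last id "LPU64"\n", last_id);
#endif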
2748 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2749 void *key, obd_count vallen, void *val)
2751 struct ptlrpc_request *req;
2752 struct obd_device *obd = exp->exp_obd;
2753 struct obd_import *imp = class_exp2cliimp(exp);
2754 struct llog_ctxt *ctxt;
2755 int rc, size[2] = {keylen, vallen};
2756 char *bufs[2] = {key, val};
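/* Keys handled locally: "next_id" (obd_id), "growth_count" (int),
 * "unlinked" and "unrecovery" (clear the corresponding oscc flags), and
 * "initial_recov" (int).  Only "mds_conn" is forwarded to the OST as an
 * OST_SET_INFO RPC; on success the unlink llog context is connected and
 * the import is marked server-timeout and pingable. */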
2759 if (keylen == strlen("next_id") &&
2760 memcmp(key, "next_id", strlen("next_id")) == 0) {
2761 if (vallen != sizeof(obd_id))
2763 obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1;
2764 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
2765 exp->exp_obd->obd_name,
2766 obd->u.cli.cl_oscc.oscc_next_id);
2771 if (keylen == strlen("growth_count") &&
2772 memcmp(key, "growth_count", strlen("growth_count")) == 0) {
2773 if (vallen != sizeof(int))
2775 obd->u.cli.cl_oscc.oscc_grow_count = *((int*)val);
2779 if (keylen == strlen("unlinked") &&
2780 memcmp(key, "unlinked", keylen) == 0) {
2781 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2782 spin_lock(&oscc->oscc_lock);
2783 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2784 spin_unlock(&oscc->oscc_lock);
2787 if (keylen == strlen("unrecovery") &&
2788 memcmp(key, "unrecovery", keylen) == 0) {
2789 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2790 spin_lock(&oscc->oscc_lock);
2791 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
2792 spin_unlock(&oscc->oscc_lock);
2795 if (keylen == strlen("initial_recov") &&
2796 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
2797 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2798 if (vallen != sizeof(int))
2800 imp->imp_initial_recov = *(int *)val;
2801 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
2802 exp->exp_obd->obd_name,
2803 imp->imp_initial_recov);
2807 if (keylen < strlen("mds_conn") ||
2808 memcmp(key, "mds_conn", strlen("mds_conn")) != 0)
2811 req = ptlrpc_prep_req(imp, OST_SET_INFO, 2, size, bufs);
2815 req->rq_replen = lustre_msg_size(0, NULL);
2816 rc = ptlrpc_queue_wait(req);
2817 ptlrpc_req_finished(req);
2819 ctxt = llog_get_context(&exp->exp_obd->obd_llogs, LLOG_UNLINK_ORIG_CTXT);
2821 rc = llog_initiator_connect(ctxt);
2826 imp->imp_server_timeout = 1;
2827 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
2828 imp->imp_pingable = 1;
2834 static struct llog_operations osc_size_repl_logops = {
2835 lop_cancel: llog_obd_repl_cancel
2838 static struct llog_operations osc_unlink_orig_logops;
2839 static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
2840 struct obd_device *tgt, int count,
2841 struct llog_catid *catid)
2846 osc_unlink_orig_logops = llog_lvfs_ops;
2847 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
2848 osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
2849 osc_unlink_orig_logops.lop_add = llog_catalog_add;
2850 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
2852 rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
2853 &catid->lci_logid, &osc_unlink_orig_logops);
2857 rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
2858 &osc_size_repl_logops);
2862 static int osc_llog_finish(struct obd_device *obd,
2863 struct obd_llogs *llogs, int count)
2868 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
2872 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
2877 static int osc_connect(struct lustre_handle *exph,
2878 struct obd_device *obd, struct obd_uuid *cluuid)
2882 rc = client_connect_import(exph, obd, cluuid);
2887 static int osc_disconnect(struct obd_export *exp, int flags)
2889 struct obd_device *obd = class_exp2obd(exp);
2890 struct llog_ctxt *ctxt;
2893 ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
2894 if (obd->u.cli.cl_conn_count == 1)
2895 /* flush any remaining cancel messages out to the target */
2896 llog_sync(ctxt, exp);
2898 rc = client_disconnect_export(exp, flags);
2902 static int osc_import_event(struct obd_device *obd,
2903 struct obd_import *imp,
2904 enum obd_import_event event)
2906 struct client_obd *cli;
2909 LASSERT(imp->imp_obd == obd);
2912 case IMP_EVENT_DISCON: {
2913 /* Only do this on the MDS OSCs */
2914 if (imp->imp_server_timeout) {
2915 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2917 spin_lock(&oscc->oscc_lock);
2918 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
2919 spin_unlock(&oscc->oscc_lock);
2923 case IMP_EVENT_INACTIVE: {
2924 if (obd->obd_observer)
2925 rc = obd_notify(obd->obd_observer, obd, 0);
2928 case IMP_EVENT_INVALIDATE: {
2929 struct ldlm_namespace *ns = obd->obd_namespace;
2933 spin_lock(&cli->cl_loi_list_lock);
2934 cli->cl_avail_grant = 0;
2935 cli->cl_lost_grant = 0;
2936 /* all pages go to failing rpcs due to the invalid import */
2937 osc_check_rpcs(cli);
2938 spin_unlock(&cli->cl_loi_list_lock);
2940 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
2944 case IMP_EVENT_ACTIVE: {
2945 if (obd->obd_observer)
2946 rc = obd_notify(obd->obd_observer, obd, 1);
2950 CERROR("Unknown import event %d\n", event);
2956 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
2958 struct lprocfs_static_vars lvars;
2962 lprocfs_init_vars(osc,&lvars);
2963 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
2967 rc = lproc_osc_attach_seqstat(dev);
2969 lprocfs_obd_detach(dev);
2973 ptlrpc_lprocfs_register_obd(dev);
2977 static int osc_detach(struct obd_device *dev)
2979 ptlrpc_lprocfs_unregister_obd(dev);
2980 return lprocfs_obd_detach(dev);
2983 int osc_setup(struct obd_device *obd, obd_count len, void *buf)
2987 rc = ptlrpcd_addref();
2991 rc = client_obd_setup(obd, len, buf);
3000 int osc_cleanup(struct obd_device *obd, int flags)
3004 rc = client_obd_cleanup(obd, flags);
3009 struct obd_ops osc_obd_ops = {
3010 .o_owner = THIS_MODULE,
3011 .o_attach = osc_attach,
3012 .o_detach = osc_detach,
3013 .o_setup = osc_setup,
3014 .o_cleanup = osc_cleanup,
3015 .o_connect = osc_connect,
3016 .o_disconnect = osc_disconnect,
3017 .o_statfs = osc_statfs,
3018 .o_packmd = osc_packmd,
3019 .o_unpackmd = osc_unpackmd,
3020 .o_create = osc_create,
3021 .o_destroy = osc_destroy,
3022 .o_getattr = osc_getattr,
3023 .o_getattr_async = osc_getattr_async,
3024 .o_setattr = osc_setattr,
3026 .o_brw_async = osc_brw_async,
3027 .o_prep_async_page = osc_prep_async_page,
3028 .o_queue_async_io = osc_queue_async_io,
3029 .o_set_async_flags = osc_set_async_flags,
3030 .o_queue_group_io = osc_queue_group_io,
3031 .o_trigger_group_io = osc_trigger_group_io,
3032 .o_teardown_async_page = osc_teardown_async_page,
3033 .o_punch = osc_punch,
3035 .o_enqueue = osc_enqueue,
3036 .o_match = osc_match,
3037 .o_change_cbdata = osc_change_cbdata,
3038 .o_cancel = osc_cancel,
3039 .o_cancel_unused = osc_cancel_unused,
3040 .o_iocontrol = osc_iocontrol,
3041 .o_get_info = osc_get_info,
3042 .o_set_info = osc_set_info,
3043 .o_import_event = osc_import_event,
3044 .o_llog_init = osc_llog_init,
3045 .o_llog_finish = osc_llog_finish,
3048 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3049 struct obd_ops sanosc_obd_ops = {
3050 .o_owner = THIS_MODULE,
3051 .o_attach = osc_attach,
3052 .o_detach = osc_detach,
3053 .o_cleanup = client_obd_cleanup,
3054 .o_connect = osc_connect,
3055 .o_disconnect = client_disconnect_export,
3056 .o_statfs = osc_statfs,
3057 .o_packmd = osc_packmd,
3058 .o_unpackmd = osc_unpackmd,
3059 .o_create = osc_real_create,
3060 .o_destroy = osc_destroy,
3061 .o_getattr = osc_getattr,
3062 .o_getattr_async = osc_getattr_async,
3063 .o_setattr = osc_setattr,
3064 .o_setup = client_sanobd_setup,
3065 .o_brw = sanosc_brw,
3066 .o_punch = osc_punch,
3068 .o_enqueue = osc_enqueue,
3069 .o_match = osc_match,
3070 .o_change_cbdata = osc_change_cbdata,
3071 .o_cancel = osc_cancel,
3072 .o_cancel_unused = osc_cancel_unused,
3073 .o_iocontrol = osc_iocontrol,
3074 .o_import_event = osc_import_event,
3075 .o_llog_init = osc_llog_init,
3076 .o_llog_finish = osc_llog_finish,
3080 int __init osc_init(void)
3082 struct lprocfs_static_vars lvars;
3083 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3084 struct lprocfs_static_vars sanlvars;
3089 lprocfs_init_vars(osc, &lvars);
3090 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3091 lprocfs_init_vars(osc, &sanlvars);
3094 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
3099 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3100 rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
3101 LUSTRE_SANOSC_NAME);
3103 class_unregister_type(LUSTRE_OSC_NAME);
3110 static void /*__exit*/ osc_exit(void)
3112 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3113 class_unregister_type(LUSTRE_SANOSC_NAME);
3115 class_unregister_type(LUSTRE_OSC_NAME);
3118 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3119 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3120 MODULE_LICENSE("GPL");
3122 module_init(osc_init);
3123 module_exit(osc_exit);