/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
 * Author Peter Braam <braam@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * For testing and management it is treated as an obd_device,
 * although it does not export a full OBD method table (the
 * requests are coming in over the wire, so object target modules
 * do not have a full method table.)
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_OSC

#ifdef __KERNEL__
# include <linux/version.h>
# include <linux/module.h>
# include <linux/mm.h>
# include <linux/highmem.h>
# include <linux/lustre_dlm.h>
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#  include <linux/workqueue.h>
#  include <linux/smp_lock.h>
# else
#  include <linux/locks.h>
# endif
#else /* __KERNEL__ */
# include <liblustre.h>
#endif

#include <linux/kp30.h>
#include <linux/lustre_net.h>
#include <linux/lustre_user.h>
#include <linux/obd_ost.h>
#include <linux/obd_lov.h>

# include <linux/ctype.h>
# include <linux/init.h>

#include <linux/lustre_ha.h>
#include <linux/lprocfs_status.h>
#include <linux/lustre_log.h>
#include "osc_internal.h"

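/* Register this OSC's lprocfs state: the static obd variables, the
 * per-device seqstat files (detached again on failure so no stale
 * /proc entry is left behind), and the ptlrpc RPC statistics. */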
static int osc_attach(struct obd_device *dev, obd_count len, void *data)
{
        struct lprocfs_static_vars lvars;
        int rc;

        lprocfs_init_vars(osc, &lvars);
        rc = lprocfs_obd_attach(dev, lvars.obd_vars);
        if (rc == 0) {
                rc = lproc_osc_attach_seqstat(dev);
                if (rc)
                        lprocfs_obd_detach(dev);
        }
        ptlrpc_lprocfs_register_obd(dev);
        return rc;
}

static int osc_detach(struct obd_device *dev)
{
        ptlrpc_lprocfs_unregister_obd(dev);
        return lprocfs_obd_detach(dev);
}

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;

        lmm_size = sizeof(**lmmp);
        if (lmmp == NULL)
                RETURN(lmm_size);

        if (*lmmp != NULL && lsm == NULL) {
                OBD_FREE(*lmmp, lmm_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (*lmmp == NULL) {
                OBD_ALLOC(*lmmp, lmm_size);
                if (*lmmp == NULL)
                        RETURN(-ENOMEM);
        }

        if (lsm != NULL) {
                LASSERT(lsm->lsm_object_id);
                (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
        }

        RETURN(lmm_size);
}

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof (*lmm)) {
                        CERROR("lov_mds_md too small: %d, need %d\n",
                               lmm_bytes, (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (lmm->lmm_object_id == 0) {
                        CERROR("lov_mds_md: zero lmm_object_id\n");
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (*lsmp == NULL)
                        RETURN(-ENOMEM);
                loi_init((*lsmp)->lsm_oinfo);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
                LASSERT((*lsmp)->lsm_object_id);
        }

        (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}

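/* Completion callback for osc_getattr_async(): unpack the ost_body
 * reply, copy the returned attributes into the caller's obdo, and fake
 * up the block size, which the OST does not send yet. */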
static int osc_getattr_interpret(struct ptlrpc_request *req,
                                 struct osc_getattr_async_args *aa, int rc)
{
        struct ost_body *body;

        if (rc != 0)
                RETURN(rc);

        body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));

                /* This should really be sent by the OST */
                aa->aa_oa->o_blksize = OSC_BRW_MAX_SIZE;
                aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CERROR("can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oa->o_valid = 0;
        }

        RETURN(rc);
}

static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
                             struct lov_stripe_md *md,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int size = sizeof(*body);
        struct osc_getattr_async_args *aa;

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
                                  &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        request->rq_replen = lustre_msg_size(1, &size);
        request->rq_interpret_reply = osc_getattr_interpret;

        LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
        aa = (struct osc_getattr_async_args *)&request->rq_async_args;
        aa->aa_oa = oa;

        ptlrpc_set_add_req (set, request);
        RETURN (0);
}

static int osc_getattr(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *md)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
                                  &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc) {
                CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
                GOTO(out, rc);
        }

        body = lustre_swab_repbuf(request, 0, sizeof (*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out, rc = -EPROTO);
        }

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        memcpy(oa, &body->oa, sizeof(*oa));

        /* This should really be sent by the OST */
        oa->o_blksize = OSC_BRW_MAX_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

 out:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

static int osc_setattr(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *md, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SETATTR, 1, &size,
                                  NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        memcpy(oa, &body->oa, sizeof(*oa));

 out:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        struct lov_stripe_md *lsm;
        int rc, size = sizeof(*body);

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        RETURN(rc);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_CREATE, 1, &size,
                                  NULL);
        if (!request)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(body->oa));

        request->rq_replen = lustre_msg_size(1, &size);
        if (oa->o_valid & OBD_MD_FLINLINE) {
                LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
                        oa->o_flags == OBD_FL_DELORPHAN);
                DEBUG_REQ(D_HA, request,
                          "delorphan from OST integration; level == RECOVER");
                request->rq_send_state = LUSTRE_IMP_RECOVER;
        }

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out_req, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out_req, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

        /* This should really be sent by the OST */
        oa->o_blksize = OSC_BRW_MAX_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way. */
        lsm->lsm_object_id = oa->o_id;
        *ea = lsm;

        if (oti != NULL) {
                oti->oti_transno = request->rq_repmsg->transno;

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        memcpy(oti->oti_logcookies, obdo_logcookie(oa),
                               sizeof(oti->oti_onecookie));
                }
        }

        CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
out_req:
        ptlrpc_req_finished(request);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        RETURN(rc);
}

static int osc_punch(struct obd_export *exp, struct obdo *oa,
                     struct lov_stripe_md *md, obd_size start,
                     obd_size end, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);

        if (!oa) {
                CERROR("oa NULL\n");
                RETURN(-EINVAL);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_PUNCH, 1, &size,
                                  NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        /* overload the size and blocks fields in the oa with start/end */
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out, rc);

        body = lustre_swab_repbuf (request, 0, sizeof (*body),
                                   lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

 out:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

static int osc_sync(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md *md, obd_size start, obd_size end)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);

        if (!oa) {
                CERROR("oa NULL\n");
                RETURN(-EINVAL);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SYNC, 1, &size,
                                  NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->oa, oa, sizeof(*oa));

        /* overload the size and blocks fields in the oa with start/end */
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);

        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("can't unpack ost_body\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

 out:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *request;
        struct ost_body *body;
        int rc, size = sizeof(*body);

        if (!oa) {
                CERROR("oa NULL\n");
                RETURN(-EINVAL);
        }

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_DESTROY, 1,
                                  &size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
                memcpy(obdo_logcookie(oa), oti->oti_logcookies,
                       sizeof(*oti->oti_logcookies));
                oti->oti_logcookies++;
        }

        memcpy(&body->oa, oa, sizeof(*oa));
        request->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("Can't unpack body\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

 out:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

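/* Each BRW request carries the client's cache accounting so the OST can
 * manage grants: the bytes currently dirty, the headroom left before
 * cl_dirty_max, the grant still held, and any grant that was lost. */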
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct osc_async_page *oap)
{
        cli->cl_dirty += PAGE_SIZE;
        cli->cl_avail_grant -= PAGE_SIZE;
        oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
        LASSERT(cli->cl_avail_grant >= 0);
}

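/* Policy for waking cache waiters: stop the scan once the dirty limit
 * is hit; keep waiting while in-flight BRWs may still return grant; and
 * otherwise wake each waiter, either consuming grant on its behalf or,
 * with no grant left, sending it to sync I/O with -EDQUOT. */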
/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
        struct list_head *l, *tmp;
        struct osc_cache_waiter *ocw;

        list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
                               cli->cl_dirty, cli->cl_dirty_max);
                        return;
                }

                /* if the cache is still dirty but we hold no grant, wait for
                 * pending RPCs that may yet return some grant before falling
                 * back to sync writes */
                if (cli->cl_brw_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%d BRWs in flight, no grant\n",
                               cli->cl_brw_in_flight);
                        return;
                }

                ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
                list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                } else {
                        osc_consume_write_grant(cli, ocw->ocw_oap);
                }

                wake_up(&ocw->ocw_waitq);
        }
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        spin_lock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
        cli->cl_avail_grant += body->oa.o_grant;
        /* waiters are woken in brw_interpret_oap */
        spin_unlock(&cli->cl_loi_list_lock);
}

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page *pga)
{
        char *ptr;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT (page_count > 0);

                if (pga->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga->count - nob_read);
                        kunmap(pga->pg);
                        page_count--;
                        pga++;
                        break;
                }

                nob_read -= pga->count;
                page_count--;
                pga++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
                memset(ptr, 0, pga->count);
                kunmap(pga->pg);
                pga++;
        }
}

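/* Check the per-niobuf return codes in a BRW_WRITE reply: swab them if
 * the reply arrived in the other byte order, fail on the first negative
 * rc, and verify the bulk moved exactly the bytes we requested. */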
static int check_write_rcs(struct ptlrpc_request *request,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page *pga)
{
        int *remote_rcs, i;

        /* return error if any niobuf was in error */
        remote_rcs = lustre_swab_repbuf(request, 1,
                                        sizeof(*remote_rcs) * niocount, NULL);
        if (remote_rcs == NULL) {
                CERROR("Missing/short RC vector on BRW_WRITE reply\n");
                return(-EPROTO);
        }
        if (lustre_msg_swabbed(request->rq_repmsg))
                for (i = 0; i < niocount; i++)
                        __swab32s(&remote_rcs[i]);

        for (i = 0; i < niocount; i++) {
                if (remote_rcs[i] < 0)
                        return(remote_rcs[i]);

                if (remote_rcs[i] != 0) {
                        CERROR("rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], request);
                        return(-EPROTO);
                }
        }

        if (request->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       request->rq_bulk->bd_nob_transferred, requested_nob);
                return(-EPROTO);
        }

        return (0);
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~OBD_BRW_FROM_GRANT;

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if ((p1->flag & mask) != (p2->flag & mask))
                        CERROR("is it ok to have flags 0x%x and 0x%x in the "
                               "same brw?\n", p1->flag, p2->flag);
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

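/* Checksum the first nob bytes of the page array, page by page; used to
 * catch corruption of bulk data between the client and the OST. */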
static obd_count cksum_pages(int nob, obd_count page_count,
                             struct brw_page *pga)
{
        obd_count cksum = 0;
        char *ptr;

        while (nob > 0) {
                LASSERT (page_count > 0);

                ptr = kmap(pga->pg);
                ost_checksum(&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
                             pga->count > nob ? nob : pga->count);
                kunmap(pga->pg);

                nob -= pga->count;
                page_count--;
                pga++;
        }

        return (cksum);
}

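/* Build an OST_READ/OST_WRITE request: three request buffers (ost_body,
 * obd_ioobj and the niobuf_remote array), a bulk descriptor for the
 * pages, and one niobuf per run of contiguous pages.  Also sets
 * rq_no_resend, since BRW requests cannot simply be resent (bug 937). */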
static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page *pga, int *requested_nobp,
                                int *niocountp, struct ptlrpc_request **reqp)
{
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct client_obd *cli = &imp->imp_obd->u.cli;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        unsigned long flags;
        int niocount;
        int requested_nob;
        int size[3];
        int opc;
        int rc, i;

        opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;

        for (niocount = i = 1; i < page_count; i++)
                if (!can_merge_pages(&pga[i - 1], &pga[i]))
                        niocount++;

        size[0] = sizeof(*body);
        size[1] = sizeof(*ioobj);
        size[2] = niocount * sizeof(*niobuf);

        req = ptlrpc_prep_req(imp, opc, 3, size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp (req, page_count,
                                             BULK_GET_SOURCE, OST_BULK_PORTAL);
        else
                desc = ptlrpc_prep_bulk_imp (req, page_count,
                                             BULK_PUT_SINK, OST_BULK_PORTAL);
        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
        niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));

        memcpy(&body->oa, oa, sizeof(*oa));

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;

        LASSERT (page_count > 0);
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = &pga[i];
                struct brw_page *pg_prev = pg - 1;

                LASSERT(pg->count > 0);
                LASSERT((pg->off & ~PAGE_MASK) + pg->count <= PAGE_SIZE);
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, pg->pg->private, pg->pg->index, pg->off,
                         pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
                         pg_prev->off);

                ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~PAGE_MASK,
                                      pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->off;
                        niobuf->len = pg->count;
                        niobuf->flags = pg->flag;
                }
        }

        LASSERT((void *)(niobuf - niocount) ==
                lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_no_resend = 1;
        spin_unlock_irqrestore(&req->rq_lock, flags);

        /* size[0] still sizeof (*body) */
        if (opc == OST_WRITE) {
                body->oa.o_valid |= OBD_MD_FLCKSUM;
                body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
                /* 1 RC per niobuf */
                size[1] = sizeof(__u32) * niocount;
                req->rq_replen = lustre_msg_size(2, size);
        } else {
                /* 1 RC for the whole I/O */
                req->rq_replen = lustre_msg_size(1, size);
        }

        *niocountp = niocount;
        *requested_nobp = requested_nob;
        *reqp = req;
        RETURN (0);

 out:
        ptlrpc_req_finished (req);
        RETURN (rc);
}

static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
                                int requested_nob, int niocount,
                                obd_count page_count, struct brw_page *pga,
                                int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc < 0)
                RETURN(rc);

        body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
        if (body == NULL) {
                CERROR ("Can't unpack body\n");
                RETURN(-EPROTO);
        }

        osc_update_grant(cli, body);

        if (req->rq_reqmsg->opc == OST_WRITE) {
                if (rc > 0) {
                        CERROR ("Unexpected +ve rc %d\n", rc);
                        RETURN(-EPROTO);
                }
                LASSERT (req->rq_bulk->bd_nob == requested_nob);

                RETURN(check_write_rcs(req, requested_nob, niocount,
                                       page_count, pga));
        }

        if (rc > requested_nob) {
                CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
                RETURN(-EPROTO);
        }

        if (rc != req->rq_bulk->bd_nob_transferred) {
                CERROR ("Unexpected rc %d (%d transferred)\n",
                        rc, req->rq_bulk->bd_nob_transferred);
                RETURN(-EPROTO);
        }

        if (rc < requested_nob)
                handle_short_read(rc, page_count, pga);

        memcpy(oa, &body->oa, sizeof(*oa));

        if (oa->o_valid & OBD_MD_FLCKSUM) {
                const struct ptlrpc_peer *peer =
                        &req->rq_import->imp_connection->c_peer;
                static int cksum_counter;
                obd_count server_cksum = oa->o_cksum;
                obd_count cksum = cksum_pages(rc, page_count, pga);
                char str[PTL_NALFMT_SIZE];

                portals_nid2str(peer->peer_ni->pni_number, peer->peer_nid, str);

                cksum_counter++;
                if (server_cksum != cksum) {
                        CERROR("Bad checksum: server %x, client %x, server NID "
                               LPX64" (%s)\n", server_cksum, cksum,
                               peer->peer_nid, str);
                        cksum_counter = 0;
                } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
                        CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
                              cksum_counter, peer->peer_nid, str, cksum);
                }
        } else {
                static int cksum_missed;

                cksum_missed++;
                if ((cksum_missed & (-cksum_missed)) == cksum_missed)
                        CERROR("Request checksum %u from "LPX64", no reply\n",
                               cksum_missed,
                               req->rq_import->imp_connection->c_peer.peer_nid);
        }

        RETURN(rc);
}

static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
                            struct lov_stripe_md *lsm,
                            obd_count page_count, struct brw_page *pga)
{
        int requested_nob;
        int niocount;
        struct ptlrpc_request *request;
        int rc;

restart_bulk:
        rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
                                  page_count, pga, &requested_nob, &niocount,
                                  &request);
        /* NB ^ sets rq_no_resend */
        if (rc != 0)
                return (rc);

        rc = ptlrpc_queue_wait(request);

        if (rc == -ETIMEDOUT && request->rq_resend) {
                DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
                ptlrpc_req_finished(request);
                goto restart_bulk;
        }

        rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
                                  page_count, pga, rc);

        ptlrpc_req_finished(request);
        RETURN (rc);
}

static int brw_interpret(struct ptlrpc_request *request,
                         struct osc_brw_async_args *aa, int rc)
{
        struct obdo *oa = aa->aa_oa;
        int requested_nob = aa->aa_requested_nob;
        int niocount = aa->aa_nio_count;
        obd_count page_count = aa->aa_page_count;
        struct brw_page *pga = aa->aa_pga;

        /* XXX bug 937 here */
        if (rc == -ETIMEDOUT && request->rq_resend) {
                DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
                LBUG(); /* re-send. later. */
        }

        rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
                                  page_count, pga, rc);
        RETURN (rc);
}

static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
                          struct lov_stripe_md *lsm, obd_count page_count,
                          struct brw_page *pga, struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *request;
        int requested_nob;
        int nio_count;
        struct osc_brw_async_args *aa;
        int rc;

        rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
                                  page_count, pga, &requested_nob, &nio_count,
                                  &request);
        /* NB ^ sets rq_no_resend */
        if (rc == 0) {
                LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
                aa = (struct osc_brw_async_args *)&request->rq_async_args;
                aa->aa_oa = oa;
                aa->aa_requested_nob = requested_nob;
                aa->aa_nio_count = nio_count;
                aa->aa_page_count = page_count;
                aa->aa_pga = pga;

                request->rq_interpret_reply = brw_interpret;
                ptlrpc_set_add_req(set, request);
        }
        RETURN (rc);
}

#ifndef min_t
#define min_t(type,x,y) \
        ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
#endif

/*
 * ugh, we want disk allocation on the target to happen in offset order.  we'll
 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
 * fine for our small page arrays and doesn't require allocation.  it's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's 1 and the array is sorted.
 */
static void sort_brw_pages(struct brw_page *array, int num)
{
        int stride, i, j;
        struct brw_page tmp;

        if (num == 1)
                return;
        for (stride = 1; stride < num ; stride = (stride * 3) + 1)
                ;
        do {
                stride /= 3;
                for (i = stride ; i < num ; i++) {
                        tmp = array[i];
                        j = i;
                        while (j >= stride && array[j - stride].off > tmp.off) {
                                array[j] = array[j - stride];
                                j -= stride;
                        }
                        array[j] = tmp;
                }
        } while (stride > 1);
}

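/* For reference: with the 3*n+1 progression above, a 64-page array is
 * handled with strides 40, 13, 4 and finally 1. */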
/* make sure the regions we're passing to Elan don't violate its '4
 * fragments' constraint.  portal headers are a fragment, all full
 * PAGE_SIZE long pages count as 1 fragment, and each partial page
 * counts as a fragment.  I think.  see bug 934. */
static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
{
        int frags_left = 3;
        int saw_whole_frag = 0;
        int i;

        for (i = 0 ; frags_left && i < pages ; pg++, i++) {
                if (pg->count == PAGE_SIZE) {
                        if (!saw_whole_frag) {
                                saw_whole_frag = 1;
                                frags_left--;
                        }
                } else {
                        frags_left--;
                }
        }
        return i;
}

static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
                   struct lov_stripe_md *md, obd_count page_count,
                   struct brw_page *pga, struct obd_trans_info *oti)
{
        if (cmd == OBD_BRW_CHECK) {
                /* The caller just wants to know if there's a chance that this
                 * I/O can succeed */
                struct obd_import *imp = class_exp2cliimp(exp);

                if (imp == NULL || imp->imp_invalid)
                        RETURN(-EIO);
                RETURN(0);
        }

        while (page_count) {
                obd_count pages_per_brw;
                int rc;

                if (page_count > OSC_BRW_MAX_IOV)
                        pages_per_brw = OSC_BRW_MAX_IOV;
                else
                        pages_per_brw = page_count;

                sort_brw_pages(pga, pages_per_brw);
                pages_per_brw = check_elan_limit(pga, pages_per_brw);

                rc = osc_brw_internal(cmd, exp, oa, md, pages_per_brw, pga);
                if (rc != 0)
                        RETURN(rc);

                page_count -= pages_per_brw;
                pga += pages_per_brw;
        }
        RETURN(0);
}

static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
                         struct lov_stripe_md *md, obd_count page_count,
                         struct brw_page *pga, struct ptlrpc_request_set *set,
                         struct obd_trans_info *oti)
{
        if (cmd == OBD_BRW_CHECK) {
                /* The caller just wants to know if there's a chance that this
                 * I/O can succeed */
                struct obd_import *imp = class_exp2cliimp(exp);

                if (imp == NULL || imp->imp_invalid)
                        RETURN(-EIO);
                RETURN(0);
        }

        while (page_count) {
                obd_count pages_per_brw;
                int rc;

                if (page_count > OSC_BRW_MAX_IOV)
                        pages_per_brw = OSC_BRW_MAX_IOV;
                else
                        pages_per_brw = page_count;

                sort_brw_pages(pga, pages_per_brw);
                pages_per_brw = check_elan_limit(pga, pages_per_brw);

                rc = async_internal(cmd, exp, oa, md, pages_per_brw, pga, set);
                if (rc != 0)
                        RETURN(rc);

                page_count -= pages_per_brw;
                pga += pages_per_brw;
        }
        RETURN(0);
}

static void osc_check_rpcs(struct client_obd *cli);
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
                           int sent);
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
static void lop_update_pending(struct client_obd *cli,
                               struct loi_oap_pages *lop, int cmd, int delta);

/* this is called when a sync waiter receives an interruption.  Its job is to
 * get the caller woken as soon as possible.  If its page hasn't been put in an
 * rpc yet it can dequeue immediately.  Otherwise it has to mark the rpc as
 * desiring interruption which will forcefully complete the rpc once the rpc
 * completes. */
static void osc_occ_interrupted(struct oig_callback_context *occ)
{
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;
        struct lov_oinfo *loi;

        /* XXX member_of() */
        oap = list_entry(occ, struct osc_async_page, oap_occ);

        spin_lock(&oap->oap_cli->cl_loi_list_lock);

        oap->oap_interrupted = 1;

        /* ok, it's been put in an rpc. */
        if (oap->oap_request != NULL) {
                ptlrpc_mark_interrupted(oap->oap_request);
                goto unlock;
        }

        /* we don't get interruption callbacks until osc_trigger_sync_io()
         * has been called and put the sync oaps in the pending/urgent lists.*/
        if (!list_empty(&oap->oap_pending_item)) {
                list_del_init(&oap->oap_pending_item);
                if (oap->oap_async_flags & ASYNC_URGENT)
                        list_del_init(&oap->oap_urgent_item);

                loi = oap->oap_loi;
                lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
                        &loi->loi_write_lop : &loi->loi_read_lop;
                lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
                loi_list_maint(oap->oap_cli, oap->oap_loi);

                oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
                oap->oap_oig = NULL;
        }

unlock:
        spin_unlock(&oap->oap_cli->cl_loi_list_lock);
}

/* this must be called holding the loi list lock to give coverage to exit_cache,
 * async_flag maintenance, and oap_request */
static void osc_complete_oap(struct client_obd *cli,
                             struct osc_async_page *oap, int sent, int rc)
{
        osc_exit_cache(cli, oap, sent);
        oap->oap_async_flags = 0;
        oap->oap_interrupted = 0;

        if (oap->oap_request != NULL) {
                ptlrpc_req_finished(oap->oap_request);
                oap->oap_request = NULL;
        }

        if (oap->oap_oig) {
                oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
                oap->oap_oig = NULL;
                return;
        }

        oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
                                           rc);
}

static int brw_interpret_oap(struct ptlrpc_request *request,
                             struct osc_brw_async_args *aa, int rc)
{
        struct osc_async_page *oap;
        struct client_obd *cli;
        struct list_head *pos, *n;

        CDEBUG(D_INODE, "request %p aa %p\n", request, aa);

        rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
                                  aa->aa_nio_count, aa->aa_page_count,
                                  aa->aa_pga, rc);

        cli = &request->rq_import->imp_obd->u.cli;
        /* in failout recovery we ignore writeback failure and want
         * to just tell llite to unlock the page and continue */
        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                rc = 0;

        spin_lock(&cli->cl_loi_list_lock);

        /* We need to decrement before osc_complete_oap->osc_wake_cache_waiters
         * is called so we know whether to go to sync BRWs or wait for more
         * RPCs to complete */
        cli->cl_brw_in_flight--;

        /* the caller may re-use the oap after the completion call so
         * we need to clean it up a little */
        list_for_each_safe(pos, n, &aa->aa_oaps) {
                oap = list_entry(pos, struct osc_async_page, oap_rpc_item);

                //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
                //oap->oap_page, oap->oap_page->index, oap);

                list_del_init(&oap->oap_rpc_item);
                osc_complete_oap(cli, oap, 1, rc);
        }

        osc_wake_cache_waiters(cli);
        osc_check_rpcs(cli);

        spin_unlock(&cli->cl_loi_list_lock);

        obdo_free(aa->aa_oa);
        OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));

        RETURN(rc);
}

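/* Turn a list of queued oaps into a single BRW rpc: allocate the
 * brw_page array and an obdo, fill them from the oaps, sort the pages
 * by offset and hand the result to osc_brw_prep_request(). */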
static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
                                            struct list_head *rpc_list,
                                            int page_count, int cmd)
{
        struct ptlrpc_request *req;
        struct brw_page *pga = NULL;
        int requested_nob, nio_count;
        struct osc_brw_async_args *aa;
        struct obdo *oa = NULL;
        struct obd_async_page_ops *ops = NULL;
        void *caller_data = NULL;
        struct list_head *pos;
        int i, rc;

        LASSERT(!list_empty(rpc_list));

        OBD_ALLOC(pga, sizeof(*pga) * page_count);
        if (pga == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        oa = obdo_alloc();
        if (oa == NULL)
                GOTO(out, req = ERR_PTR(-ENOMEM));

        i = 0;
        list_for_each(pos, rpc_list) {
                struct osc_async_page *oap;

                oap = list_entry(pos, struct osc_async_page, oap_rpc_item);

                ops = oap->oap_caller_ops;
                caller_data = oap->oap_caller_data;

                pga[i].off = oap->oap_obj_off + oap->oap_page_off;
                pga[i].pg = oap->oap_page;
                pga[i].count = oap->oap_count;
                pga[i].flag = oap->oap_brw_flags;
                CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
                       pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
                i++;
        }

        /* always get the data for the obdo for the rpc */
        LASSERT(ops != NULL);
        ops->ap_fill_obdo(caller_data, cmd, oa);

        sort_brw_pages(pga, page_count);
        rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
                                  pga, &requested_nob, &nio_count, &req);
        if (rc != 0) {
                CERROR("prep_req failed: %d\n", rc);
                GOTO(out, req = ERR_PTR(rc));
        }

        LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_brw_async_args *)&req->rq_async_args;
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = nio_count;
        aa->aa_page_count = page_count;
        aa->aa_pga = pga;

out:
        if (IS_ERR(req)) {
                if (oa)
                        obdo_free(oa);
                if (pga)
                        OBD_FREE(pga, sizeof(*pga) * page_count);
        }
        RETURN(req);
}

static void lop_update_pending(struct client_obd *cli,
                               struct loi_oap_pages *lop, int cmd, int delta)
{
        lop->lop_num_pending += delta;
        if (cmd == OBD_BRW_WRITE)
                cli->cl_pending_w_pages += delta;
        else
                cli->cl_pending_r_pages += delta;
}

/* the loi lock is held across this function but it's allowed to release
 * and reacquire it during its work */
static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
                            int cmd, struct loi_oap_pages *lop)
{
        struct ptlrpc_request *request;
        obd_count page_count = 0;
        struct list_head *tmp, *pos;
        struct osc_async_page *oap = NULL;
        struct osc_brw_async_args *aa;
        struct obd_async_page_ops *ops;
        LIST_HEAD(rpc_list);

        /* first we find the pages we're allowed to work with */
        list_for_each_safe(pos, tmp, &lop->lop_pending) {
                oap = list_entry(pos, struct osc_async_page, oap_pending_item);
                ops = oap->oap_caller_ops;

                LASSERT(oap->oap_magic == OAP_MAGIC);

                /* in llite being 'ready' equates to the page being locked
                 * until completion unlocks it.  commit_write submits a page
                 * as not ready because its unlock will happen unconditionally
                 * as the call returns.  if we race with commit_write giving
                 * us that page we don't want to create a hole in the page
                 * stream, so we stop and leave the rpc to be fired by
                 * another dirtier or kupdated interval (the not ready page
                 * will still be on the dirty list).  we could call in
                 * at the end of ll_file_write to process the queue again. */
                if (!(oap->oap_async_flags & ASYNC_READY)) {
                        int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
                        if (rc < 0)
                                CDEBUG(D_INODE, "oap %p page %p returned %d "
                                       "instead of ready\n", oap,
                                       oap->oap_page, rc);
                        switch (rc) {
                        case -EAGAIN:
                                /* llite is telling us that the page is still
                                 * in commit_write and that we should try
                                 * and put it in an rpc again later.  we
                                 * break out of the loop so we don't create
                                 * a hole in the sequence of pages in the rpc
                                 * stream. */
                                pos = NULL;
                                break;
                        case -EINTR:
                                /* the io isn't needed; tell the checks
                                 * below to complete the rpc with EINTR */
                                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                                oap->oap_count = -EINTR;
                                break;
                        case 0:
                                oap->oap_async_flags |= ASYNC_READY;
                                break;
                        default:
                                LASSERTF(0, "oap %p page %p returned %d "
                                         "from make_ready\n", oap,
                                         oap->oap_page, rc);
                                break;
                        }
                }
                if (pos == NULL)
                        break;

                /* take the page out of our book-keeping */
                list_del_init(&oap->oap_pending_item);
                lop_update_pending(cli, lop, cmd, -1);
                if (!list_empty(&oap->oap_urgent_item))
                        list_del_init(&oap->oap_urgent_item);

                /* ask the caller for the size of the io as the rpc leaves. */
                if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
                        oap->oap_count =
                                ops->ap_refresh_count(oap->oap_caller_data,cmd);
                if (oap->oap_count <= 0) {
                        CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
                               oap->oap_count);
                        osc_complete_oap(cli, oap, 0, oap->oap_count);
                        continue;
                }

                /* now put the page back in our accounting */
                list_add_tail(&oap->oap_rpc_item, &rpc_list);
                if (++page_count >= cli->cl_max_pages_per_rpc)
                        break;
        }

        osc_wake_cache_waiters(cli);

        if (page_count == 0)
                RETURN(0);

        loi_list_maint(cli, loi);
        spin_unlock(&cli->cl_loi_list_lock);

        request = osc_build_req(cli, &rpc_list, page_count, cmd);
        if (IS_ERR(request)) {
                /* this should happen rarely and is pretty bad, it makes the
                 * pending list not follow the dirty order */
                spin_lock(&cli->cl_loi_list_lock);
                list_for_each_safe(pos, tmp, &rpc_list) {
                        oap = list_entry(pos, struct osc_async_page,
                                         oap_rpc_item);
                        list_del_init(&oap->oap_rpc_item);

                        /* queued sync pages can be torn down while the pages
                         * were between the pending list and the rpc */
                        if (oap->oap_interrupted) {
                                CDEBUG(D_INODE, "oap %p interrupted\n", oap);
                                osc_complete_oap(cli, oap, 0, oap->oap_count);
                                continue;
                        }

                        /* put the page back in the loi/lop lists */
                        list_add_tail(&oap->oap_pending_item,
                                      &lop->lop_pending);
                        lop_update_pending(cli, lop, cmd, 1);
                        if (oap->oap_async_flags & ASYNC_URGENT)
                                list_add(&oap->oap_urgent_item,
                                         &lop->lop_urgent);
                }
                loi_list_maint(cli, loi);
                RETURN(PTR_ERR(request));
        }

        LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
        aa = (struct osc_brw_async_args *)&request->rq_async_args;
        INIT_LIST_HEAD(&aa->aa_oaps);
        list_splice(&rpc_list, &aa->aa_oaps);
        INIT_LIST_HEAD(&rpc_list);

        if (cmd == OBD_BRW_READ) {
                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_brw_in_flight);
        } else {
                lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_write_rpc_hist,
                                 cli->cl_brw_in_flight);
        }

        spin_lock(&cli->cl_loi_list_lock);

        cli->cl_brw_in_flight++;
        /* queued sync pages can be torn down while the pages
         * were between the pending list and the rpc */
        list_for_each(pos, &aa->aa_oaps) {
                oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
                if (oap->oap_interrupted) {
                        CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
                               oap, request);
                        ptlrpc_mark_interrupted(request);
                        break;
                }
        }

        CDEBUG(D_INODE, "req %p: %d pages, aa %p.  now %d in flight\n", request,
               page_count, aa, cli->cl_brw_in_flight);

        oap->oap_request = ptlrpc_request_addref(request);
        request->rq_interpret_reply = brw_interpret_oap;
        ptlrpcd_add_req(request);
        RETURN(1);
}

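/* Decide whether a lop has enough work queued to be worth an rpc: any
 * urgent page, a full rpc's worth of pages, or (for writes) dirtiers
 * waiting on cache space all trigger one. */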
static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
                         int cmd)
{
        int optimal;

        if (lop->lop_num_pending == 0)
                RETURN(0);

        /* stream rpcs in queue order as long as there is an urgent page
         * queued.  this is our cheap solution for good batching in the case
         * where writepage marks some random page in the middle of the file as
         * urgent because of, say, memory pressure */
        if (!list_empty(&lop->lop_urgent))
                RETURN(1);

        /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
        optimal = cli->cl_max_pages_per_rpc;
        if (cmd == OBD_BRW_WRITE) {
                /* trigger a write rpc stream as long as there are dirtiers
                 * waiting for space.  as they're waiting, they're not going to
                 * create more pages to coalesce with what's waiting.. */
                if (!list_empty(&cli->cl_cache_waiters))
                        RETURN(1);

                /* *2 to avoid triggering rpcs that would want to include pages
                 * that are being queued but which can't be made ready until
                 * the queuer finishes with the page.  this is a wart for
                 * llite::commit_write() */
                optimal *= 2;
        }
        if (lop->lop_num_pending >= optimal)
                RETURN(1);

        RETURN(0);
}

static void on_list(struct list_head *item, struct list_head *list,
                    int should_be_on)
{
        if (list_empty(item) && should_be_on)
                list_add_tail(item, list);
        else if (!list_empty(item) && !should_be_on)
                list_del_init(item);
}

/* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
 * can find pages to build into rpcs quickly */
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
{
        on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
                lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
                lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));

        on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
                loi->loi_write_lop.lop_num_pending);
}

#define LOI_DEBUG(LOI, STR, args...)                                    \
        CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,          \
               !list_empty(&(LOI)->loi_cli_item),                       \
               (LOI)->loi_write_lop.lop_num_pending,                    \
               !list_empty(&(LOI)->loi_write_lop.lop_urgent),           \
               (LOI)->loi_read_lop.lop_num_pending,                     \
               !list_empty(&(LOI)->loi_read_lop.lop_urgent),            \
               args)

struct lov_oinfo *osc_next_loi(struct client_obd *cli)
{
        /* first return all objects which we already know to have
         * pages ready to be stuffed into rpcs */
        if (!list_empty(&cli->cl_loi_ready_list))
                RETURN(list_entry(cli->cl_loi_ready_list.next,
                                  struct lov_oinfo, loi_cli_item));

        /* then if we have cache waiters, return all objects with queued
         * writes.  This is especially important when many small files
         * have filled up the cache and not been fired into rpcs because
         * they don't pass the nr_pending/object threshold */
        if (!list_empty(&cli->cl_cache_waiters) &&
            !list_empty(&cli->cl_loi_write_list))
                RETURN(list_entry(cli->cl_loi_write_list.next,
                                  struct lov_oinfo, loi_write_item));

        RETURN(NULL);
}

/* called with the loi list lock held */
static void osc_check_rpcs(struct client_obd *cli)
{
        struct lov_oinfo *loi;
        int rc = 0, race_counter = 0;

        while ((loi = osc_next_loi(cli)) != NULL) {
                LOI_DEBUG(loi, "%d in flight\n", cli->cl_brw_in_flight);

                if (cli->cl_brw_in_flight >= cli->cl_max_rpcs_in_flight)
                        break;

                /* attempt some read/write balancing by alternating between
                 * reads and writes in an object.  The makes_rpc checks here
                 * would be redundant if we were getting read/write work items
                 * instead of objects.  we don't want send_oap_rpc to drain a
                 * partial read pending queue when we're given this object to
                 * do io on writes while there are cache waiters */
                if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
                        rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
                                              &loi->loi_write_lop);
                        if (rc < 0)
                                break;
                        if (rc > 0)
                                race_counter = 0;
                        else
                                race_counter++;
                }
                if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
                        rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
                                              &loi->loi_read_lop);
                        if (rc < 0)
                                break;
                        if (rc > 0)
                                race_counter = 0;
                        else
                                race_counter++;
                }

                /* attempt some inter-object balancing by issuing rpcs
                 * for each object in turn */
                if (!list_empty(&loi->loi_cli_item))
                        list_del_init(&loi->loi_cli_item);
                if (!list_empty(&loi->loi_write_item))
                        list_del_init(&loi->loi_write_item);

                loi_list_maint(cli, loi);

                /* send_oap_rpc fails with 0 when make_ready tells it to
                 * back off.  llite's make_ready does this when it tries
                 * to lock a page queued for write that is already locked.
                 * we want to try sending rpcs from many objects, but we
                 * don't want to spin failing with 0. */
                if (race_counter == 10)
                        break;
        }
}

/* we're trying to queue a page in the osc so we're subject to the
 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
 * If the osc's queued pages are already at that limit, then we want to sleep
 * until there is space in the osc's queue for us.  We also may be waiting for
 * write credits from the OST if there are RPCs in flight that may return some
 * before we fall back to sync writes.
 *
 * We need this to know our allocation was granted in the presence of signals */
static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
        int rc;

        spin_lock(&cli->cl_loi_list_lock);
        rc = list_empty(&ocw->ocw_entry) || cli->cl_brw_in_flight == 0;
        spin_unlock(&cli->cl_loi_list_lock);
        RETURN(rc);
}

/* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
 * grant or cache space. */
static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
                           struct osc_async_page *oap)
{
        struct osc_cache_waiter ocw;
        struct l_wait_info lwi = { 0 };

        CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
               cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
               cli->cl_avail_grant);

        if (cli->cl_dirty_max < PAGE_SIZE)
                RETURN(-EDQUOT);

        /* Hopefully normal case - cache space and write credits available */
        if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
            cli->cl_avail_grant >= PAGE_SIZE) {
                /* account for ourselves */
                osc_consume_write_grant(cli, oap);
                RETURN(0);
        }

        /* Make sure that there are write rpcs in flight to wait for.  This
         * is a little silly as this object may not have any pending but
         * other objects sure might. */
        if (cli->cl_brw_in_flight) {
                list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
                init_waitqueue_head(&ocw.ocw_waitq);
                ocw.ocw_oap = oap;
                ocw.ocw_rc = 0;

                loi_list_maint(cli, loi);
                osc_check_rpcs(cli);
                spin_unlock(&cli->cl_loi_list_lock);

                CDEBUG(0, "sleeping for cache space\n");
                l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);

                spin_lock(&cli->cl_loi_list_lock);
                if (!list_empty(&ocw.ocw_entry)) {
                        list_del(&ocw.ocw_entry);
                        RETURN(-EINTR);
                }
                RETURN(ocw.ocw_rc);
        }

        RETURN(-EDQUOT);
}

/* the companion to enter_cache, called when an oap is no longer part of the
 * dirty accounting: writeback completed or truncate happened before writing
 * started.  must be called with the loi lock held. */
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
                           int sent)
{
        if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT))
                return;

        oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
        cli->cl_dirty -= PAGE_SIZE;
        if (!sent) {
                cli->cl_lost_grant += PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        }
}

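/* Allocate and initialise the osc_async_page bookkeeping for one page of
 * a file; the opaque cookie returned through *res is what the caller
 * later passes to osc_queue_async_io() and friends.  A hypothetical
 * caller (my_page_ops and my_data are invented for illustration) might
 * look like:
 *
 *      void *cookie;
 *      rc = osc_prep_async_page(exp, lsm, NULL, page, offset,
 *                               &my_page_ops, my_data, &cookie);
 *      if (rc == 0)
 *              rc = osc_queue_async_io(exp, lsm, NULL, cookie,
 *                                      OBD_BRW_WRITE, 0, PAGE_SIZE, 0,
 *                                      ASYNC_READY | ASYNC_URGENT);
 */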
int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
                        struct lov_oinfo *loi, struct page *page,
                        obd_off offset, struct obd_async_page_ops *ops,
                        void *data, void **res)
{
        struct osc_async_page *oap;

        OBD_ALLOC(oap, sizeof(*oap));
        if (oap == NULL)
                RETURN(-ENOMEM);

        oap->oap_magic = OAP_MAGIC;
        oap->oap_cli = &exp->exp_obd->u.cli;

        oap->oap_caller_ops = ops;
        oap->oap_caller_data = data;

        oap->oap_page = page;
        oap->oap_obj_off = offset;

        INIT_LIST_HEAD(&oap->oap_pending_item);
        INIT_LIST_HEAD(&oap->oap_urgent_item);
        INIT_LIST_HEAD(&oap->oap_rpc_item);

        oap->oap_occ.occ_interrupted = osc_occ_interrupted;

        CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
        *res = oap;
        RETURN(0);
}

struct osc_async_page *oap_from_cookie(void *cookie)
{
        struct osc_async_page *oap = cookie;
        if (oap->oap_magic != OAP_MAGIC)
                return ERR_PTR(-EINVAL);
        return oap;
}

static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
                              struct lov_oinfo *loi, void *cookie,
                              int cmd, obd_off off, int count,
                              obd_flag brw_flags, enum async_flags async_flags)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;
        int rc;

        oap = oap_from_cookie(cookie);
        if (IS_ERR(oap))
                RETURN(PTR_ERR(oap));

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (!list_empty(&oap->oap_pending_item) ||
            !list_empty(&oap->oap_urgent_item) ||
            !list_empty(&oap->oap_rpc_item))
                RETURN(-EBUSY);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        spin_lock(&cli->cl_loi_list_lock);

        oap->oap_cmd = cmd;
        oap->oap_async_flags = async_flags;
        oap->oap_page_off = off;
        oap->oap_count = count;
        oap->oap_brw_flags = brw_flags;

        if (cmd == OBD_BRW_WRITE) {
                rc = osc_enter_cache(cli, loi, oap);
                if (rc) {
                        spin_unlock(&cli->cl_loi_list_lock);
                        RETURN(rc);
                }
                lop = &loi->loi_write_lop;
        } else {
                lop = &loi->loi_read_lop;
        }

        if (oap->oap_async_flags & ASYNC_URGENT)
                list_add(&oap->oap_urgent_item, &lop->lop_urgent);
        list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
        lop_update_pending(cli, lop, cmd, 1);

        loi_list_maint(cli, loi);

        LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
                  cmd);

        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);

        RETURN(0);
}

/* aka (~was & now & flag), but this is more clear :) */
#define SETTING(was, now, flag) (!(was & flag) && (now & flag))

static int osc_set_async_flags(struct obd_export *exp,
                               struct lov_stripe_md *lsm,
                               struct lov_oinfo *loi, void *cookie,
                               obd_flag async_flags)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct loi_oap_pages *lop;
        struct osc_async_page *oap;
        int rc = 0;

        oap = oap_from_cookie(cookie);
        if (IS_ERR(oap))
                RETURN(PTR_ERR(oap));

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        if (oap->oap_cmd == OBD_BRW_WRITE) {
                lop = &loi->loi_write_lop;
        } else {
                lop = &loi->loi_read_lop;
        }

        spin_lock(&cli->cl_loi_list_lock);

        if (list_empty(&oap->oap_pending_item))
                GOTO(out, rc = -EINVAL);

        if ((oap->oap_async_flags & async_flags) == async_flags)
                GOTO(out, rc = 0);

        if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
                oap->oap_async_flags |= ASYNC_READY;

        if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
                if (list_empty(&oap->oap_rpc_item)) {
                        list_add(&oap->oap_urgent_item, &lop->lop_urgent);
                        loi_list_maint(cli, loi);
                }
        }

        LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
                  oap->oap_async_flags);
out:
        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);
        RETURN(rc);
}

static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
                              struct lov_oinfo *loi,
                              struct obd_io_group *oig, void *cookie,
                              int cmd, obd_off off, int count,
                              obd_flag brw_flags,
                              obd_flag async_flags)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;

        oap = oap_from_cookie(cookie);
        if (IS_ERR(oap))
                RETURN(PTR_ERR(oap));

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (!list_empty(&oap->oap_pending_item) ||
            !list_empty(&oap->oap_urgent_item) ||
            !list_empty(&oap->oap_rpc_item))
                RETURN(-EBUSY);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        spin_lock(&cli->cl_loi_list_lock);

        oap->oap_cmd = cmd;
        oap->oap_page_off = off;
        oap->oap_count = count;
        oap->oap_brw_flags = brw_flags;
        oap->oap_async_flags = async_flags;

        if (cmd == OBD_BRW_WRITE)
                lop = &loi->loi_write_lop;
        else
                lop = &loi->loi_read_lop;

        list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
        if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
                oap->oap_oig = oig;
                oig_add_one(oig, &oap->oap_occ);
        }

        LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);

        spin_unlock(&cli->cl_loi_list_lock);

        RETURN(0);
}

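/* Move everything queued on the group's pending list onto the regular
 * pending and urgent lists so osc_check_rpcs() will build rpcs from it. */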
static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
                                 struct loi_oap_pages *lop, int cmd)
{
        struct list_head *pos, *tmp;
        struct osc_async_page *oap;

        list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
                oap = list_entry(pos, struct osc_async_page, oap_pending_item);
                list_del(&oap->oap_pending_item);
                list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
                list_add(&oap->oap_urgent_item, &lop->lop_urgent);
                lop_update_pending(cli, lop, cmd, 1);
        }
        loi_list_maint(cli, loi);
}

static int osc_trigger_group_io(struct obd_export *exp,
                                struct lov_stripe_md *lsm,
                                struct lov_oinfo *loi,
                                struct obd_io_group *oig)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;

        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                RETURN(-EIO);

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        spin_lock(&cli->cl_loi_list_lock);

        osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
        osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);

        osc_check_rpcs(cli);
        spin_unlock(&cli->cl_loi_list_lock);

        RETURN(0);
}

static int osc_teardown_async_page(struct obd_export *exp,
                                   struct lov_stripe_md *lsm,
                                   struct lov_oinfo *loi, void *cookie)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct loi_oap_pages *lop;
        struct osc_async_page *oap;
        int rc = 0;

        oap = oap_from_cookie(cookie);
        if (IS_ERR(oap))
                RETURN(PTR_ERR(oap));

        if (loi == NULL)
                loi = &lsm->lsm_oinfo[0];

        if (oap->oap_cmd == OBD_BRW_WRITE) {
                lop = &loi->loi_write_lop;
        } else {
                lop = &loi->loi_read_lop;
        }

        spin_lock(&cli->cl_loi_list_lock);

        if (!list_empty(&oap->oap_rpc_item))
                GOTO(out, rc = -EBUSY);

        osc_exit_cache(cli, oap, 0);
        osc_wake_cache_waiters(cli);

        if (!list_empty(&oap->oap_urgent_item)) {
                list_del_init(&oap->oap_urgent_item);
                oap->oap_async_flags &= ~ASYNC_URGENT;
        }
        if (!list_empty(&oap->oap_pending_item)) {
                list_del_init(&oap->oap_pending_item);
                lop_update_pending(cli, lop, oap->oap_cmd, -1);
        }
        loi_list_maint(cli, loi);

        LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
out:
        spin_unlock(&cli->cl_loi_list_lock);
        if (rc == 0)
                OBD_FREE(oap, sizeof(*oap));
        RETURN(rc);
}

/* Note: caller will lock/unlock, and set uptodate on the pages */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
                           struct lov_stripe_md *lsm, obd_count page_count,
                           struct brw_page *pga)
{
        struct ptlrpc_request *request = NULL;
        struct ost_body *body;
        struct niobuf_remote *nioptr;
        struct obd_ioobj *iooptr;
        int rc, size[3] = {sizeof(*body)}, mapped = 0;
        int swab;

        /* XXX does not handle 'new' brw protocol */

        size[1] = sizeof(struct obd_ioobj);
        size[2] = page_count * sizeof(*nioptr);

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_READ, 3,
                                  size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
        iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
        nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
                                sizeof(*nioptr) * page_count);

        memcpy(&body->oa, oa, sizeof(body->oa));

        obdo_to_ioobj(oa, iooptr);
        iooptr->ioo_bufcnt = page_count;

        for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
                LASSERT(PageLocked(pga[mapped].pg));
                LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);

                nioptr->offset = pga[mapped].off;
                nioptr->len    = pga[mapped].count;
                nioptr->flags  = pga[mapped].flag;
        }

        size[1] = page_count * sizeof(*nioptr);
        request->rq_replen = lustre_msg_size(2, size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out_req, rc);

        body = lustre_swab_repbuf(request, 0, sizeof(*body),
                                  lustre_swab_ost_body);
        if (body == NULL) {
                CERROR("Can't unpack body\n");
                GOTO(out_req, rc = -EPROTO);
        }

        memcpy(oa, &body->oa, sizeof(*oa));

        swab = lustre_msg_swabbed(request->rq_repmsg);
        LASSERT_REPSWAB(request, 1);
        nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
        if (!nioptr) {
                /* nioptr missing or short */
                GOTO(out_req, rc = -EPROTO);
        }

        /* actual read */
        for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
                struct page *page = pga[mapped].pg;
                struct buffer_head *bh;
                kdev_t dev;

                if (swab)
                        lustre_swab_niobuf_remote (nioptr);

                /* get the SAN device associated with this export */
                LASSERT(exp->exp_obd != NULL);
                dev = exp->exp_obd->u.cli.cl_sandev;

                /* hole */
                if (!nioptr->offset) {
                        CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
                               page->mapping->host->i_ino,
                               page->index);
                        memset(page_address(page), 0, PAGE_SIZE);
                        continue;
                }

                if (!page->buffers) {
                        create_empty_buffers(page, dev, PAGE_SIZE);
                        bh = page->buffers;

                        clear_bit(BH_New, &bh->b_state);
                        set_bit(BH_Mapped, &bh->b_state);
                        bh->b_blocknr = (unsigned long)nioptr->offset;
                        bh->b_dev = dev;
                        clear_bit(BH_Uptodate, &bh->b_state);

                        ll_rw_block(READ, 1, &bh);
                } else {
                        bh = page->buffers;

                        /* if buffer already existed, it must be the
                         * one we mapped before, check it */
                        LASSERT(!test_bit(BH_New, &bh->b_state));
                        LASSERT(test_bit(BH_Mapped, &bh->b_state));
                        LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);

                        /* wait for its I/O completion */
                        if (test_bit(BH_Lock, &bh->b_state))
                                wait_on_buffer(bh);

                        if (!test_bit(BH_Uptodate, &bh->b_state))
                                ll_rw_block(READ, 1, &bh);
                }

                /* must do synchronous write here */
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        /* I/O error */
                        rc = -EIO;
                        GOTO(out_req, rc);
                }
        }

out_req:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
                            struct lov_stripe_md *lsm, obd_count page_count,
                            struct brw_page *pga)
{
        struct ptlrpc_request *request = NULL;
        struct ost_body *body;
        struct niobuf_remote *nioptr;
        struct obd_ioobj *iooptr;
        int rc, size[3] = {sizeof(*body)}, mapped = 0;
        int swab;

        size[1] = sizeof(struct obd_ioobj);
        size[2] = page_count * sizeof(*nioptr);

        request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_WRITE,
                                  3, size, NULL);
        if (!request)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
        iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
        nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
                                sizeof (*nioptr) * page_count);

        memcpy(&body->oa, oa, sizeof(body->oa));

        obdo_to_ioobj(oa, iooptr);
        iooptr->ioo_bufcnt = page_count;

        /* pack request */
        for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
                LASSERT(PageLocked(pga[mapped].pg));
                LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);

                nioptr->offset = pga[mapped].off;
                nioptr->len    = pga[mapped].count;
                nioptr->flags  = pga[mapped].flag;
        }

        size[1] = page_count * sizeof(*nioptr);
        request->rq_replen = lustre_msg_size(2, size);

        rc = ptlrpc_queue_wait(request);
        if (rc)
                GOTO(out_req, rc);

        swab = lustre_msg_swabbed (request->rq_repmsg);
        LASSERT_REPSWAB (request, 1);
        nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
        if (!nioptr) {
                CERROR("absent/short niobuf array\n");
                GOTO(out_req, rc = -EPROTO);
        }

        /* actual write */
        for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
                struct page *page = pga[mapped].pg;
                struct buffer_head *bh;
                kdev_t dev;

                if (swab)
                        lustre_swab_niobuf_remote (nioptr);

                /* get the SAN device associated with this export */
                LASSERT(exp->exp_obd != NULL);
                dev = exp->exp_obd->u.cli.cl_sandev;

                if (!page->buffers) {
                        create_empty_buffers(page, dev, PAGE_SIZE);
                } else {
                        /* checking */
                        LASSERT(!test_bit(BH_New, &page->buffers->b_state));
                        LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
                        LASSERT(page->buffers->b_blocknr ==
                                (unsigned long)nioptr->offset);
                }
                bh = page->buffers;

                LASSERT(bh);

                /* if the buffer is locked, wait for its I/O completion */
                if (test_bit(BH_Lock, &bh->b_state))
                        wait_on_buffer(bh);

                clear_bit(BH_New, &bh->b_state);
                set_bit(BH_Mapped, &bh->b_state);

                /* override the block nr */
                bh->b_blocknr = (unsigned long)nioptr->offset;

                /* we are about to write it, so mark it uptodate and dirty;
                 * the page lock should guarantee no race condition here */
                set_bit(BH_Uptodate, &bh->b_state);
                set_bit(BH_Dirty, &bh->b_state);

                ll_rw_block(WRITE, 1, &bh);

                /* must do synchronous write here */
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
                        /* I/O error */
                        rc = -EIO;
                        GOTO(out_req, rc);
                }
        }

out_req:
        ptlrpc_req_finished(request);
        RETURN(rc);
}

static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
                      struct lov_stripe_md *lsm, obd_count page_count,
                      struct brw_page *pga, struct obd_trans_info *oti)
{
        while (page_count) {
                obd_count pages_per_brw;
                int rc;

                if (page_count > OSC_BRW_MAX_IOV)
                        pages_per_brw = OSC_BRW_MAX_IOV;
                else
                        pages_per_brw = page_count;

                if (cmd & OBD_BRW_WRITE)
                        rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw,pga);
                else
                        rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);

                if (rc != 0)
                        RETURN(rc);

                page_count -= pages_per_brw;
                pga += pages_per_brw;
        }
        RETURN(0);
}
#endif

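/* Stash the caller's inode in the lock's l_ast_data, complaining loudly
 * if a different inode that is not being freed is already set there. */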
static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        LASSERT(lock != NULL);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (lock->l_ast_data && lock->l_ast_data != data) {
                struct inode *new_inode = data;
                struct inode *old_inode = lock->l_ast_data;
                LASSERTF(old_inode->i_state & I_FREEING,
                         "Found existing inode %p/%lu/%u state %lu in lock: "
                         "setting data to %p/%lu/%u\n", old_inode,
                         old_inode->i_ino, old_inode->i_generation,
                         old_inode->i_state,
                         new_inode, new_inode->i_ino, new_inode->i_generation);
        }

        lock->l_ast_data = data;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_PUT(lock);
}

2364 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2365 ldlm_iterator_t replace, void *data)
2367 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2368 struct obd_device *obd = class_exp2obd(exp);
2370 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
2374 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2375 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2376 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2377 void *data, __u32 lvb_len, void *lvb_swabber,
2378 struct lustre_handle *lockh)
2380 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2381 struct obd_device *obd = exp->exp_obd;
2386 /* Filesystem lock extents are extended to page boundaries so that
2387 * dealing with the page cache is a little smoother. */
2388 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2389 policy->l_extent.end |= ~PAGE_MASK;
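/*
 * Illustrative sketch of the rounding above, assuming 4096-byte pages
 * (PAGE_MASK == ~0xfffUL): a lock request on bytes [5000, 6000] is
 * widened to [4096, 8191].  The subtract-the-page-offset form is likely
 * used because PAGE_MASK is only an unsigned long; and-ing it directly
 * into the 64-bit extent start would clear the high 32 bits on 32-bit
 * kernels.
 */
#if 0
        __u64 start = 5000, end = 6000;
        start -= start & ~PAGE_MASK;    /* 5000 -> 4096, start of page */
        end |= ~PAGE_MASK;              /* 6000 -> 8191, last byte of page */
#endif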
2391 /* Next, search for already existing extent locks that will cover us */
2392 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2395 osc_set_data_with_check(lockh, data);
2396 if (*flags & LDLM_FL_HAS_INTENT) {
2397 /* I would like to be able to ASSERT here that rss <=
2398 * kms, but I can't, for reasons which are explained in
2401 /* We already have a lock, and it's referenced */
2405 /* If we're trying to read, we also search for an existing PW lock. The
2406 * VFS and page cache already protect us locally, so lots of readers/
2407 * writers can share a single PW lock.
2409 * There are problems with conversion deadlocks, so instead of
2410 * converting a read lock to a write lock, we'll just enqueue a new
2413 * At some point we should cancel the read lock instead of making them
2414 * send us a blocking callback, but there are problems with canceling
2415 * locks out from other users right now, too. */
2417 if (mode == LCK_PR) {
2418 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2419 policy, LCK_PW, lockh);
2421 /* FIXME: This is not incredibly elegant, but it might
2422 * be more elegant than adding another parameter to
2423 * lock_match. I want a second opinion. */
2424 ldlm_lock_addref(lockh, LCK_PR);
2425 ldlm_lock_decref(lockh, LCK_PW);
2426 osc_set_data_with_check(lockh, data);
2431 rc = ldlm_cli_enqueue(exp, NULL, obd->obd_namespace, res_id, type,
2432 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2433 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
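/* On success, or when an intent enqueue is aborted by the server, the
 * reply's lock value block carries the OST's view of the object size;
 * cache it as this stripe's loi_rss. */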
2435 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc)
2436 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2441 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2442 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2443 int *flags, void *data, struct lustre_handle *lockh)
2445 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2446 struct obd_device *obd = exp->exp_obd;
2450 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2452 /* Filesystem lock extents are extended to page boundaries so that
2453 * dealing with the page cache is a little smoother */
2454 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2455 policy->l_extent.end |= ~PAGE_MASK;
2457 /* Next, search for already existing extent locks that will cover us */
2458 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2459 policy, mode, lockh);
2461 osc_set_data_with_check(lockh, data);
2464 /* If we're trying to read, we also search for an existing PW lock. The
2465 * VFS and page cache already protect us locally, so lots of readers/
2466 * writers can share a single PW lock. */
2467 if (mode == LCK_PR) {
2468 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2469 policy, LCK_PW, lockh);
2471 /* FIXME: This is not incredibly elegant, but it might
2472 * be more elegant than adding another parameter to
2473 * lock_match. I want a second opinion. */
2474 osc_set_data_with_check(lockh, data);
2475 ldlm_lock_addref(lockh, LCK_PR);
2476 ldlm_lock_decref(lockh, LCK_PW);
2482 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2483 __u32 mode, struct lustre_handle *lockh)
2487 ldlm_lock_decref(lockh, mode);
2492 static int osc_cancel_unused(struct obd_export *exp,
2493 struct lov_stripe_md *lsm, int flags, void *opaque)
2495 struct obd_device *obd = class_exp2obd(exp);
2496 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2498 return ldlm_cli_cancel_unused(obd->obd_namespace, &res_id, flags,
2502 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2503 unsigned long max_age)
2505 struct obd_statfs *msfs;
2506 struct ptlrpc_request *request;
2507 int rc, size = sizeof(*osfs);
2510 /* We could possibly pass max_age in the request (as an absolute
2511 * timestamp or a "seconds.usec ago") so the target can avoid doing
2512 * extra calls into the filesystem when they aren't necessary (e.g.
2513 * during mount, where that would help a bit). Having relative timestamps
2514 * is not so great if request processing is slow, while absolute
2515 * timestamps are not ideal because they need time synchronization. */
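/*
 * Caller-side sketch of the caching that max_age could enable.  The
 * obd_osfs/obd_osfs_age fields and the jiffies interpretation of
 * max_age are assumptions for illustration, not fields this file
 * defines:
 */
#if 0
        if (time_after(obd->obd_osfs_age, max_age)) {
                memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
                RETURN(0);      /* cached result is still fresh enough */
        }
        /* otherwise fall through and issue the OST_STATFS request */
#endif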
2516 request = ptlrpc_prep_req(obd->u.cli.cl_import, OST_STATFS,0,NULL,NULL);
2520 request->rq_replen = lustre_msg_size(1, &size);
2521 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2523 rc = ptlrpc_queue_wait(request);
2527 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2528 lustre_swab_obd_statfs);
2530 CERROR("Can't unpack obd_statfs\n");
2531 GOTO(out, rc = -EPROTO);
2534 memcpy(osfs, msfs, sizeof(*osfs));
2538 ptlrpc_req_finished(request);
2542 /* Retrieve object striping information.
2544 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
2545 * the maximum number of OST entries which will fit in the user buffer.
2546 * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here).
2548 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2550 struct lov_user_md lum, *lumk;
2557 rc = copy_from_user(&lum, lump, sizeof(lum));
2561 if (lum.lmm_magic != LOV_USER_MAGIC)
2564 if (lum.lmm_stripe_count > 0) {
2565 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2566 OBD_ALLOC(lumk, lum_size);
2570 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2572 lum_size = sizeof(lum);
2576 lumk->lmm_object_id = lsm->lsm_object_id;
2577 lumk->lmm_stripe_count = 1;
2579 if (copy_to_user(lump, lumk, lum_size))
2583 OBD_FREE(lumk, lum_size);
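/*
 * A hypothetical user-space caller of LL_IOC_LOV_GETSTRIPE against the
 * handler above; `fd' is assumed to be an open descriptor on a Lustre
 * file.  The buffer must leave room for the single lmm_objects[] entry
 * copied back when lmm_stripe_count > 0:
 */
#if 0
        char buf[sizeof(struct lov_user_md) +
                 sizeof(((struct lov_user_md *)0)->lmm_objects[0])];
        struct lov_user_md *lump = (struct lov_user_md *)buf;

        lump->lmm_magic = LOV_USER_MAGIC;
        lump->lmm_stripe_count = 1;     /* room for one OST entry */
        if (ioctl(fd, LL_IOC_LOV_GETSTRIPE, lump) == 0)
                /* lump->lmm_object_id now holds the object id */;
#endif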
2588 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2589 void *karg, void *uarg)
2591 struct obd_device *obd = exp->exp_obd;
2592 struct obd_ioctl_data *data = karg;
2599 case OBD_IOC_LOV_GET_CONFIG: {
2601 struct lov_desc *desc;
2602 struct obd_uuid uuid;
2606 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2607 GOTO(out, err = -EINVAL);
2609 data = (struct obd_ioctl_data *)buf;
2611 if (sizeof(*desc) > data->ioc_inllen1) {
2613 GOTO(out, err = -EINVAL);
2616 if (data->ioc_inllen2 < sizeof(uuid)) {
2618 GOTO(out, err = -EINVAL);
2621 desc = (struct lov_desc *)data->ioc_inlbuf1;
2622 desc->ld_tgt_count = 1;
2623 desc->ld_active_tgt_count = 1;
2624 desc->ld_default_stripe_count = 1;
2625 desc->ld_default_stripe_size = 0;
2626 desc->ld_default_stripe_offset = 0;
2627 desc->ld_pattern = 0;
2628 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2630 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2632 err = copy_to_user((void *)uarg, buf, len);
2635 obd_ioctl_freedata(buf, len);
2638 case LL_IOC_LOV_SETSTRIPE:
2639 err = obd_alloc_memmd(exp, karg);
2643 case LL_IOC_LOV_GETSTRIPE:
2644 err = osc_getstripe(karg, uarg);
2646 case OBD_IOC_CLIENT_RECOVER:
2647 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2652 case IOC_OSC_SET_ACTIVE:
2653 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2657 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
2658 GOTO(out, err = -ENOTTY);
2665 static int osc_get_info(struct obd_export *exp, obd_count keylen,
2666 void *key, __u32 *vallen, void *val)
2669 if (!vallen || !val)
2672 if (keylen > strlen("lock_to_stripe") &&
2673 strcmp(key, "lock_to_stripe") == 0) {
2674 __u32 *stripe = val;
2675 *vallen = sizeof(*stripe);
2678 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2679 struct ptlrpc_request *req;
2681 char *bufs[1] = {key};
2683 req = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GET_INFO, 1,
2688 req->rq_replen = lustre_msg_size(1, vallen);
2689 rc = ptlrpc_queue_wait(req);
2693 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2694 lustre_swab_ost_last_id);
2695 if (reply == NULL) {
2696 CERROR("Can't unpack OST last ID\n");
2697 GOTO(out, rc = -EPROTO);
2699 *((obd_id *)val) = *reply;
2701 ptlrpc_req_finished(req);
2707 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2708 void *key, obd_count vallen, void *val)
2710 struct ptlrpc_request *req;
2711 struct obd_import *imp = class_exp2cliimp(exp);
2712 struct llog_ctxt *ctxt;
2713 int rc, size = keylen;
2714 char *bufs[1] = {key};
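/*
 * The dispatch below compares keylen/strlen pairs by hand; a helper
 * macro along these lines (hypothetical in this file) would make the
 * intent clearer:
 */
#if 0
#define KEY_IS(str) \
        (keylen == strlen(str) && memcmp(key, str, strlen(str)) == 0)

        if (KEY_IS("next_id")) { /* ... */ }
#endif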
2717 if (keylen == strlen("next_id") &&
2718 memcmp(key, "next_id", strlen("next_id")) == 0) {
2719 if (vallen != sizeof(obd_id))
2721 exp->u.eu_osc_data.oed_oscc.oscc_next_id = *((obd_id*)val) + 1;
2722 CDEBUG(D_INODE, "%s: set oscc_next_id = "LPU64"\n",
2723 exp->exp_obd->obd_name,
2724 exp->u.eu_osc_data.oed_oscc.oscc_next_id);
2729 if (keylen == strlen("growth_count") &&
2730 memcmp(key, "growth_count", strlen("growth_count")) == 0) {
2731 if (vallen != sizeof(int))
2733 exp->u.eu_osc_data.oed_oscc.oscc_grow_count = *((int*)val);
2737 if (keylen == strlen("unlinked") &&
2738 memcmp(key, "unlinked", keylen) == 0) {
2739 struct osc_creator *oscc = &exp->u.eu_osc_data.oed_oscc;
2740 spin_lock(&oscc->oscc_lock);
2741 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2742 spin_unlock(&oscc->oscc_lock);
2747 if (keylen == strlen("initial_recov") &&
2748 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
2749 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2750 if (vallen != sizeof(int))
2752 imp->imp_initial_recov = *(int *)val;
2753 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
2754 exp->exp_obd->obd_name,
2755 imp->imp_initial_recov);
2759 if (keylen < strlen("mds_conn") ||
2760 memcmp(key, "mds_conn", strlen("mds_conn")) != 0)
2764 req = ptlrpc_prep_req(imp, OST_SET_INFO, 1, &size, bufs);
2768 req->rq_replen = lustre_msg_size(0, NULL);
2769 rc = ptlrpc_queue_wait(req);
2770 ptlrpc_req_finished(req);
2772 ctxt = llog_get_context(exp->exp_obd, LLOG_UNLINK_ORIG_CTXT);
2774 rc = llog_initiator_connect(ctxt);
2779 imp->imp_server_timeout = 1;
2780 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
2781 ptlrpc_pinger_add_import(imp);
2787 static struct llog_operations osc_size_repl_logops = {
2788 lop_cancel: llog_obd_repl_cancel
2791 static struct llog_operations osc_unlink_orig_logops;
2792 static int osc_llog_init(struct obd_device *obd, struct obd_device *tgt,
2793 int count, struct llog_logid *logid)
2798 osc_unlink_orig_logops = llog_lvfs_ops;
2799 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
2800 osc_unlink_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
2801 osc_unlink_orig_logops.lop_add = llog_obd_origin_add;
2802 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
2804 rc = llog_setup(obd, LLOG_UNLINK_ORIG_CTXT, tgt, count, logid,
2805 &osc_unlink_orig_logops);
2809 rc = llog_setup(obd, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
2810 &osc_size_repl_logops);
2814 static int osc_llog_finish(struct obd_device *obd, int count)
2819 rc = llog_cleanup(llog_get_context(obd, LLOG_UNLINK_ORIG_CTXT));
2823 rc = llog_cleanup(llog_get_context(obd, LLOG_SIZE_REPL_CTXT));
2828 static int osc_connect(struct lustre_handle *exph,
2829 struct obd_device *obd, struct obd_uuid *cluuid)
2832 struct obd_export *exp;
2834 rc = client_connect_import(exph, obd, cluuid);
2836 if (obd->u.cli.cl_conn_count == 1) {
2837 exp = class_conn2export(exph);
2844 static int osc_disconnect(struct obd_export *exp, int flags)
2846 struct obd_device *obd = class_exp2obd(exp);
2847 struct llog_ctxt *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
2850 if (obd->u.cli.cl_conn_count == 1) {
2851 /* flush any remaining cancel messages out to the target */
2852 llog_sync(ctxt, exp);
2854 /* balance the conn2export for oscc in osc_connect */
2855 class_export_put(exp);
2858 rc = client_disconnect_export(exp, flags);
2862 static int osc_invalidate_import(struct obd_device *obd,
2863 struct obd_import *imp)
2865 struct client_obd *cli = &obd->u.cli;
2866 LASSERT(imp->imp_obd == obd);
2867 /* this used to try to tear down queued pages, but it was
2868 * not correctly implemented. We'll have to do it again once
2869 * we call obd_invalidate_import() again */
2870 /* XXX And we still need to do this */
2872 /* Reset grants, too */
2874 spin_lock(&cli->cl_loi_list_lock);
2875 cli->cl_avail_grant = 0;
2876 cli->cl_lost_grant = 0;
2877 spin_unlock(&cli->cl_loi_list_lock);
2882 int osc_setup(struct obd_device *obd, obd_count len, void *buf)
2886 rc = ptlrpcd_addref();
2890 rc = client_obd_setup(obd, len, buf);
2896 int osc_cleanup(struct obd_device *obd, int flags)
2900 rc = client_obd_cleanup(obd, flags);
2906 struct obd_ops osc_obd_ops = {
2907 o_owner: THIS_MODULE,
2908 o_attach: osc_attach,
2909 o_detach: osc_detach,
2911 o_cleanup: osc_cleanup,
2912 o_connect: osc_connect,
2913 o_disconnect: osc_disconnect,
2914 o_statfs: osc_statfs,
2915 o_packmd: osc_packmd,
2916 o_unpackmd: osc_unpackmd,
2917 o_create: osc_create,
2918 o_destroy: osc_destroy,
2919 o_getattr: osc_getattr,
2920 o_getattr_async:osc_getattr_async,
2921 o_setattr: osc_setattr,
2923 o_brw_async: osc_brw_async,
2924 o_prep_async_page: osc_prep_async_page,
2925 o_queue_async_io: osc_queue_async_io,
2926 o_set_async_flags: osc_set_async_flags,
2927 o_queue_group_io: osc_queue_group_io,
2928 o_trigger_group_io: osc_trigger_group_io,
2929 o_teardown_async_page: osc_teardown_async_page,
2932 o_enqueue: osc_enqueue,
2934 o_change_cbdata:osc_change_cbdata,
2935 o_cancel: osc_cancel,
2936 o_cancel_unused:osc_cancel_unused,
2937 o_iocontrol: osc_iocontrol,
2938 o_get_info: osc_get_info,
2939 o_set_info: osc_set_info,
2940 o_invalidate_import: osc_invalidate_import,
2941 o_llog_init: osc_llog_init,
2942 o_llog_finish: osc_llog_finish,
2945 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2946 struct obd_ops sanosc_obd_ops = {
2947 o_owner: THIS_MODULE,
2948 o_attach: osc_attach,
2949 o_detach: osc_detach,
2950 o_cleanup: client_obd_cleanup,
2951 o_connect: osc_connect,
2952 o_disconnect: client_disconnect_export,
2953 o_statfs: osc_statfs,
2954 o_packmd: osc_packmd,
2955 o_unpackmd: osc_unpackmd,
2956 o_create: osc_real_create,
2957 o_destroy: osc_destroy,
2958 o_getattr: osc_getattr,
2959 o_getattr_async:osc_getattr_async,
2960 o_setattr: osc_setattr,
2961 o_setup: client_sanobd_setup,
2965 o_enqueue: osc_enqueue,
2967 o_change_cbdata:osc_change_cbdata,
2968 o_cancel: osc_cancel,
2969 o_cancel_unused:osc_cancel_unused,
2970 o_iocontrol: osc_iocontrol,
2971 o_invalidate_import: osc_invalidate_import,
2972 o_llog_init: osc_llog_init,
2973 o_llog_finish: osc_llog_finish,
2977 int __init osc_init(void)
2979 struct lprocfs_static_vars lvars, sanlvars;
2983 lprocfs_init_vars(osc, &lvars);
2984 lprocfs_init_vars(osc, &sanlvars);
2986 rc = class_register_type(&osc_obd_ops, lvars.module_vars,
2991 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2992 rc = class_register_type(&sanosc_obd_ops, sanlvars.module_vars,
2993 LUSTRE_SANOSC_NAME);
2995 class_unregister_type(LUSTRE_OSC_NAME);
3001 static void /*__exit*/ osc_exit(void)
3003 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3004 class_unregister_type(LUSTRE_SANOSC_NAME);
3006 class_unregister_type(LUSTRE_OSC_NAME);
3010 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3011 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3012 MODULE_LICENSE("GPL");
3014 module_init(osc_init);
3015 module_exit(osc_exit);