1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table).
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_OSC
35 # include <linux/version.h>
36 # include <linux/module.h>
37 # include <linux/mm.h>
38 # include <linux/highmem.h>
39 # include <linux/lustre_dlm.h>
40 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
41 # include <linux/workqueue.h>
42 # include <linux/smp_lock.h>
44 # include <linux/locks.h>
46 #else /* __KERNEL__ */
47 # include <liblustre.h>
50 #include <linux/kp30.h>
51 #include <linux/lustre_net.h>
52 #include <linux/lustre_user.h>
53 #include <linux/obd_ost.h>
54 #include <linux/obd_lov.h>
57 # include <linux/ctype.h>
58 # include <linux/init.h>
63 #include <linux/lustre_ha.h>
64 #include <linux/lprocfs_status.h>
65 #include <linux/lustre_log.h>
66 #include "osc_internal.h"
69 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
71 struct lprocfs_static_vars lvars;
75 lprocfs_init_vars(osc,&lvars);
76 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
80 rc = lproc_osc_attach_seqstat(dev);
82 lprocfs_obd_detach(dev);
86 ptlrpc_lprocfs_register_obd(dev);
90 static int osc_detach(struct obd_device *dev)
92 ptlrpc_lprocfs_unregister_obd(dev);
93 return lprocfs_obd_detach(dev);
97 /* Pack OSC object metadata for disk storage (LE byte order). */
98 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
99 struct lov_stripe_md *lsm)
104 lmm_size = sizeof(**lmmp);
109 OBD_FREE(*lmmp, lmm_size);
115 OBD_ALLOC(*lmmp, lmm_size);
121 LASSERT(lsm->lsm_object_id);
122 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
128 /* Unpack OSC object metadata from disk storage (LE byte order). */
129 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
130 struct lov_mds_md *lmm, int lmm_bytes)
136 if (lmm_bytes < sizeof (*lmm)) {
137 CERROR("lov_mds_md too small: %d, need %d\n",
138 lmm_bytes, (int)sizeof(*lmm));
141 /* XXX LOV_MAGIC etc check? */
143 if (lmm->lmm_object_id == 0) {
144 CERROR("lov_mds_md: zero lmm_object_id\n");
149 lsm_size = lov_stripe_md_size(1);
153 if (*lsmp != NULL && lmm == NULL) {
154 OBD_FREE(*lsmp, lsm_size);
160 OBD_ALLOC(*lsmp, lsm_size);
163 loi_init((*lsmp)->lsm_oinfo);
167 /* XXX zero *lsmp? */
168 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
169 LASSERT((*lsmp)->lsm_object_id);
172 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
177 static int osc_getattr_interpret(struct ptlrpc_request *req,
178 struct osc_getattr_async_args *aa, int rc)
180 struct ost_body *body;
186 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
188 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
189 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
191 /* This should really be sent by the OST */
192 aa->aa_oa->o_blksize = OSC_BRW_MAX_SIZE;
193 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
195 CERROR("can't unpack ost_body\n");
197 aa->aa_oa->o_valid = 0;
203 static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
204 struct lov_stripe_md *md,
205 struct ptlrpc_request_set *set)
207 struct ptlrpc_request *request;
208 struct ost_body *body;
209 int size = sizeof(*body);
210 struct osc_getattr_async_args *aa;
213 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
218 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
219 memcpy(&body->oa, oa, sizeof(*oa));
221 request->rq_replen = lustre_msg_size(1, &size);
222 request->rq_interpret_reply = osc_getattr_interpret;
224 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
225 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
228 ptlrpc_set_add_req (set, request);
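/*
 * Illustrative sketch (guarded out, not part of this file): one way a caller
 * might batch several getattrs through a request set so the RPCs go out
 * concurrently and each reply is unpacked by osc_getattr_interpret() above.
 * The helper name and loop are assumptions for illustration; it relies on the
 * usual ptlrpc set helpers (ptlrpc_prep_set/ptlrpc_set_wait/ptlrpc_set_destroy)
 * behaving as elsewhere in this tree, and error handling is kept minimal.
 */
#if 0
static int example_getattr_batch(struct obd_export *exp, struct obdo **oas,
                                 struct lov_stripe_md **mds, int count)
{
        struct ptlrpc_request_set *set;
        int i, rc = 0;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        /* queue one OST_GETATTR per object; replies fill in each obdo */
        for (i = 0; i < count; i++) {
                rc = osc_getattr_async(exp, oas[i], mds[i], set);
                if (rc)
                        break;
        }

        if (rc == 0)
                rc = ptlrpc_set_wait(set);      /* send and wait for all */
        ptlrpc_set_destroy(set);
        return rc;
}
#endif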
232 static int osc_getattr(struct obd_export *exp, struct obdo *oa,
233 struct lov_stripe_md *md)
235 struct ptlrpc_request *request;
236 struct ost_body *body;
237 int rc, size = sizeof(*body);
240 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
245 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
246 memcpy(&body->oa, oa, sizeof(*oa));
248 request->rq_replen = lustre_msg_size(1, &size);
250 rc = ptlrpc_queue_wait(request);
252 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
256 body = lustre_swab_repbuf(request, 0, sizeof (*body),
257 lustre_swab_ost_body);
259 CERROR ("can't unpack ost_body\n");
260 GOTO (out, rc = -EPROTO);
263 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
264 memcpy(oa, &body->oa, sizeof(*oa));
266 /* This should really be sent by the OST */
267 oa->o_blksize = OSC_BRW_MAX_SIZE;
268 oa->o_valid |= OBD_MD_FLBLKSZ;
272 ptlrpc_req_finished(request);
276 static int osc_setattr(struct obd_export *exp, struct obdo *oa,
277 struct lov_stripe_md *md, struct obd_trans_info *oti)
279 struct ptlrpc_request *request;
280 struct ost_body *body;
281 int rc, size = sizeof(*body);
284 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SETATTR, 1, &size,
289 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
290 memcpy(&body->oa, oa, sizeof(*oa));
292 request->rq_replen = lustre_msg_size(1, &size);
294 rc = ptlrpc_queue_wait(request);
298 body = lustre_swab_repbuf(request, 0, sizeof(*body),
299 lustre_swab_ost_body);
301 GOTO(out, rc = -EPROTO);
303 memcpy(oa, &body->oa, sizeof(*oa));
307 ptlrpc_req_finished(request);
311 int osc_real_create(struct obd_export *exp, struct obdo *oa,
312 struct lov_stripe_md **ea, struct obd_trans_info *oti)
314 struct ptlrpc_request *request;
315 struct ost_body *body;
316 struct lov_stripe_md *lsm;
317 int rc, size = sizeof(*body);
325 rc = obd_alloc_memmd(exp, &lsm);
330 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_CREATE, 1, &size,
333 GOTO(out, rc = -ENOMEM);
335 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
336 memcpy(&body->oa, oa, sizeof(body->oa));
338 request->rq_replen = lustre_msg_size(1, &size);
339 if (oa->o_valid & OBD_MD_FLINLINE) {
340 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
341 oa->o_flags == OBD_FL_DELORPHAN);
342 DEBUG_REQ(D_HA, request,
343 "delorphan from OST integration; level == RECOVER");
344 request->rq_send_state = LUSTRE_IMP_RECOVER;
347 rc = ptlrpc_queue_wait(request);
351 body = lustre_swab_repbuf(request, 0, sizeof(*body),
352 lustre_swab_ost_body);
354 CERROR ("can't unpack ost_body\n");
355 GOTO (out_req, rc = -EPROTO);
358 memcpy(oa, &body->oa, sizeof(*oa));
360 /* This should really be sent by the OST */
361 oa->o_blksize = OSC_BRW_MAX_SIZE;
362 oa->o_valid |= OBD_MD_FLBLKSZ;
364 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
365 * have valid lsm_oinfo data structs, so don't go touching that.
366 * This needs to be fixed in a big way.
368 lsm->lsm_object_id = oa->o_id;
372 oti->oti_transno = request->rq_repmsg->transno;
374 if (oa->o_valid & OBD_MD_FLCOOKIE) {
375 if (!oti->oti_logcookies)
376 oti_alloc_cookies(oti, 1);
377 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
378 sizeof(oti->oti_onecookie));
382 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
385 ptlrpc_req_finished(request);
388 obd_free_memmd(exp, &lsm);
392 static int osc_punch(struct obd_export *exp, struct obdo *oa,
393 struct lov_stripe_md *md, obd_size start,
394 obd_size end, struct obd_trans_info *oti)
396 struct ptlrpc_request *request;
397 struct ost_body *body;
398 int rc, size = sizeof(*body);
406 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_PUNCH, 1, &size,
411 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
412 memcpy(&body->oa, oa, sizeof(*oa));
414 /* overload the size and blocks fields in the oa with start/end */
415 body->oa.o_size = start;
416 body->oa.o_blocks = end;
417 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
419 request->rq_replen = lustre_msg_size(1, &size);
421 rc = ptlrpc_queue_wait(request);
425 body = lustre_swab_repbuf (request, 0, sizeof (*body),
426 lustre_swab_ost_body);
428 CERROR ("can't unpack ost_body\n");
429 GOTO (out, rc = -EPROTO);
432 memcpy(oa, &body->oa, sizeof(*oa));
436 ptlrpc_req_finished(request);
440 static int osc_sync(struct obd_export *exp, struct obdo *oa,
441 struct lov_stripe_md *md, obd_size start, obd_size end)
443 struct ptlrpc_request *request;
444 struct ost_body *body;
445 int rc, size = sizeof(*body);
453 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SYNC, 1, &size,
458 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
459 memcpy(&body->oa, oa, sizeof(*oa));
461 /* overload the size and blocks fields in the oa with start/end */
462 body->oa.o_size = start;
463 body->oa.o_blocks = end;
464 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
466 request->rq_replen = lustre_msg_size(1, &size);
468 rc = ptlrpc_queue_wait(request);
472 body = lustre_swab_repbuf(request, 0, sizeof(*body),
473 lustre_swab_ost_body);
475 CERROR ("can't unpack ost_body\n");
476 GOTO (out, rc = -EPROTO);
479 memcpy(oa, &body->oa, sizeof(*oa));
483 ptlrpc_req_finished(request);
487 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
488 struct lov_stripe_md *ea, struct obd_trans_info *oti)
490 struct ptlrpc_request *request;
491 struct ost_body *body;
492 int rc, size = sizeof(*body);
500 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_DESTROY, 1,
505 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
507 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
508 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
509 sizeof(*oti->oti_logcookies));
510 oti->oti_logcookies++;
513 memcpy(&body->oa, oa, sizeof(*oa));
514 request->rq_replen = lustre_msg_size(1, &size);
516 rc = ptlrpc_queue_wait(request);
520 body = lustre_swab_repbuf(request, 0, sizeof(*body),
521 lustre_swab_ost_body);
523 CERROR ("Can't unpack body\n");
524 GOTO (out, rc = -EPROTO);
527 memcpy(oa, &body->oa, sizeof(*oa));
531 ptlrpc_req_finished(request);
535 static void osc_announce_cached(struct client_obd *cli, struct ost_body *body)
537 obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLRDEV;
539 LASSERT(!(body->oa.o_valid & bits));
541 body->oa.o_valid |= bits;
542 down(&cli->cl_dirty_sem);
543 body->oa.o_blocks = cli->cl_dirty;
544 body->oa.o_rdev = cli->cl_dirty_granted;
545 up(&cli->cl_dirty_sem);
546 CDEBUG(D_INODE, "announcing "LPU64" dirty "LPU64" granted\n",
547 cli->cl_dirty, cli->cl_dirty_granted);
550 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
552         if (!(body->oa.o_valid & OBD_MD_FLRDEV)) {
553 if (cli->cl_ost_can_grant) {
554 CDEBUG(D_INODE, "%s can't grant\n",
555 cli->cl_import->imp_target_uuid.uuid);
557 cli->cl_ost_can_grant = 0;
561 CDEBUG(D_ERROR, "got "LPU64" grant\n", body->oa.o_rdev);
562 down(&cli->cl_dirty_sem);
563 cli->cl_dirty_granted = body->oa.o_rdev;
564 /* XXX check for over-run and wake up the io thread that
565 * doesn't exist yet */
566 up(&cli->cl_dirty_sem);
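/*
 * Together the two helpers above piggy-back cache accounting on every BRW:
 * osc_announce_cached() overloads o_blocks/o_rdev in the request with the
 * client's cl_dirty and cl_dirty_granted, and osc_update_grant() reads the
 * reply, either recording the OST's new grant from o_rdev or noting (once)
 * that this OST does not grant at all.
 */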
569 /* We assume that the reason this OSC got a short read is that it read
570 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
571 * via the LOV, and it _knows_ it's reading inside the file, it's just that
572 * this stripe never got written at or beyond this stripe offset yet. */
573 static void handle_short_read(int nob_read, obd_count page_count,
574 struct brw_page *pga)
578 /* skip bytes read OK */
579 while (nob_read > 0) {
580 LASSERT (page_count > 0);
582 if (pga->count > nob_read) {
583 /* EOF inside this page */
584 ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
585 memset(ptr + nob_read, 0, pga->count - nob_read);
592 nob_read -= pga->count;
597 /* zero remaining pages */
598 while (page_count-- > 0) {
599 ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
600 memset(ptr, 0, pga->count);
606 static int check_write_rcs(struct ptlrpc_request *request, int niocount,
607 obd_count page_count, struct brw_page *pga)
612 /* return error if any niobuf was in error */
613 remote_rcs = lustre_swab_repbuf(request, 1,
614 sizeof(*remote_rcs) * niocount, NULL);
615 if (remote_rcs == NULL) {
616 CERROR ("Missing/short RC vector on BRW_WRITE reply\n");
619 if (lustre_msg_swabbed (request->rq_repmsg))
620 for (i = 0; i < niocount; i++)
621 __swab32s (&remote_rcs[i]);
623 for (i = 0; i < niocount; i++) {
624 if (remote_rcs[i] < 0)
625 return (remote_rcs[i]);
627 if (remote_rcs[i] != 0) {
628 CERROR ("rc[%d] invalid (%d) req %p\n",
629 i, remote_rcs[i], request);
637 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
639 if (p1->flag != p2->flag) {
640 unsigned mask = ~(OBD_BRW_CREATE|OBD_BRW_FROM_GRANT);
642 /* warn if we try to combine flags that we don't know to be
644 if ((p1->flag & mask) != (p2->flag & mask))
645 CERROR("is it ok to have flags 0x%x and 0x%x in the "
646 "same brw?\n", p1->flag, p2->flag);
650 return (p1->off + p1->count == p2->off);
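/* e.g. with 4096-byte pages, two chunks at offsets 8192 and 12288 carrying
 * the same flags merge into a single 8192-byte niobuf when
 * osc_brw_prep_request() below builds the wire request */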
654 static obd_count cksum_pages(int nob, obd_count page_count,
655 struct brw_page *pga)
661 LASSERT (page_count > 0);
664 ost_checksum(&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
665 pga->count > nob ? nob : pga->count);
677 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
678 struct lov_stripe_md *lsm, obd_count page_count,
679 struct brw_page *pga, int *requested_nobp,
680 int *niocountp, struct ptlrpc_request **reqp)
682 struct ptlrpc_request *req;
683 struct ptlrpc_bulk_desc *desc;
684 struct client_obd *cli = &imp->imp_obd->u.cli;
685 struct ost_body *body;
686 struct obd_ioobj *ioobj;
687 struct niobuf_remote *niobuf;
696 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
698 for (niocount = i = 1; i < page_count; i++)
699 if (!can_merge_pages (&pga[i - 1], &pga[i]))
702 size[0] = sizeof(*body);
703 size[1] = sizeof(*ioobj);
704 size[2] = niocount * sizeof(*niobuf);
706 req = ptlrpc_prep_req(imp, opc, 3, size, NULL);
710 if (opc == OST_WRITE)
711 desc = ptlrpc_prep_bulk_imp(req, BULK_GET_SOURCE,
714 desc = ptlrpc_prep_bulk_imp(req, BULK_PUT_SINK,
717 GOTO(out, rc = -ENOMEM);
718 /* NB request now owns desc and will free it when it gets freed */
720 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
721 ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
722 niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));
724 memcpy(&body->oa, oa, sizeof(*oa));
726 obdo_to_ioobj(oa, ioobj);
727 ioobj->ioo_bufcnt = niocount;
729 LASSERT (page_count > 0);
730 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
731 struct brw_page *pg = &pga[i];
732 struct brw_page *pg_prev = pg - 1;
734 LASSERT(pg->count > 0);
735 LASSERT((pg->off & ~PAGE_MASK) + pg->count <= PAGE_SIZE);
736 LASSERTF(i == 0 || pg->off > pg_prev->off,
737 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
738 " prev_pg %p [pri %lu ind %lu] off "LPU64,
740 pg->pg, pg->pg->private, pg->pg->index, pg->off,
741 pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
744 rc = ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~PAGE_MASK,
749 requested_nob += pg->count;
751 if (i > 0 && can_merge_pages(pg_prev, pg)) {
753 niobuf->len += pg->count;
755 niobuf->offset = pg->off;
756 niobuf->len = pg->count;
757 niobuf->flags = pg->flag;
761 LASSERT((void *)(niobuf - niocount) ==
762 lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
763 osc_announce_cached(cli, body);
764 spin_lock_irqsave(&req->rq_lock, flags);
765 req->rq_no_resend = 1;
766 spin_unlock_irqrestore(&req->rq_lock, flags);
768 /* size[0] still sizeof (*body) */
769 if (opc == OST_WRITE) {
771 body->oa.o_valid |= OBD_MD_FLCKSUM;
772 body->oa.o_nlink = cksum_pages(requested_nob, page_count, pga);
774 /* 1 RC per niobuf */
775 size[1] = sizeof(__u32) * niocount;
776 req->rq_replen = lustre_msg_size(2, size);
778 /* 1 RC for the whole I/O */
779 req->rq_replen = lustre_msg_size(1, size);
782 *niocountp = niocount;
783 *requested_nobp = requested_nob;
788 ptlrpc_req_finished (req);
792 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
793 int requested_nob, int niocount,
794 obd_count page_count, struct brw_page *pga,
797 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
798 struct ost_body *body;
803 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
805 CERROR ("Can't unpack body\n");
809 osc_update_grant(cli, body);
811 if (req->rq_reqmsg->opc == OST_WRITE) {
813 CERROR ("Unexpected +ve rc %d\n", rc);
817 return(check_write_rcs(req, niocount, page_count, pga));
820 if (rc > requested_nob) {
821 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
825 if (rc < requested_nob)
826 handle_short_read(rc, page_count, pga);
828 memcpy(oa, &body->oa, sizeof(*oa));
831 if (oa->o_valid & OBD_MD_FLCKSUM) {
832 const struct ptlrpc_peer *peer =
833 &req->rq_import->imp_connection->c_peer;
834 static int cksum_counter;
835 obd_count server_cksum = oa->o_nlink;
836 obd_count cksum = cksum_pages(rc, page_count, pga);
837 char str[PTL_NALFMT_SIZE];
839 portals_nid2str(peer->peer_ni->pni_number, peer->peer_nid, str);
842 if (server_cksum != cksum) {
843 CERROR("Bad checksum: server %x, client %x, server NID "
844 LPX64" (%s)\n", server_cksum, cksum,
845 peer->peer_nid, str);
848 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
849 CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
850 cksum_counter, peer->peer_nid, str, cksum);
853 static int cksum_missed;
856 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
857 CERROR("Request checksum %u from "LPX64", no reply\n",
859 req->rq_import->imp_connection->c_peer.peer_nid);
865 static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
866 struct lov_stripe_md *lsm,
867 obd_count page_count, struct brw_page *pga)
871 struct ptlrpc_request *request;
876 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
877 page_count, pga, &requested_nob, &niocount,
879 /* NB ^ sets rq_no_resend */
884 rc = ptlrpc_queue_wait(request);
886 if (rc == -ETIMEDOUT && request->rq_resend) {
887 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
888 ptlrpc_req_finished(request);
892 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
893 page_count, pga, rc);
895 ptlrpc_req_finished(request);
899 static int brw_interpret(struct ptlrpc_request *request,
900 struct osc_brw_async_args *aa, int rc)
902 struct obdo *oa = aa->aa_oa;
903 int requested_nob = aa->aa_requested_nob;
904 int niocount = aa->aa_nio_count;
905 obd_count page_count = aa->aa_page_count;
906 struct brw_page *pga = aa->aa_pga;
909 /* XXX bug 937 here */
910 if (rc == -ETIMEDOUT && request->rq_resend) {
911 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
912 LBUG(); /* re-send. later. */
916 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
917 page_count, pga, rc);
921 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
922 struct lov_stripe_md *lsm, obd_count page_count,
923 struct brw_page *pga, struct ptlrpc_request_set *set)
925 struct ptlrpc_request *request;
928 struct osc_brw_async_args *aa;
932 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
933 page_count, pga, &requested_nob, &nio_count,
935 /* NB ^ sets rq_no_resend */
938 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
939 aa = (struct osc_brw_async_args *)&request->rq_async_args;
941 aa->aa_requested_nob = requested_nob;
942 aa->aa_nio_count = nio_count;
943 aa->aa_page_count = page_count;
946 request->rq_interpret_reply = brw_interpret;
947 ptlrpc_set_add_req(set, request);
953 #define min_t(type,x,y) \
954 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
958 * ugh, we want disk allocation on the target to happen in offset order. we'll
959 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
960 * fine for our small page arrays and doesn't require allocation. it's an
961 * insertion sort that swaps elements that are strides apart, shrinking the
962 * stride down until it's 1 and the array is sorted.
964 static void sort_brw_pages(struct brw_page *array, int num)
971 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
976 for (i = stride ; i < num ; i++) {
979 while (j >= stride && array[j - stride].off > tmp.off) {
980 array[j] = array[j - stride];
985 } while (stride > 1);
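/* worked example (assuming the usual stride/3 shrink between passes): for a
 * 64-page array the loop above builds the gap sequence 1, 4, 13, 40, 121, and
 * the insertion passes then run with strides 40, 13, 4 and finally 1 */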
988 /* make sure the regions we're passing to elan don't violate its '4
989 * fragments' constraint. portal headers are a fragment, all full
990 * PAGE_SIZE long pages count as 1 fragment, and each partial page
991 * counts as a fragment. I think. see bug 934. */
992 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
995 int saw_whole_frag = 0;
998 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
999 if (pg->count == PAGE_SIZE) {
1000 if (!saw_whole_frag) {
1011 static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
1012 struct lov_stripe_md *md, obd_count page_count,
1013 struct brw_page *pga, struct obd_trans_info *oti)
1017 if (cmd == OBD_BRW_CHECK) {
1018 /* The caller just wants to know if there's a chance that this
1019 * I/O can succeed */
1020 struct obd_import *imp = class_exp2cliimp(exp);
1022 if (imp == NULL || imp->imp_invalid)
1027 while (page_count) {
1028 obd_count pages_per_brw;
1031 if (page_count > OSC_BRW_MAX_IOV)
1032 pages_per_brw = OSC_BRW_MAX_IOV;
1034 pages_per_brw = page_count;
1036 sort_brw_pages(pga, pages_per_brw);
1037 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1039 rc = osc_brw_internal(cmd, exp, oa, md, pages_per_brw, pga);
1044 page_count -= pages_per_brw;
1045 pga += pages_per_brw;
1050 static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
1051 struct lov_stripe_md *md, obd_count page_count,
1052 struct brw_page *pga, struct ptlrpc_request_set *set,
1053 struct obd_trans_info *oti)
1057 if (cmd == OBD_BRW_CHECK) {
1058 /* The caller just wants to know if there's a chance that this
1059 * I/O can succeed */
1060 struct obd_import *imp = class_exp2cliimp(exp);
1062 if (imp == NULL || imp->imp_invalid)
1067 while (page_count) {
1068 obd_count pages_per_brw;
1071 if (page_count > OSC_BRW_MAX_IOV)
1072 pages_per_brw = OSC_BRW_MAX_IOV;
1074 pages_per_brw = page_count;
1076 sort_brw_pages(pga, pages_per_brw);
1077 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1079 rc = async_internal(cmd, exp, oa, md, pages_per_brw, pga, set);
1084 page_count -= pages_per_brw;
1085 pga += pages_per_brw;
1090 static void osc_check_rpcs(struct client_obd *cli);
1091 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap);
1092 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
1093 static void lop_update_pending(struct client_obd *cli,
1094 struct loi_oap_pages *lop, int cmd, int delta);
1096 /* this is called when a sync waiter receives an interruption. Its job is to
1097 * get the caller woken as soon as possible. If its page hasn't been put in an
1098 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1099 * desiring interruption which will forcefully complete the rpc once the rpc
1101 static void osc_occ_interrupted(struct osic_callback_context *occ)
1103 struct osc_async_page *oap;
1104 struct loi_oap_pages *lop;
1105 struct lov_oinfo *loi;
1108 /* XXX member_of() */
1109 oap = list_entry(occ, struct osc_async_page, oap_occ);
1111 spin_lock(&oap->oap_cli->cl_loi_list_lock);
1113 oap->oap_interrupted = 1;
1115 /* ok, it's been put in an rpc. */
1116 if (oap->oap_request != NULL) {
1117 ptlrpc_mark_interrupted(oap->oap_request);
1122 /* we don't get interruption callbacks until osc_trigger_sync_io()
1123  * has been called and put the sync oaps in the pending/urgent lists. */
1124 if (!list_empty(&oap->oap_pending_item)) {
1125 list_del_init(&oap->oap_pending_item);
1126 if (oap->oap_async_flags & ASYNC_URGENT)
1127 list_del_init(&oap->oap_urgent_item);
1130 lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
1131 &loi->loi_write_lop : &loi->loi_read_lop;
1132 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1133 loi_list_maint(oap->oap_cli, oap->oap_loi);
1135 osic_complete_one(oap->oap_osic, &oap->oap_occ, 0);
1136 oap->oap_osic = NULL;
1141 spin_unlock(&oap->oap_cli->cl_loi_list_lock);
1144 /* this must be called holding the list lock to give coverage to exit_cache,
1145 * async_flag maintenance, and oap_request */
1146 static void osc_complete_oap(struct client_obd *cli,
1147 struct osc_async_page *oap, int rc)
1150 osc_exit_cache(cli, oap);
1151 oap->oap_async_flags = 0;
1152 oap->oap_interrupted = 0;
1154 if (oap->oap_request != NULL) {
1155 ptlrpc_req_finished(oap->oap_request);
1156 oap->oap_request = NULL;
1159 if (oap->oap_osic) {
1160 osic_complete_one(oap->oap_osic, &oap->oap_occ, rc);
1161 oap->oap_osic = NULL;
1166 oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
1171 static int brw_interpret_oap(struct ptlrpc_request *request,
1172 struct osc_brw_async_args *aa, int rc)
1174 struct osc_async_page *oap;
1175 struct client_obd *cli;
1176 struct list_head *pos, *n;
1179 CDEBUG(D_INODE, "request %p aa %p\n", request, aa);
1181 rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
1182 aa->aa_nio_count, aa->aa_page_count,
1186 /* in failout recovery we ignore writeback failure and want
1187 * to just tell llite to unlock the page and continue */
1188 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1191 spin_lock(&cli->cl_loi_list_lock);
1193 /* the caller may re-use the oap after the completion call so
1194 * we need to clean it up a little */
1195 list_for_each_safe(pos, n, &aa->aa_oaps) {
1196 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1198 //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
1199 //oap->oap_page, oap->oap_page->index, oap);
1201 list_del_init(&oap->oap_rpc_item);
1202 osc_complete_oap(cli, oap, rc);
1205 cli->cl_brw_in_flight--;
1206 osc_check_rpcs(cli);
1208 spin_unlock(&cli->cl_loi_list_lock);
1210 obdo_free(aa->aa_oa);
1211 OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));
1216 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
1217 struct list_head *rpc_list,
1218 int page_count, int cmd)
1220 struct ptlrpc_request *req;
1221 struct brw_page *pga = NULL;
1222 int requested_nob, nio_count;
1223 struct osc_brw_async_args *aa;
1224 struct obdo *oa = NULL;
1225 struct obd_async_page_ops *ops = NULL;
1226 void *caller_data = NULL;
1227 struct list_head *pos;
1230 LASSERT(!list_empty(rpc_list));
1232 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1234 RETURN(ERR_PTR(-ENOMEM));
1238 GOTO(out, req = ERR_PTR(-ENOMEM));
1241 list_for_each(pos, rpc_list) {
1242 struct osc_async_page *oap;
1244 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1246 ops = oap->oap_caller_ops;
1247 caller_data = oap->oap_caller_data;
1249 pga[i].off = oap->oap_obj_off + oap->oap_page_off;
1250 pga[i].pg = oap->oap_page;
1251 pga[i].count = oap->oap_count;
1252 pga[i].flag = oap->oap_brw_flags;
1253 //CDEBUG(D_INODE, "putting page %p index %lu oap %p into pga\n",
1254 //pga[i].pg, oap->oap_page->index, oap);
1258 /* always get the data for the obdo for the rpc */
1259 LASSERT(ops != NULL);
1260 ops->ap_fill_obdo(caller_data, cmd, oa);
1262 sort_brw_pages(pga, page_count);
1263 rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
1264 pga, &requested_nob, &nio_count, &req);
1266 CERROR("prep_req failed: %d\n", rc);
1267 GOTO(out, req = ERR_PTR(rc));
1270 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1271 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1273 aa->aa_requested_nob = requested_nob;
1274 aa->aa_nio_count = nio_count;
1275 aa->aa_page_count = page_count;
1284 OBD_FREE(pga, sizeof(*pga) * page_count);
1289 static void lop_update_pending(struct client_obd *cli,
1290 struct loi_oap_pages *lop, int cmd, int delta)
1292 lop->lop_num_pending += delta;
1293 if (cmd == OBD_BRW_WRITE)
1294 cli->cl_pending_w_pages += delta;
1296 cli->cl_pending_r_pages += delta;
1299 /* the loi lock is held across this function but it's allowed to release
1300 * and reacquire it during its work */
1301 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
1302 int cmd, struct loi_oap_pages *lop)
1304 struct ptlrpc_request *request;
1305 obd_count page_count = 0;
1306 struct list_head *tmp, *pos;
1307 struct osc_async_page *oap = NULL;
1308 struct osc_brw_async_args *aa;
1309 struct obd_async_page_ops *ops;
1310 LIST_HEAD(rpc_list);
1313 /* first we find the pages we're allowed to work with */
1314 list_for_each_safe(pos, tmp, &lop->lop_pending) {
1315 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1316 ops = oap->oap_caller_ops;
1318 /* in llite being 'ready' equates to the page being locked
1319 * until completion unlocks it. commit_write submits a page
1320 * as not ready because its unlock will happen unconditionally
1321 * as the call returns. if we race with commit_write giving
1322                  * us that page we don't want to create a hole in the page
1323 * stream, so we stop and leave the rpc to be fired by
1324 * another dirtier or kupdated interval (the not ready page
1325 * will still be on the dirty list). we could call in
1326 * at the end of ll_file_write to process the queue again. */
1327 if (!(oap->oap_async_flags & ASYNC_READY)) {
1328 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
1330 CDEBUG(D_INODE, "oap %p page %p returned %d "
1331 "instead of ready\n", oap,
1335 /* llite is telling us that the page is still
1336 * in commit_write and that we should try
1337 * and put it in an rpc again later. we
1338 * break out of the loop so we don't create
1339 * a hole in the sequence of pages in the rpc
1344 /* the io isn't needed.. tell the checks
1345 * below to complete the rpc with EINTR */
1346 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1347 oap->oap_count = -EINTR;
1350 oap->oap_async_flags |= ASYNC_READY;
1353 LASSERTF(0, "oap %p page %p returned %d "
1354 "from make_ready\n", oap,
1362 /* take the page out of our book-keeping */
1363 list_del_init(&oap->oap_pending_item);
1364 lop_update_pending(cli, lop, cmd, -1);
1365 if (!list_empty(&oap->oap_urgent_item))
1366 list_del_init(&oap->oap_urgent_item);
1368 /* ask the caller for the size of the io as the rpc leaves. */
1369 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
1370 oap->oap_count = ops->ap_refresh_count(
1371 oap->oap_caller_data,
1373 if (oap->oap_count <= 0) {
1374 CDEBUG(D_INODE, "oap %p count %d, completing\n", oap,
1376 osc_complete_oap(cli, oap, oap->oap_count);
1380 /* now put the page back in our accounting */
1381 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1382 if (++page_count >= cli->cl_max_pages_per_rpc)
1386 if (page_count == 0)
1389 loi_list_maint(cli, loi);
1390 spin_unlock(&cli->cl_loi_list_lock);
1392 request = osc_build_req(cli, &rpc_list, page_count, cmd);
1393 if (IS_ERR(request)) {
1394 /* this should happen rarely and is pretty bad, it makes the
1395 * pending list not follow the dirty order */
1396 spin_lock(&cli->cl_loi_list_lock);
1397 list_for_each_safe(pos, tmp, &rpc_list) {
1398 oap = list_entry(pos, struct osc_async_page,
1400 list_del_init(&oap->oap_rpc_item);
1402 /* queued sync pages can be torn down while the pages
1403 * were between the pending list and the rpc */
1404 if (oap->oap_interrupted) {
1405 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
1406 osc_complete_oap(cli, oap, oap->oap_count);
1410 /* put the page back in the loi/lop lists */
1411 list_add_tail(&oap->oap_pending_item,
1413 lop_update_pending(cli, lop, cmd, 1);
1414 if (oap->oap_async_flags & ASYNC_URGENT)
1415 list_add(&oap->oap_urgent_item,
1418 loi_list_maint(cli, loi);
1419 RETURN(PTR_ERR(request));
1422 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1423 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1424 INIT_LIST_HEAD(&aa->aa_oaps);
1425 list_splice(&rpc_list, &aa->aa_oaps);
1426 INIT_LIST_HEAD(&rpc_list);
1428 if (cmd == OBD_BRW_READ) {
1429 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1430 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_brw_in_flight);
1432 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1433 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1434 cli->cl_brw_in_flight);
1437 spin_lock(&cli->cl_loi_list_lock);
1439 cli->cl_brw_in_flight++;
1440 /* queued sync pages can be torn down while the pages
1441 * were between the pending list and the rpc */
1442 list_for_each(pos, &aa->aa_oaps) {
1443 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1444 if (oap->oap_interrupted) {
1445 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1447 ptlrpc_mark_interrupted(request);
1452 CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %d in flight\n", request,
1453 page_count, aa, cli->cl_brw_in_flight);
1455 oap->oap_request = ptlrpc_request_addref(request);
1456 request->rq_interpret_reply = brw_interpret_oap;
1457 ptlrpcd_add_req(request);
1461 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1467 if (lop->lop_num_pending == 0)
1470         /* stream rpcs in queue order as long as there is an urgent page
1471 * queued. this is our cheap solution for good batching in the case
1472 * where writepage marks some random page in the middle of the file as
1473 * urgent because of, say, memory pressure */
1474 if (!list_empty(&lop->lop_urgent))
1477 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1478 optimal = cli->cl_max_pages_per_rpc;
1479 if (cmd == OBD_BRW_WRITE) {
1480 /* trigger a write rpc stream as long as there are dirtiers
1481 * waiting for space. as they're waiting, they're not going to
1482                  * create more pages to coalesce with what's waiting. */
1483 if (!list_empty(&cli->cl_cache_waiters))
1486 /* *2 to avoid triggering rpcs that would want to include pages
1487 * that are being queued but which can't be made ready until
1488 * the queuer finishes with the page. this is a wart for
1489 * llite::commit_write() */
1492 if (lop->lop_num_pending >= optimal)
1498 static void on_list(struct list_head *item, struct list_head *list,
1501 if (list_empty(item) && should_be_on)
1502 list_add_tail(item, list);
1503 else if (!list_empty(item) && !should_be_on)
1504 list_del_init(item);
1507 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1508 * can find pages to build into rpcs quickly */
1509 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1511 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1512 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1513 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1515 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1516 loi->loi_write_lop.lop_num_pending);
1519 #define LOI_DEBUG(LOI, STR, args...) \
1520 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
1521 !list_empty(&(LOI)->loi_cli_item), \
1522 (LOI)->loi_write_lop.lop_num_pending, \
1523 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
1524 (LOI)->loi_read_lop.lop_num_pending, \
1525 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
1528 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
1531 /* first return all objects which we already know to have
1532 * pages ready to be stuffed into rpcs */
1533 if (!list_empty(&cli->cl_loi_ready_list))
1534 RETURN(list_entry(cli->cl_loi_ready_list.next,
1535 struct lov_oinfo, loi_cli_item));
1537 /* then if we have cache waiters, return all objects with queued
1538 * writes. This is especially important when many small files
1539 * have filled up the cache and not been fired into rpcs because
1540          * they don't pass the nr_pending/object threshold */
1541 if (!list_empty(&cli->cl_cache_waiters) &&
1542 !list_empty(&cli->cl_loi_write_list))
1543 RETURN(list_entry(cli->cl_loi_write_list.next,
1544 struct lov_oinfo, loi_write_item));
1548 /* called with the loi list lock held */
1549 static void osc_check_rpcs(struct client_obd *cli)
1551 struct lov_oinfo *loi;
1552 int rc = 0, race_counter = 0;
1555 while ((loi = osc_next_loi(cli)) != NULL) {
1557 LOI_DEBUG(loi, "%d in flight", cli->cl_brw_in_flight);
1559 if (cli->cl_brw_in_flight >= cli->cl_max_rpcs_in_flight)
1562 /* attempt some read/write balancing by alternating between
1563 * reads and writes in an object. The makes_rpc checks here
1564 * would be redundant if we were getting read/write work items
1565 * instead of objects. we don't want send_oap_rpc to drain a
1566 * partial read pending queue when we're given this object to
1567 * do io on writes while there are cache waiters */
1568 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
1569 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
1570 &loi->loi_write_lop);
1578 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
1579 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
1580 &loi->loi_read_lop);
1589                 /* attempt some inter-object balancing by issuing rpcs
1590 * for each object in turn */
1591 if (!list_empty(&loi->loi_cli_item))
1592 list_del_init(&loi->loi_cli_item);
1593 if (!list_empty(&loi->loi_write_item))
1594 list_del_init(&loi->loi_write_item);
1596 loi_list_maint(cli, loi);
1598 /* send_oap_rpc fails with 0 when make_ready tells it to
1599 * back off. llite's make_ready does this when it tries
1600 * to lock a page queued for write that is already locked.
1601 * we want to try sending rpcs from many objects, but we
1602 * don't want to spin failing with 0. */
1603 if (race_counter == 10)
1609 /* we're trying to queue a page in the osc so we're subject to the
1610 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
1611 * If the osc's queued pages are already at that limit, then we want to sleep
1612 * until there is space in the osc's queue for us. we need this goofy
1613 * little struct to really tell that our allocation was fulfilled in
1614 * the presence of pending signals */
1615 struct osc_cache_waiter {
1616 struct list_head ocw_entry;
1617 wait_queue_head_t ocw_waitq;
1619 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1623 spin_lock(&cli->cl_loi_list_lock);
1624 rc = list_empty(&ocw->ocw_entry);
1625 spin_unlock(&cli->cl_loi_list_lock);
1628 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
1629 struct osc_async_page *oap)
1631 struct osc_cache_waiter ocw;
1632 struct l_wait_info lwi = {0};
1636 /* XXX check for ost grants here as well.. for now we ignore them. */
1637 if (cli->cl_dirty_max < PAGE_SIZE)
1640 /* if we fail this test then cl_dirty contains at least one page
1641 * that will have to be completed after we release the lock */
1642 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max) {
1643 /* account for ourselves */
1644 cli->cl_dirty += PAGE_SIZE;
1648 init_waitqueue_head(&ocw.ocw_waitq);
1649 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1651 /* make sure that there are write rpcs in flight to wait for. this
1652 * is a little silly as this object may not have any pending
1653 * but other objects sure might. this should probably be cleaned. */
1654 loi_list_maint(cli, loi);
1655 osc_check_rpcs(cli);
1656 spin_unlock(&cli->cl_loi_list_lock);
1658 CDEBUG(D_INODE, "sleeping for cache space\n");
1659 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1661 spin_lock(&cli->cl_loi_list_lock);
1662 if (!list_empty(&ocw.ocw_entry)) {
1664 list_del(&ocw.ocw_entry);
1669 oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
1673 /* the companion to enter_cache, called when an oap is no longer part of the
1674  * dirty accounting: either writeback completed or truncate happened before
1675  * writing started. must be called with the loi lock held. */
1676 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
1678 struct osc_cache_waiter *ocw;
1681 if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
1686 if (list_empty(&cli->cl_cache_waiters)) {
1687 cli->cl_dirty -= PAGE_SIZE;
1689 ocw = list_entry(cli->cl_cache_waiters.next,
1690 struct osc_cache_waiter, ocw_entry);
1691 list_del_init(&ocw->ocw_entry);
1692 wake_up(&ocw->ocw_waitq);
1695 oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
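/* note the accounting hand-off above: when someone is waiting for cache
 * space, cl_dirty is left untouched and the freed page's worth is passed
 * straight to the woken waiter instead of being decremented */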
1699 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
1700 struct lov_oinfo *loi, struct page *page,
1701 obd_off offset, struct obd_async_page_ops *ops,
1702 void *data, void **res)
1704 struct osc_async_page *oap;
1707 OBD_ALLOC(oap, sizeof(*oap));
1711 oap->oap_magic = OAP_MAGIC;
1712 oap->oap_cli = &exp->exp_obd->u.cli;
1715 oap->oap_caller_ops = ops;
1716 oap->oap_caller_data = data;
1718 oap->oap_page = page;
1719 oap->oap_obj_off = offset;
1721 INIT_LIST_HEAD(&oap->oap_pending_item);
1722 INIT_LIST_HEAD(&oap->oap_urgent_item);
1723 INIT_LIST_HEAD(&oap->oap_rpc_item);
1725 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
1727 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
1732 struct osc_async_page *oap_from_cookie(void *cookie)
1734 struct osc_async_page *oap = cookie;
1735 if (oap->oap_magic != OAP_MAGIC)
1736 return ERR_PTR(-EINVAL);
1740 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1741 struct lov_oinfo *loi, void *cookie,
1742 int cmd, obd_off off, int count,
1743 obd_flag brw_flags, enum async_flags async_flags)
1745 struct client_obd *cli = &exp->exp_obd->u.cli;
1746 struct osc_async_page *oap;
1747 struct loi_oap_pages *lop;
1751 oap = oap_from_cookie(cookie);
1753 RETURN(PTR_ERR(oap));
1755 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1758 if (!list_empty(&oap->oap_pending_item) ||
1759 !list_empty(&oap->oap_urgent_item) ||
1760 !list_empty(&oap->oap_rpc_item))
1764 loi = &lsm->lsm_oinfo[0];
1766 spin_lock(&cli->cl_loi_list_lock);
1769 oap->oap_async_flags = async_flags;
1770 oap->oap_page_off = off;
1771 oap->oap_count = count;
1772 oap->oap_brw_flags = brw_flags;
1774 if (cmd == OBD_BRW_WRITE) {
1775 rc = osc_enter_cache(cli, loi, oap);
1777 spin_unlock(&cli->cl_loi_list_lock);
1780 lop = &loi->loi_write_lop;
1782 lop = &loi->loi_read_lop;
1785 if (oap->oap_async_flags & ASYNC_URGENT)
1786 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1787 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1788 lop_update_pending(cli, lop, cmd, 1);
1790 loi_list_maint(cli, loi);
1792 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
1795 osc_check_rpcs(cli);
1796 spin_unlock(&cli->cl_loi_list_lock);
1801 /* aka (~was & now & flag), but this is more clear :) */
1802 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
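/* e.g. SETTING(ASYNC_READY, ASYNC_READY|ASYNC_URGENT, ASYNC_URGENT) is true
 * (URGENT is newly set), while SETTING(ASYNC_URGENT, ASYNC_URGENT,
 * ASYNC_URGENT) is false because the flag was already there */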
1804 static int osc_set_async_flags(struct obd_export *exp,
1805 struct lov_stripe_md *lsm,
1806 struct lov_oinfo *loi, void *cookie,
1807 obd_flag async_flags)
1809 struct client_obd *cli = &exp->exp_obd->u.cli;
1810 struct loi_oap_pages *lop;
1811 struct osc_async_page *oap;
1815 oap = oap_from_cookie(cookie);
1817 RETURN(PTR_ERR(oap));
1819 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1823 loi = &lsm->lsm_oinfo[0];
1825 if (oap->oap_cmd == OBD_BRW_WRITE) {
1826 lop = &loi->loi_write_lop;
1828 lop = &loi->loi_read_lop;
1831 spin_lock(&cli->cl_loi_list_lock);
1833 if (list_empty(&oap->oap_pending_item))
1834 GOTO(out, rc = -EINVAL);
1836 if ((oap->oap_async_flags & async_flags) == async_flags)
1839 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
1840 oap->oap_async_flags |= ASYNC_READY;
1842 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
1843 if (list_empty(&oap->oap_rpc_item)) {
1844 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1845 loi_list_maint(cli, loi);
1849 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
1850 oap->oap_async_flags);
1852 osc_check_rpcs(cli);
1853 spin_unlock(&cli->cl_loi_list_lock);
1857 static int osc_queue_sync_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1858 struct lov_oinfo *loi,
1859 struct obd_sync_io_container *osic, void *cookie,
1860 int cmd, obd_off off, int count,
1863 struct client_obd *cli = &exp->exp_obd->u.cli;
1864 struct osc_async_page *oap;
1865 struct loi_oap_pages *lop;
1868 oap = oap_from_cookie(cookie);
1870 RETURN(PTR_ERR(oap));
1872 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1875 if (!list_empty(&oap->oap_pending_item) ||
1876 !list_empty(&oap->oap_urgent_item) ||
1877 !list_empty(&oap->oap_rpc_item))
1881 loi = &lsm->lsm_oinfo[0];
1883 spin_lock(&cli->cl_loi_list_lock);
1886 oap->oap_page_off = off;
1887 oap->oap_count = count;
1888 oap->oap_brw_flags = brw_flags;
1890 if (cmd == OBD_BRW_WRITE)
1891 lop = &loi->loi_write_lop;
1893 lop = &loi->loi_read_lop;
1895 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_sync);
1896 oap->oap_osic = osic;
1897 osic_add_one(osic, &oap->oap_occ);
1899 LOI_DEBUG(loi, "oap %p page %p on sync pending\n", oap, oap->oap_page);
1901 spin_unlock(&cli->cl_loi_list_lock);
1906 static void osc_sync_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
1907 struct loi_oap_pages *lop, int cmd)
1909 struct list_head *pos, *tmp;
1910 struct osc_async_page *oap;
1912 list_for_each_safe(pos, tmp, &lop->lop_pending_sync) {
1913 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1914 list_del(&oap->oap_pending_item);
1915 oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT |
1917 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1918 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1919 lop_update_pending(cli, lop, cmd, 1);
1921 loi_list_maint(cli, loi);
1924 static int osc_trigger_sync_io(struct obd_export *exp,
1925 struct lov_stripe_md *lsm,
1926 struct lov_oinfo *loi,
1927 struct obd_sync_io_container *osic)
1929 struct client_obd *cli = &exp->exp_obd->u.cli;
1932 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1936 loi = &lsm->lsm_oinfo[0];
1938 spin_lock(&cli->cl_loi_list_lock);
1940 osc_sync_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
1941 osc_sync_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
1943 osc_check_rpcs(cli);
1944 spin_unlock(&cli->cl_loi_list_lock);
1949 static int osc_teardown_async_page(struct obd_export *exp,
1950 struct lov_stripe_md *lsm,
1951 struct lov_oinfo *loi, void *cookie)
1953 struct client_obd *cli = &exp->exp_obd->u.cli;
1954 struct loi_oap_pages *lop;
1955 struct osc_async_page *oap;
1959 oap = oap_from_cookie(cookie);
1961 RETURN(PTR_ERR(oap));
1964 loi = &lsm->lsm_oinfo[0];
1966 if (oap->oap_cmd == OBD_BRW_WRITE) {
1967 lop = &loi->loi_write_lop;
1969 lop = &loi->loi_read_lop;
1972 spin_lock(&cli->cl_loi_list_lock);
1974 if (!list_empty(&oap->oap_rpc_item))
1975 GOTO(out, rc = -EBUSY);
1977 osc_exit_cache(cli, oap);
1979 if (!list_empty(&oap->oap_urgent_item)) {
1980 list_del_init(&oap->oap_urgent_item);
1981 oap->oap_async_flags &= ~ASYNC_URGENT;
1983 if (!list_empty(&oap->oap_pending_item)) {
1984 list_del_init(&oap->oap_pending_item);
1985 lop_update_pending(cli, lop, oap->oap_cmd, -1);
1987 loi_list_maint(cli, loi);
1989 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
1991 spin_unlock(&cli->cl_loi_list_lock);
1993 OBD_FREE(oap, sizeof(*oap));
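/*
 * Lifecycle sketch (guarded out, illustrative only): how an llite-style
 * caller is expected to drive the async page API above.  The function name,
 * flag choices and the assumption of a full-page write are illustrative; the
 * real callbacks in 'ops' live in the caller (llite), not here.
 */
#if 0
static int example_async_write_page(struct obd_export *exp,
                                    struct lov_stripe_md *lsm,
                                    struct lov_oinfo *loi, struct page *page,
                                    obd_off offset,
                                    struct obd_async_page_ops *ops,
                                    void *caller_data)
{
        void *cookie;
        int rc;

        /* allocate the osc_async_page bookkeeping for this page */
        rc = osc_prep_async_page(exp, lsm, loi, page, offset, ops,
                                 caller_data, &cookie);
        if (rc)
                return rc;

        /* queue a full-page write; osc_check_rpcs() batches it into a BRW
         * rpc and ops->ap_completion() fires when that rpc finishes */
        rc = osc_queue_async_io(exp, lsm, loi, cookie, OBD_BRW_WRITE,
                                0, PAGE_SIZE, 0, ASYNC_READY | ASYNC_URGENT);
        if (rc)
                osc_teardown_async_page(exp, lsm, loi, cookie);
        return rc;
}
#endif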
1998 /* Note: caller will lock/unlock, and set uptodate on the pages */
1999 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2000 static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
2001 struct lov_stripe_md *lsm, obd_count page_count,
2002 struct brw_page *pga)
2004 struct ptlrpc_request *request = NULL;
2005 struct ost_body *body;
2006 struct niobuf_remote *nioptr;
2007 struct obd_ioobj *iooptr;
2008 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2012 /* XXX does not handle 'new' brw protocol */
2014 size[1] = sizeof(struct obd_ioobj);
2015 size[2] = page_count * sizeof(*nioptr);
2017 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_READ, 3,
2022 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2023 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2024 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2025 sizeof(*nioptr) * page_count);
2027 memcpy(&body->oa, oa, sizeof(body->oa));
2029 obdo_to_ioobj(oa, iooptr);
2030 iooptr->ioo_bufcnt = page_count;
2032 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2033 LASSERT(PageLocked(pga[mapped].pg));
2034 LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);
2036 nioptr->offset = pga[mapped].off;
2037 nioptr->len = pga[mapped].count;
2038 nioptr->flags = pga[mapped].flag;
2041 size[1] = page_count * sizeof(*nioptr);
2042 request->rq_replen = lustre_msg_size(2, size);
2044 rc = ptlrpc_queue_wait(request);
2048 body = lustre_swab_repbuf(request, 0, sizeof(*body),
2049 lustre_swab_ost_body);
2051 CERROR("Can't unpack body\n");
2052 GOTO(out_req, rc = -EPROTO);
2055 memcpy(oa, &body->oa, sizeof(*oa));
2057 swab = lustre_msg_swabbed(request->rq_repmsg);
2058 LASSERT_REPSWAB(request, 1);
2059 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2061 /* nioptr missing or short */
2062 GOTO(out_req, rc = -EPROTO);
2066 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2067 struct page *page = pga[mapped].pg;
2068 struct buffer_head *bh;
2072 lustre_swab_niobuf_remote (nioptr);
2074 /* got san device associated */
2075 LASSERT(exp->exp_obd != NULL);
2076 dev = exp->exp_obd->u.cli.cl_sandev;
2079 if (!nioptr->offset) {
2080 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
2081 page->mapping->host->i_ino,
2083 memset(page_address(page), 0, PAGE_SIZE);
2087 if (!page->buffers) {
2088 create_empty_buffers(page, dev, PAGE_SIZE);
2091 clear_bit(BH_New, &bh->b_state);
2092 set_bit(BH_Mapped, &bh->b_state);
2093 bh->b_blocknr = (unsigned long)nioptr->offset;
2095 clear_bit(BH_Uptodate, &bh->b_state);
2097 ll_rw_block(READ, 1, &bh);
2101 /* if buffer already existed, it must be the
2102 * one we mapped before, check it */
2103 LASSERT(!test_bit(BH_New, &bh->b_state));
2104 LASSERT(test_bit(BH_Mapped, &bh->b_state));
2105 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
2107                 /* wait for its io completion */
2108 if (test_bit(BH_Lock, &bh->b_state))
2111 if (!test_bit(BH_Uptodate, &bh->b_state))
2112 ll_rw_block(READ, 1, &bh);
2116                 /* must do synchronous write here */
2118 if (!buffer_uptodate(bh)) {
2126 ptlrpc_req_finished(request);
2130 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2131 struct lov_stripe_md *lsm, obd_count page_count,
2132 struct brw_page *pga)
2134 struct ptlrpc_request *request = NULL;
2135 struct ost_body *body;
2136 struct niobuf_remote *nioptr;
2137 struct obd_ioobj *iooptr;
2138 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2142 size[1] = sizeof(struct obd_ioobj);
2143 size[2] = page_count * sizeof(*nioptr);
2145 request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_WRITE,
2150 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
2151 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
2152 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2153 sizeof (*nioptr) * page_count);
2155 memcpy(&body->oa, oa, sizeof(body->oa));
2157 obdo_to_ioobj(oa, iooptr);
2158 iooptr->ioo_bufcnt = page_count;
2161 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2162 LASSERT(PageLocked(pga[mapped].pg));
2163 LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);
2165 nioptr->offset = pga[mapped].off;
2166 nioptr->len = pga[mapped].count;
2167 nioptr->flags = pga[mapped].flag;
2170 size[1] = page_count * sizeof(*nioptr);
2171 request->rq_replen = lustre_msg_size(2, size);
2173 rc = ptlrpc_queue_wait(request);
2177 swab = lustre_msg_swabbed (request->rq_repmsg);
2178 LASSERT_REPSWAB (request, 1);
2179 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2181 CERROR("absent/short niobuf array\n");
2182 GOTO(out_req, rc = -EPROTO);
2186 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2187 struct page *page = pga[mapped].pg;
2188 struct buffer_head *bh;
2192 lustre_swab_niobuf_remote (nioptr);
2194 /* got san device associated */
2195 LASSERT(exp->exp_obd != NULL);
2196 dev = exp->exp_obd->u.cli.cl_sandev;
2198 if (!page->buffers) {
2199 create_empty_buffers(page, dev, PAGE_SIZE);
2202 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2203 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2204 LASSERT(page->buffers->b_blocknr ==
2205 (unsigned long)nioptr->offset);
2211                         /* if buffer locked, wait for its io completion */
2212 if (test_bit(BH_Lock, &bh->b_state))
2215 clear_bit(BH_New, &bh->b_state);
2216 set_bit(BH_Mapped, &bh->b_state);
2218 /* override the block nr */
2219 bh->b_blocknr = (unsigned long)nioptr->offset;
2221 /* we are about to write it, so set it
2223                  * page lock should guarantee no race condition here */
2224 set_bit(BH_Uptodate, &bh->b_state);
2225 set_bit(BH_Dirty, &bh->b_state);
2227 ll_rw_block(WRITE, 1, &bh);
2229                 /* must do synchronous write here */
2231 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2239 ptlrpc_req_finished(request);
2243 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2244 struct lov_stripe_md *lsm, obd_count page_count,
2245 struct brw_page *pga, struct obd_trans_info *oti)
2249 while (page_count) {
2250 obd_count pages_per_brw;
2253 if (page_count > OSC_BRW_MAX_IOV)
2254 pages_per_brw = OSC_BRW_MAX_IOV;
2256 pages_per_brw = page_count;
2258 if (cmd & OBD_BRW_WRITE)
2259 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw,pga);
2261 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2266 page_count -= pages_per_brw;
2267 pga += pages_per_brw;
2274 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2276 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2278 LASSERT(lock != NULL);
2279 l_lock(&lock->l_resource->lr_namespace->ns_lock);
2281 if (lock->l_ast_data && lock->l_ast_data != data) {
2282 struct inode *new_inode = data;
2283 struct inode *old_inode = lock->l_ast_data;
2284 unsigned long state = old_inode->i_state & I_FREEING;
2285 CERROR("Found existing inode %p/%lu/%u state %lu in lock: "
2286 "setting data to %p/%lu/%u\n", old_inode,
2287 old_inode->i_ino, old_inode->i_generation, state,
2288 new_inode, new_inode->i_ino, new_inode->i_generation);
2292 lock->l_ast_data = data;
2293 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
2294 LDLM_LOCK_PUT(lock);
2297 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2298 ldlm_iterator_t replace, void *data)
2300 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2301 struct obd_device *obd = class_exp2obd(exp);
2303 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
2307 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2308 struct lustre_handle *parent_lock,
2309 __u32 type, void *extentp, int extent_len, __u32 mode,
2310 int *flags, void *callback, void *data,
2311 struct lustre_handle *lockh)
2313 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2314 struct obd_device *obd = exp->exp_obd;
2315 struct ldlm_extent *extent = extentp;
2319 /* Filesystem lock extents are extended to page boundaries so that
2320 * dealing with the page cache is a little smoother. */
2321 extent->start -= extent->start & ~PAGE_MASK;
2322 extent->end |= ~PAGE_MASK;
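        /* e.g. with 4096-byte pages a request for bytes [1000, 5000] is
         * widened to the page-aligned extent [0, 8191] */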
2324 /* Next, search for already existing extent locks that will cover us */
2325 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id,
2326 type, extent, sizeof(*extent), mode, lockh);
2328 osc_set_data_with_check(lockh, data);
2329 /* We already have a lock, and it's referenced */
2333 /* If we're trying to read, we also search for an existing PW lock. The
2334 * VFS and page cache already protect us locally, so lots of readers/
2335 * writers can share a single PW lock.
2337 * There are problems with conversion deadlocks, so instead of
2338 * converting a read lock to a write lock, we'll just enqueue a new
2341 * At some point we should cancel the read lock instead of making them
2342 * send us a blocking callback, but there are problems with canceling
2343 * locks out from other users right now, too. */
2345 if (mode == LCK_PR) {
2346 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2347 extent, sizeof(*extent), LCK_PW, lockh);
2349 /* FIXME: This is not incredibly elegant, but it might
2350 * be more elegant than adding another parameter to
2351 * lock_match. I want a second opinion. */
2352 ldlm_lock_addref(lockh, LCK_PR);
2353 ldlm_lock_decref(lockh, LCK_PW);
2354 osc_set_data_with_check(lockh, data);
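/* Added note: the match above referenced the lock in PW mode, but the
 * caller expects to hold (and later drop) a PR reference, so a PR
 * reference is taken before the PW one is released. */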
2359 rc = ldlm_cli_enqueue(exp, NULL, obd->obd_namespace, parent_lock,
2360 res_id, type, extent, sizeof(*extent), mode,
2361 flags,ldlm_completion_ast, callback, data, lockh);
2365 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2366 __u32 type, void *extentp, int extent_len, __u32 mode,
2367 int *flags, void *data, struct lustre_handle *lockh)
2369 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2370 struct obd_device *obd = exp->exp_obd;
2371 struct ldlm_extent *extent = extentp;
2375 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2377 /* Filesystem lock extents are extended to page boundaries so that
2378 * dealing with the page cache is a little smoother */
2379 extent->start -= extent->start & ~PAGE_MASK;
2380 extent->end |= ~PAGE_MASK;
2382 /* Next, search for already existing extent locks that will cover us */
2383 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2384 extent, sizeof(*extent), mode, lockh);
2386 osc_set_data_with_check(lockh, data);
2389 /* If we're trying to read, we also search for an existing PW lock. The
2390 * VFS and page cache already protect us locally, so lots of readers/
2391 * writers can share a single PW lock. */
2392 if (mode == LCK_PR) {
2393 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2394 extent, sizeof(*extent), LCK_PW, lockh);
2396 /* FIXME: This is not incredibly elegant, but it might
2397 * be more elegant than adding another parameter to
2398 * lock_match. I want a second opinion. */
2399 osc_set_data_with_check(lockh, data);
2400 ldlm_lock_addref(lockh, LCK_PR);
2401 ldlm_lock_decref(lockh, LCK_PW);
2407 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2408 __u32 mode, struct lustre_handle *lockh)
2412 ldlm_lock_decref(lockh, mode);
2417 static int osc_cancel_unused(struct obd_export *exp,
2418 struct lov_stripe_md *lsm, int flags, void *opaque)
2420 struct obd_device *obd = class_exp2obd(exp);
2421 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
2423 return ldlm_cli_cancel_unused(obd->obd_namespace, &res_id, flags,
2427 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2428 unsigned long max_age)
2430 struct obd_statfs *msfs;
2431 struct ptlrpc_request *request;
2432 int rc, size = sizeof(*osfs);
2435 /* We could possibly pass max_age in the request (as an absolute
2436 * timestamp or a "seconds.usec ago") so the target can avoid doing
2437 * extra calls into the filesystem if that isn't necessary (e.g.
2438 * during mount, where that would help a bit).  Having relative timestamps
2439 * is not so great if request processing is slow, while absolute
2440 * timestamps are not ideal because they need time synchronization. */
2441 request = ptlrpc_prep_req(obd->u.cli.cl_import, OST_STATFS,0,NULL,NULL);
2445 request->rq_replen = lustre_msg_size(1, &size);
2446 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2448 rc = ptlrpc_queue_wait(request);
2452 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2453 lustre_swab_obd_statfs);
2455 CERROR("Can't unpack obd_statfs\n");
2456 GOTO(out, rc = -EPROTO);
2459 memcpy(osfs, msfs, sizeof(*osfs));
2463 ptlrpc_req_finished(request);
2467 /* Retrieve object striping information.
2469 * @lump is a pointer to a userspace struct with lmm_stripe_count indicating
2470 * the maximum number of OST indices which will fit in the user buffer.
2471 * lmm_magic must be LOV_USER_MAGIC (we only use 1 slot here).
2473 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2475 struct lov_user_md lum;
2476 struct lov_mds_md *lmmk;
2483 rc = copy_from_user(&lum, lump, sizeof(lum));
2487 if (lum.lmm_magic != LOV_USER_MAGIC)
2490 if (lum.lmm_stripe_count < 1)
2493 lmm_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
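/* Added note: an OSC object always has exactly one stripe, so the reply
 * only needs room for the lov_mds_md header plus a single object entry. */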
2494 OBD_ALLOC(lmmk, lmm_size);
2498 lmmk->lmm_stripe_count = 1;
2499 lmmk->lmm_object_id = lsm->lsm_object_id;
2500 lmmk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2502 if (copy_to_user(lump, lmmk, lmm_size))
2505 OBD_FREE(lmmk, lmm_size);
2510 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2511 void *karg, void *uarg)
2513 struct obd_device *obd = exp->exp_obd;
2514 struct obd_ioctl_data *data = karg;
2519 case OBD_IOC_LOV_GET_CONFIG: {
2521 struct lov_desc *desc;
2522 struct obd_uuid uuid;
2526 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2527 GOTO(out, err = -EINVAL);
2529 data = (struct obd_ioctl_data *)buf;
2531 if (sizeof(*desc) > data->ioc_inllen1) {
2533 GOTO(out, err = -EINVAL);
2536 if (data->ioc_inllen2 < sizeof(uuid)) {
2538 GOTO(out, err = -EINVAL);
2541 desc = (struct lov_desc *)data->ioc_inlbuf1;
2542 desc->ld_tgt_count = 1;
2543 desc->ld_active_tgt_count = 1;
2544 desc->ld_default_stripe_count = 1;
2545 desc->ld_default_stripe_size = 0;
2546 desc->ld_default_stripe_offset = 0;
2547 desc->ld_pattern = 0;
2548 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2550 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2552 err = copy_to_user((void *)uarg, buf, len);
2555 obd_ioctl_freedata(buf, len);
2558 case LL_IOC_LOV_SETSTRIPE:
2559 err = obd_alloc_memmd(exp, karg);
2563 case LL_IOC_LOV_GETSTRIPE:
2564 err = osc_getstripe(karg, uarg);
2566 case OBD_IOC_CLIENT_RECOVER:
2567 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2572 case IOC_OSC_SET_ACTIVE:
2573 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2577 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
2578 GOTO(out, err = -ENOTTY);
2584 static int osc_get_info(struct obd_export *exp, obd_count keylen,
2585 void *key, __u32 *vallen, void *val)
2588 if (!vallen || !val)
2591 if (keylen > strlen("lock_to_stripe") &&
2592 strcmp(key, "lock_to_stripe") == 0) {
2593 __u32 *stripe = val;
2594 *vallen = sizeof(*stripe);
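/* Added note: the OSC talks to a single OST, so any lock it owns can only
 * belong to stripe index 0; presumably the stripe index returned here is
 * always that constant. */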
2597 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2598 struct ptlrpc_request *req;
2600 char *bufs[1] = {key};
2602 req = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GET_INFO, 1,
2607 req->rq_replen = lustre_msg_size(1, vallen);
2608 rc = ptlrpc_queue_wait(req);
2612 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2613 lustre_swab_ost_last_id);
2614 if (reply == NULL) {
2615 CERROR("Can't unpack OST last ID\n");
2616 GOTO(out, rc = -EPROTO);
2618 *((obd_id *)val) = *reply;
2620 ptlrpc_req_finished(req);
2626 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2627 void *key, obd_count vallen, void *val)
2629 struct ptlrpc_request *req;
2630 struct obd_import *imp = class_exp2cliimp(exp);
2631 struct llog_ctxt *ctxt;
2632 int rc, size = keylen;
2633 char *bufs[1] = {key};
2636 if (keylen == strlen("next_id") &&
2637 memcmp(key, "next_id", strlen("next_id")) == 0) {
2638 if (vallen != sizeof(obd_id))
2640 exp->u.eu_osc_data.oed_oscc.oscc_next_id = *((obd_id*)val) + 1;
2641 CDEBUG(D_INODE, "%s: set oscc_next_id = "LPU64"\n",
2642 exp->exp_obd->obd_name,
2643 exp->u.eu_osc_data.oed_oscc.oscc_next_id);
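/* Added note: the caller passes the id of the last object known to be in
 * use, so object precreation resumes at that id plus one, which is why
 * oscc_next_id is set to *val + 1 above. */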
2648 if (keylen == strlen("growth_count") &&
2649 memcmp(key, "growth_count", strlen("growth_count")) == 0) {
2650 if (vallen != sizeof(int))
2652 exp->u.eu_osc_data.oed_oscc.oscc_grow_count = *((int*)val);
2656 if (keylen == strlen("unlinked") &&
2657 memcmp(key, "unlinked", keylen) == 0) {
2658 struct osc_creator *oscc = &exp->u.eu_osc_data.oed_oscc;
2659 spin_lock(&oscc->oscc_lock);
2660 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2661 spin_unlock(&oscc->oscc_lock);
2665 if (keylen < strlen("mds_conn") ||
2666 memcmp(key, "mds_conn", strlen("mds_conn")) != 0)
2670 req = ptlrpc_prep_req(imp, OST_SET_INFO, 1, &size, bufs);
2674 req->rq_replen = lustre_msg_size(0, NULL);
2675 rc = ptlrpc_queue_wait(req);
2676 ptlrpc_req_finished(req);
2678 ctxt = llog_get_context(exp->exp_obd, LLOG_UNLINK_ORIG_CTXT);
2680 rc = llog_initiator_connect(ctxt);
2685 imp->imp_server_timeout = 1;
2686 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
2687 ptlrpc_pinger_add_import(imp);
2693 static struct llog_operations osc_size_repl_logops = {
2694 lop_cancel: llog_obd_repl_cancel
2697 static struct llog_operations osc_unlink_orig_logops;
2698 static int osc_llog_init(struct obd_device *obd, struct obd_device *tgt,
2699 int count, struct llog_logid *logid)
2704 osc_unlink_orig_logops = llog_lvfs_ops;
2705 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
2706 osc_unlink_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
2707 osc_unlink_orig_logops.lop_add = llog_obd_origin_add;
2708 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
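/* Added note: osc_unlink_orig_logops starts as a verbatim copy of
 * llog_lvfs_ops and only the origin-side hooks (setup, cleanup, add,
 * connect) are overridden, so every other llog method keeps its default
 * lvfs behaviour. */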
2710 rc = llog_setup(obd, LLOG_UNLINK_ORIG_CTXT, tgt, count, logid,
2711 &osc_unlink_orig_logops);
2715 rc = llog_setup(obd, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
2716 &osc_size_repl_logops);
2720 static int osc_llog_finish(struct obd_device *obd, int count)
2725 rc = llog_cleanup(llog_get_context(obd, LLOG_UNLINK_ORIG_CTXT));
2729 rc = llog_cleanup(llog_get_context(obd, LLOG_SIZE_REPL_CTXT));
2734 static int osc_connect(struct lustre_handle *exph,
2735 struct obd_device *obd, struct obd_uuid *cluuid)
2738 struct obd_export *exp;
2740 rc = client_connect_import(exph, obd, cluuid);
2742 if (obd->u.cli.cl_conn_count == 1) {
2743 exp = class_conn2export(exph);
2750 static int osc_disconnect(struct obd_export *exp, int flags)
2752 struct obd_device *obd = class_exp2obd(exp);
2753 struct llog_ctxt *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
2756 if (obd->u.cli.cl_conn_count == 1) {
2757 /* flush any remaining cancel messages out to the target */
2758 llog_sync(ctxt, exp);
2760 /* balance the conn2export for oscc in osc_connect */
2761 class_export_put(exp);
2764 rc = client_disconnect_export(exp, flags);
2768 static int osc_lock_contains(struct obd_export *exp, struct lov_stripe_md *lsm,
2769 struct ldlm_lock *lock, obd_off offset)
2775 if (lock->l_policy_data.l_extent.start <= offset &&
2776 lock->l_policy_data.l_extent.end >= offset)
2781 static int osc_invalidate_import(struct obd_device *obd,
2782 struct obd_import *imp)
2784 LASSERT(imp->imp_obd == obd);
2785 /* this used to try to tear down queued pages, but it was
2786 * not correctly implemented.  We'll have to do it again once
2787 * we call obd_invalidate_import() again. */
2792 int osc_setup(struct obd_device *obd, obd_count len, void *buf)
2796 rc = ptlrpcd_addref();
2800 rc = client_obd_setup(obd, len, buf);
2806 int osc_cleanup(struct obd_device *obd, int flags)
2810 rc = client_obd_cleanup(obd, flags);
2816 struct obd_ops osc_obd_ops = {
2817 o_owner: THIS_MODULE,
2818 o_attach: osc_attach,
2819 o_detach: osc_detach,
2821 o_cleanup: osc_cleanup,
2822 o_connect: osc_connect,
2823 o_disconnect: osc_disconnect,
2824 o_statfs: osc_statfs,
2825 o_packmd: osc_packmd,
2826 o_unpackmd: osc_unpackmd,
2827 o_create: osc_create,
2828 o_destroy: osc_destroy,
2829 o_getattr: osc_getattr,
2830 o_getattr_async:osc_getattr_async,
2831 o_setattr: osc_setattr,
2833 o_brw_async: osc_brw_async,
2834 .o_prep_async_page = osc_prep_async_page,
2835 .o_queue_async_io = osc_queue_async_io,
2836 .o_set_async_flags = osc_set_async_flags,
2837 .o_queue_sync_io = osc_queue_sync_io,
2838 .o_trigger_sync_io = osc_trigger_sync_io,
2839 .o_teardown_async_page = osc_teardown_async_page,
2842 o_enqueue: osc_enqueue,
2844 o_change_cbdata:osc_change_cbdata,
2845 o_cancel: osc_cancel,
2846 o_cancel_unused:osc_cancel_unused,
2847 o_iocontrol: osc_iocontrol,
2848 o_get_info: osc_get_info,
2849 o_set_info: osc_set_info,
2850 o_lock_contains:osc_lock_contains,
2851 o_invalidate_import: osc_invalidate_import,
2852 o_llog_init: osc_llog_init,
2853 o_llog_finish: osc_llog_finish,
2856 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2857 struct obd_ops sanosc_obd_ops = {
2858 o_owner: THIS_MODULE,
2859 o_attach: osc_attach,
2860 o_detach: osc_detach,
2861 o_cleanup: client_obd_cleanup,
2862 o_connect: osc_connect,
2863 o_disconnect: client_disconnect_export,
2864 o_statfs: osc_statfs,
2865 o_packmd: osc_packmd,
2866 o_unpackmd: osc_unpackmd,
2867 o_create: osc_real_create,
2868 o_destroy: osc_destroy,
2869 o_getattr: osc_getattr,
2870 o_getattr_async:osc_getattr_async,
2871 o_setattr: osc_setattr,
2872 o_setup: client_sanobd_setup,
2876 o_enqueue: osc_enqueue,
2878 o_change_cbdata:osc_change_cbdata,
2879 o_cancel: osc_cancel,
2880 o_cancel_unused:osc_cancel_unused,
2881 o_iocontrol: osc_iocontrol,
2882 o_lock_contains:osc_lock_contains,
2883 o_invalidate_import: osc_invalidate_import,
2884 o_llog_init: osc_llog_init,
2885 o_llog_finish: osc_llog_finish,
2889 int __init osc_init(void)
2891 struct lprocfs_static_vars lvars, sanlvars;
2895 lprocfs_init_vars(osc, &lvars);
2896 lprocfs_init_vars(osc, &sanlvars);
2898 rc = class_register_type(&osc_obd_ops, lvars.module_vars,
2903 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2904 rc = class_register_type(&sanosc_obd_ops, sanlvars.module_vars,
2905 LUSTRE_SANOSC_NAME);
2907 class_unregister_type(LUSTRE_OSC_NAME);
2913 static void /*__exit*/ osc_exit(void)
2915 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2916 class_unregister_type(LUSTRE_SANOSC_NAME);
2918 class_unregister_type(LUSTRE_OSC_NAME);
2922 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2923 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
2924 MODULE_LICENSE("GPL");
2926 module_init(osc_init);
2927 module_exit(osc_exit);