1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table.)
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_OSC
35 # include <linux/version.h>
36 # include <linux/module.h>
37 # include <linux/mm.h>
38 # include <linux/highmem.h>
39 # include <linux/ctype.h>
40 # include <linux/init.h>
41 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/workqueue.h>
43 # include <linux/smp_lock.h>
45 # include <linux/locks.h>
47 #else /* __KERNEL__ */
48 # include <liblustre.h>
51 #include <linux/lustre_dlm.h>
52 #include <libcfs/kp30.h>
53 #include <linux/lustre_net.h>
54 #include <linux/lustre_sec.h>
55 #include <lustre/lustre_user.h>
56 #include <linux/obd_ost.h>
57 #include <linux/obd_lov.h>
63 #include <linux/lustre_ha.h>
64 #include <linux/lprocfs_status.h>
65 #include <linux/lustre_log.h>
66 #include <linux/lustre_audit.h>
67 #include <linux/lustre_gs.h>
68 #include "osc_internal.h"
70 /* Pack OSC object metadata for disk storage (LE byte order). */
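/* Note: lov_mds_md (lmm) is the little-endian on-disk/wire layout and
 * lov_stripe_md (lsm) is the in-memory form; at the OSC level there is only
 * ever a single stripe (hence lov_stripe_md_size(1) in osc_unpackmd below). */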
71 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
72 struct lov_stripe_md *lsm)
77 lmm_size = sizeof(**lmmp);
82 OBD_FREE(*lmmp, lmm_size);
88 OBD_ALLOC(*lmmp, lmm_size);
94 LASSERT(lsm->lsm_object_id);
95 LASSERT(lsm->lsm_object_gr);
96 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
97 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
103 /* Unpack OSC object metadata from disk storage (LE byte order). */
104 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
105 struct lov_mds_md *lmm, int lmm_bytes)
111 if (lmm_bytes < sizeof (*lmm)) {
112 CERROR("lov_mds_md too small: %d, need %d\n",
113 lmm_bytes, (int)sizeof(*lmm));
116 /* XXX LOV_MAGIC etc check? */
118 if (lmm->lmm_object_id == 0) {
119 CERROR("lov_mds_md: zero lmm_object_id\n");
124 lsm_size = lov_stripe_md_size(1);
128 if (*lsmp != NULL && lmm == NULL) {
129 OBD_FREE(*lsmp, lsm_size);
135 OBD_ALLOC(*lsmp, lsm_size);
138 loi_init((*lsmp)->lsm_oinfo);
142 /* XXX zero *lsmp? */
143 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
144 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
145 LASSERT((*lsmp)->lsm_object_id);
146 LASSERT((*lsmp)->lsm_object_gr);
149 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
154 static int osc_getattr_interpret(struct ptlrpc_request *req,
155 struct osc_getattr_async_args *aa, int rc)
157 struct ost_body *body;
163 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
165 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
166 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
168 /* This should really be sent by the OST */
169 aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
170 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
172 CERROR("can't unpack ost_body\n");
174 aa->aa_oa->o_valid = 0;
180 static int osc_getattr_async(struct obd_export *exp, struct obdo *oa,
181 struct lov_stripe_md *md,
182 struct ptlrpc_request_set *set)
184 struct ptlrpc_request *request;
185 struct ost_body *body;
186 int size = sizeof(*body);
187 struct osc_getattr_async_args *aa;
190 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
191 OST_GETATTR, 1, &size, NULL);
195 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
196 memcpy(&body->oa, oa, sizeof(*oa));
198 request->rq_replen = lustre_msg_size(1, &size);
199 request->rq_interpret_reply = osc_getattr_interpret;
201 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
202 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
205 ptlrpc_set_add_req (set, request);
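/* A minimal usage sketch (assuming the standard ptlrpc request-set helpers):
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *      int rc = osc_getattr_async(exp, oa, md, set);
 *      if (rc == 0)
 *              rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 */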
209 static int osc_getattr(struct obd_export *exp, struct obdo *oa,
210 struct lov_stripe_md *md)
212 struct ptlrpc_request *request;
213 struct ost_body *body;
214 int rc, size = sizeof(*body);
217 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
218 OST_GETATTR, 1, &size, NULL);
222 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
223 memcpy(&body->oa, oa, sizeof(*oa));
225 request->rq_replen = lustre_msg_size(1, &size);
227 rc = ptlrpc_queue_wait(request);
229 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
233 body = lustre_swab_repbuf(request, 0, sizeof (*body),
234 lustre_swab_ost_body);
236 CERROR ("can't unpack ost_body\n");
237 GOTO (out, rc = -EPROTO);
240 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
241 memcpy(oa, &body->oa, sizeof(*oa));
243 /* This should really be sent by the OST */
244 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
245 oa->o_valid |= OBD_MD_FLBLKSZ;
249 ptlrpc_req_finished(request);
253 static int osc_setattr(struct obd_export *exp, struct obdo *oa,
254 struct lov_stripe_md *md, struct obd_trans_info *oti)
256 struct ptlrpc_request *request;
257 struct ost_body *body;
258 int rc, size = sizeof(*body);
261 LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);
263 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
264 OST_SETATTR, 1, &size, NULL);
268 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
269 memcpy(&body->oa, oa, sizeof(*oa));
271 request->rq_replen = lustre_msg_size(1, &size);
273 if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
274 ptlrpcd_add_req(request);
277 rc = ptlrpc_queue_wait(request);
281 body = lustre_swab_repbuf(request, 0, sizeof(*body),
282 lustre_swab_ost_body);
284 GOTO(out, rc = -EPROTO);
286 memcpy(oa, &body->oa, sizeof(*oa));
290 ptlrpc_req_finished(request);
294 int osc_real_create(struct obd_export *exp, struct obdo *oa,
295 struct lov_stripe_md **ea, struct obd_trans_info *oti)
297 struct osc_creator *oscc = &exp->exp_obd->u.cli.cl_oscc;
298 struct ptlrpc_request *request;
299 struct ost_body *body;
300 struct lov_stripe_md *lsm;
301 int rc, size = sizeof(*body);
309 rc = obd_alloc_memmd(exp, &lsm);
314 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
315 OST_CREATE, 1, &size, NULL);
317 GOTO(out, rc = -ENOMEM);
319 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
320 memcpy(&body->oa, oa, sizeof(body->oa));
322 request->rq_replen = lustre_msg_size(1, &size);
323 if (oa->o_valid & OBD_MD_FLINLINE) {
324 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
325 oa->o_flags == OBD_FL_DELORPHAN);
326 DEBUG_REQ(D_HA, request,
327 "delorphan from OST integration");
328 /* Don't resend the delorphan request */
329 request->rq_no_resend = request->rq_no_delay = 1;
332 rc = ptlrpc_queue_wait(request);
336 body = lustre_swab_repbuf(request, 0, sizeof(*body),
337 lustre_swab_ost_body);
339 CERROR ("can't unpack ost_body\n");
340 GOTO (out_req, rc = -EPROTO);
343 if ((oa->o_valid & OBD_MD_FLFLAGS) && oa->o_flags == OBD_FL_DELORPHAN) {
344 struct obd_import *imp = class_exp2cliimp(exp);
345 /* MDS declares the last known object, the OSS responds
346 * with the next possible object -bzzz */
347 spin_lock(&oscc->oscc_lock);
348 oscc->oscc_next_id = body->oa.o_id;
349 spin_unlock(&oscc->oscc_lock);
350 CDEBUG(D_HA, "%s: set nextid "LPD64" after recovery\n",
351 imp->imp_target_uuid.uuid, oa->o_id);
353 memcpy(oa, &body->oa, sizeof(*oa));
355 /* This should really be sent by the OST */
356 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
357 oa->o_valid |= OBD_MD_FLBLKSZ;
359 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
360 * have valid lsm_oinfo data structs, so don't go touching that.
361 * This needs to be fixed in a big way.
363 lsm->lsm_object_id = oa->o_id;
364 lsm->lsm_object_gr = oa->o_gr;
368 oti->oti_transno = request->rq_repmsg->transno;
370 if (oa->o_valid & OBD_MD_FLCOOKIE) {
371 if (!oti->oti_logcookies)
372 oti_alloc_cookies(oti, 1);
373 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
374 sizeof(oti->oti_onecookie));
378 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
381 ptlrpc_req_finished(request);
384 obd_free_memmd(exp, &lsm);
388 static int osc_punch(struct obd_export *exp, struct obdo *oa,
389 struct lov_stripe_md *md, obd_size start,
390 obd_size end, struct obd_trans_info *oti)
392 struct ptlrpc_request *request;
393 struct ost_body *body;
394 int rc, size = sizeof(*body);
402 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
403 OST_PUNCH, 1, &size, NULL);
407 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
408 memcpy(&body->oa, oa, sizeof(*oa));
410 /* overload the size and blocks fields in the oa with start/end */
411 body->oa.o_size = start;
412 body->oa.o_blocks = end;
413 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
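/* e.g. a truncate to 'size' would be expressed as a punch of
 * [size, OBD_OBJECT_EOF) (assuming the usual OBD_OBJECT_EOF marker
 * for "end of object"). */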
415 request->rq_replen = lustre_msg_size(1, &size);
417 rc = ptlrpc_queue_wait(request);
421 body = lustre_swab_repbuf (request, 0, sizeof (*body),
422 lustre_swab_ost_body);
424 CERROR ("can't unpack ost_body\n");
425 GOTO (out, rc = -EPROTO);
428 memcpy(oa, &body->oa, sizeof(*oa));
432 ptlrpc_req_finished(request);
436 static int osc_sync(struct obd_export *exp, struct obdo *oa,
437 struct lov_stripe_md *md, obd_size start,
440 struct ptlrpc_request *request;
441 struct ost_body *body;
442 int rc, size = sizeof(*body);
450 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
451 OST_SYNC, 1, &size, NULL);
455 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
456 memcpy(&body->oa, oa, sizeof(*oa));
458 /* overload the size and blocks fields in the oa with start/end */
459 body->oa.o_size = start;
460 body->oa.o_blocks = end;
461 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
463 request->rq_replen = lustre_msg_size(1, &size);
465 rc = ptlrpc_queue_wait(request);
469 body = lustre_swab_repbuf(request, 0, sizeof(*body),
470 lustre_swab_ost_body);
472 CERROR ("can't unpack ost_body\n");
473 GOTO (out, rc = -EPROTO);
476 memcpy(oa, &body->oa, sizeof(*oa));
480 ptlrpc_req_finished(request);
484 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
485 struct lov_stripe_md *ea, struct obd_trans_info *oti)
487 struct ptlrpc_request *request;
488 struct ost_body *body;
489 int rc, size = sizeof(*body);
497 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
498 OST_DESTROY, 1, &size, NULL);
501 request->rq_request_portal = OST_DESTROY_PORTAL;
503 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
505 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) {
506 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
507 sizeof(*oti->oti_logcookies));
508 oti->oti_logcookies++;
511 memcpy(&body->oa, oa, sizeof(*oa));
512 request->rq_replen = lustre_msg_size(1, &size);
514 if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
515 ptlrpcd_add_req(request);
518 rc = ptlrpc_queue_wait(request);
524 ptlrpc_req_finished(request);
528 body = lustre_swab_repbuf(request, 0, sizeof(*body),
529 lustre_swab_ost_body);
531 CERROR ("Can't unpack body\n");
532 ptlrpc_req_finished(request);
536 memcpy(oa, &body->oa, sizeof(*oa));
537 ptlrpc_req_finished(request);
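/* Grant accounting, in brief: the OST grants the client space that it may
 * consume for cached dirty pages without risking ENOSPC at writeback time.
 * cl_avail_grant is the grant still available, cl_dirty the dirty bytes
 * currently cached (bounded by cl_dirty_max), and cl_lost_grant the grant
 * attached to pages dropped before they were written.  Each BRW announces
 * these numbers to the OST (osc_announce_cached) and picks up fresh grant
 * from the reply (osc_update_grant). */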
542 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
545 obd_valid bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
547 LASSERT(!(oa->o_valid & bits));
550 spin_lock(&cli->cl_loi_list_lock);
551 oa->o_dirty = cli->cl_dirty;
552 oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
553 oa->o_grant = cli->cl_avail_grant;
554 oa->o_dropped = cli->cl_lost_grant;
555 cli->cl_lost_grant = 0;
556 spin_unlock(&cli->cl_loi_list_lock);
557 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
558 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
561 /* caller must hold loi_list_lock */
562 static void osc_consume_write_grant(struct client_obd *cli,
563 struct osc_async_page *oap)
565 cli->cl_dirty += PAGE_SIZE;
566 cli->cl_avail_grant -= PAGE_SIZE;
567 oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
568 CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
569 LASSERT(cli->cl_avail_grant >= 0);
572 static unsigned long rpcs_in_flight(struct client_obd *cli)
574 return cli->cl_r_in_flight + cli->cl_w_in_flight;
577 /* caller must hold loi_list_lock */
578 void osc_wake_cache_waiters(struct client_obd *cli)
580 struct list_head *l, *tmp;
581 struct osc_cache_waiter *ocw;
583 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
584 /* if we can't dirty more, we must wait until some is written */
585 if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
586 CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
587 cli->cl_dirty, cli->cl_dirty_max);
591 /* if there is still dirty cache but no grant, wait for pending RPCs that
592 * may yet return us some grant before doing sync writes */
593 if (cli->cl_w_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
594 CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
595 cli->cl_w_in_flight);
597 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
598 list_del_init(&ocw->ocw_entry);
599 if (cli->cl_avail_grant < PAGE_SIZE) {
600 /* no more RPCs in flight to return grant, do sync IO */
601 ocw->ocw_rc = -EDQUOT;
602 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
604 osc_consume_write_grant(cli, ocw->ocw_oap);
607 wake_up(&ocw->ocw_waitq);
613 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
615 spin_lock(&cli->cl_loi_list_lock);
616 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
617 cli->cl_avail_grant += body->oa.o_grant;
618 /* waiters are woken in brw_interpret_oap */
619 spin_unlock(&cli->cl_loi_list_lock);
622 /* We assume that the reason this OSC got a short read is because it read
623 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
624 * via the LOV, and it _knows_ it's reading inside the file, it's just that
625 * this stripe never got written at or beyond this stripe offset yet. */
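/* Worked example (assuming three full 4096-byte pages and nob_read = 5000):
 * the first page is kept intact, the second is zero-filled from byte 904
 * onward, and the third is zeroed entirely. */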
626 static void handle_short_read(int nob_read, obd_count page_count,
627 struct brw_page *pga)
631 /* skip bytes read OK */
632 while (nob_read > 0) {
633 LASSERT (page_count > 0);
635 if (pga->count > nob_read) {
636 /* EOF inside this page */
637 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
638 memset(ptr + nob_read, 0, pga->count - nob_read);
645 nob_read -= pga->count;
650 /* zero remaining pages */
651 while (page_count-- > 0) {
652 ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
653 memset(ptr, 0, pga->count);
659 static int check_write_rcs(struct ptlrpc_request *request,
660 int requested_nob, int niocount,
661 obd_count page_count, struct brw_page *pga)
665 /* return error if any niobuf was in error */
666 remote_rcs = lustre_swab_repbuf(request, 1,
667 sizeof(*remote_rcs) * niocount, NULL);
668 if (remote_rcs == NULL) {
669 CERROR("Missing/short RC vector on BRW_WRITE reply\n");
672 if (lustre_msg_swabbed(request->rq_repmsg))
673 for (i = 0; i < niocount; i++)
674 __swab32s((__u32 *)&remote_rcs[i]);
676 for (i = 0; i < niocount; i++) {
677 if (remote_rcs[i] < 0)
678 return(remote_rcs[i]);
680 if (remote_rcs[i] != 0) {
681 CERROR("rc[%d] invalid (%d) req %p\n",
682 i, remote_rcs[i], request);
687 if (request->rq_bulk->bd_nob_transferred != requested_nob) {
688 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
689 request->rq_bulk->bd_nob_transferred, requested_nob);
696 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
698 if (p1->flag != p2->flag) {
699 unsigned mask = ~OBD_BRW_FROM_GRANT;
701 /* warn if we try to combine flags that we don't know to be safe to combine */
703 if ((p1->flag & mask) != (p2->flag & mask))
704 CERROR("is it ok to have flags 0x%x and 0x%x in the "
705 "same brw?\n", p1->flag, p2->flag);
709 return (p1->disk_offset + p1->count == p2->disk_offset);
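/* e.g. two pages with identical flags covering disk offsets [0, 4096) and
 * [4096, 8192) are mergeable and collapse into a single 8192-byte
 * niobuf_remote in osc_brw_prep_request(). */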
713 static obd_count cksum_pages(int nob, obd_count page_count,
714 struct brw_page *pga)
720 LASSERT (page_count > 0);
723 ost_checksum(&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
724 pga->count > nob ? nob : pga->count);
736 #define osc_encrypt_page(page, off, count) \
737 osc_crypt_page(page, off, count, ENCRYPT_DATA)
738 #define osc_decrypt_page(page, off, count) \
739 osc_crypt_page(page, off, count, DECRYPT_DATA)
741 /* Putting a global callback variable here is ugly, but putting it in
742 * client_obd does not seem like a good idea either. -WangDi */
743 crypt_cb_t osc_crypt_cb = NULL;
745 static int osc_crypt_page(struct page *page, obd_off page_off, obd_off count,
751 if (osc_crypt_cb != NULL)
752 rc = osc_crypt_cb(page, page_off, count, flags);
754 CERROR("crypt page error %d\n", rc);
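/* Build an OST_READ/OST_WRITE request.  Buffer 0 carries the ost_body (obdo),
 * buffer 1 the obd_ioobj describing the object, and buffer 2 an array of
 * niobuf_remote extents (adjacent pages that can_merge_pages() accepts are
 * folded into a single extent).  The page data itself travels via a separate
 * bulk descriptor, one bulk page per brw_page. */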
758 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
759 struct lov_stripe_md *lsm, obd_count page_count,
760 struct brw_page *pga, int *requested_nobp,
761 int *niocountp, struct ptlrpc_request **reqp)
763 struct ptlrpc_request *req;
764 struct ptlrpc_bulk_desc *desc;
765 struct client_obd *cli = &imp->imp_obd->u.cli;
766 struct ost_body *body;
767 struct obd_ioobj *ioobj;
768 struct niobuf_remote *niobuf;
776 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
778 for (niocount = i = 1; i < page_count; i++)
779 if (!can_merge_pages(&pga[i - 1], &pga[i]))
782 size[0] = sizeof(*body);
783 size[1] = sizeof(*ioobj);
784 size[2] = niocount * sizeof(*niobuf);
786 req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opc, 3, size, NULL);
790 if (opc == OST_WRITE)
791 desc = ptlrpc_prep_bulk_imp (req, page_count,
792 BULK_GET_SOURCE, OST_BULK_PORTAL);
794 desc = ptlrpc_prep_bulk_imp (req, page_count,
795 BULK_PUT_SINK, OST_BULK_PORTAL);
797 GOTO(out, rc = -ENOMEM);
798 /* NB request now owns desc and will free it when it gets freed */
800 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
801 ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
802 niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));
804 memcpy(&body->oa, oa, sizeof(*oa));
806 obdo_to_ioobj(oa, ioobj);
807 ioobj->ioo_bufcnt = niocount;
809 LASSERT (page_count > 0);
811 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
812 struct brw_page *pg = &pga[i];
813 struct brw_page *pg_prev = pg - 1;
815 LASSERT(pg->count > 0);
816 LASSERTF((pg->page_offset & ~PAGE_MASK)+ pg->count <= PAGE_SIZE,
817 "i: %d pg: %p pg_off: "LPU64", count: %u\n", i, pg,
818 pg->page_offset, pg->count);
819 LASSERTF(i == 0 || pg->disk_offset > pg_prev->disk_offset,
820 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
821 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
823 pg->pg, pg->pg->private, pg->pg->index, pg->disk_offset,
824 pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
825 pg_prev->disk_offset);
827 if (opc == OST_WRITE) {
828 osc_encrypt_page(pg->pg, pg->page_offset, pg->count);
831 ptlrpc_prep_bulk_page(desc, pg->pg,
832 pg->page_offset & ~PAGE_MASK, pg->count);
833 requested_nob += pg->count;
835 if (i > 0 && can_merge_pages(pg_prev, pg)) {
837 niobuf->len += pg->count;
839 niobuf->offset = pg->disk_offset;
840 niobuf->len = pg->count;
841 niobuf->flags = pg->flag;
845 LASSERT((void *)(niobuf - niocount) ==
846 lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
847 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
849 /* size[0] still sizeof (*body) */
850 if (opc == OST_WRITE) {
852 body->oa.o_valid |= OBD_MD_FLCKSUM;
853 body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
855 /* 1 RC per niobuf */
856 size[1] = sizeof(__u32) * niocount;
857 req->rq_replen = lustre_msg_size(2, size);
859 /* 1 RC for the whole I/O */
860 req->rq_replen = lustre_msg_size(1, size);
863 *niocountp = niocount;
864 *requested_nobp = requested_nob;
869 ptlrpc_req_finished (req);
873 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
874 int requested_nob, int niocount,
875 obd_count page_count, struct brw_page *pga,
878 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
879 struct ost_body *body;
885 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
887 CERROR ("Can't unpack body\n");
891 osc_update_grant(cli, body);
892 memcpy(oa, &body->oa, sizeof(*oa));
894 if (req->rq_reqmsg->opc == OST_WRITE) {
896 CERROR ("Unexpected +ve rc %d\n", rc);
899 LASSERT (req->rq_bulk->bd_nob == requested_nob);
900 osc_decrypt_page(pga->pg, pga->page_offset,
902 RETURN(check_write_rcs(req, requested_nob, niocount,
906 if (rc > requested_nob) {
907 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
911 if (rc != req->rq_bulk->bd_nob_transferred) {
912 CERROR ("Unexpected rc %d (%d transferred)\n",
913 rc, req->rq_bulk->bd_nob_transferred);
917 if (rc < requested_nob)
918 handle_short_read(rc, page_count, pga);
921 if (oa->o_valid & OBD_MD_FLCKSUM) {
922 const struct ptlrpc_peer *peer =
923 &req->rq_import->imp_connection->c_peer;
924 static int cksum_counter;
925 obd_count server_cksum = oa->o_cksum;
926 obd_count cksum = cksum_pages(rc, page_count, pga);
927 char str[PTL_NALFMT_SIZE];
929 ptlrpc_peernid2str(peer, str);
932 if (server_cksum != cksum) {
933 CERROR("Bad checksum: server %x, client %x, server NID "
934 LPX64" (%s)\n", server_cksum, cksum,
935 peer->peer_id.nid, str);
938 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
939 CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
940 cksum_counter, peer->peer_id.nid, str, cksum);
943 static int cksum_missed;
946 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
947 CERROR("Request checksum %u from "LPX64", no reply\n",
949 req->rq_import->imp_connection->c_peer.peer_id.nid);
952 osc_decrypt_page(pga->pg, pga->page_offset, pga->count);
956 static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
957 struct lov_stripe_md *lsm,
958 obd_count page_count, struct brw_page *pga)
962 struct ptlrpc_request *request;
967 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
968 page_count, pga, &requested_nob, &niocount,
973 rc = ptlrpc_queue_wait(request);
975 if (rc == -ETIMEDOUT && request->rq_resend) {
976 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
977 ptlrpc_req_finished(request);
981 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
982 page_count, pga, rc);
984 ptlrpc_req_finished(request);
988 static int brw_interpret(struct ptlrpc_request *request,
989 struct osc_brw_async_args *aa, int rc)
991 struct obdo *oa = aa->aa_oa;
992 int requested_nob = aa->aa_requested_nob;
993 int niocount = aa->aa_nio_count;
994 obd_count page_count = aa->aa_page_count;
995 struct brw_page *pga = aa->aa_pga;
998 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
999 page_count, pga, rc);
1003 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1004 struct lov_stripe_md *lsm, obd_count page_count,
1005 struct brw_page *pga, struct ptlrpc_request_set *set)
1007 struct ptlrpc_request *request;
1010 struct osc_brw_async_args *aa;
1014 rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
1015 page_count, pga, &requested_nob, &nio_count,
1018 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1019 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1021 aa->aa_requested_nob = requested_nob;
1022 aa->aa_nio_count = nio_count;
1023 aa->aa_page_count = page_count;
1026 request->rq_interpret_reply = brw_interpret;
1027 ptlrpc_set_add_req(set, request);
1033 #define min_t(type,x,y) \
1034 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
1038 * ugh, we want disk allocation on the target to happen in offset order. we'll
1039 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1040 * fine for our small page arrays and doesn't require allocation. it's an
1041 * insertion sort that swaps elements that are strides apart, shrinking the
1042 * stride down until it's '1' and the array is sorted.
1044 static void sort_brw_pages(struct brw_page *array, int num)
1047 struct brw_page tmp;
1051 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1056 for (i = stride ; i < num ; i++) {
1059 while (j >= stride && array[j - stride].disk_offset >
1061 array[j] = array[j - stride];
1066 } while (stride > 1);
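/* Note: stride = (stride * 3) + 1 above generates the 3h+1 gap sequence
 * 1, 4, 13, 40, 121, ... that Sedgewick recommends for shellsort. */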
1069 /* make sure the regions we're passing to elan don't violate its '4
1070 * fragments' constraint. portal headers are a fragment, all full
1071 * PAGE_SIZE long pages count as 1 fragment, and each partial page
1072 * counts as a fragment. I think. see bug 934. */
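/* By that rule, e.g. the header (1) plus any run of full pages (1) plus two
 * partial pages (2) already uses the whole budget of four fragments, so a
 * brw is cut short before it would add a further partial page. */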
1073 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
1076 int saw_whole_frag = 0;
1079 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
1080 if (pg->count == PAGE_SIZE) {
1081 if (!saw_whole_frag) {
1092 static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
1093 struct lov_stripe_md *lsm, obd_count page_count,
1094 struct brw_page *pga, struct obd_trans_info *oti)
1098 if (cmd == OBD_BRW_CHECK) {
1099 /* The caller just wants to know if there's a chance that this
1100 * I/O can succeed */
1101 struct obd_import *imp = class_exp2cliimp(exp);
1103 if (imp == NULL || imp->imp_invalid)
1108 while (page_count) {
1109 obd_count pages_per_brw;
1112 if (page_count > PTLRPC_MAX_BRW_PAGES)
1113 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1115 pages_per_brw = page_count;
1117 sort_brw_pages(pga, pages_per_brw);
1118 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1120 rc = osc_brw_internal(cmd, exp, oa, lsm, pages_per_brw, pga);
1125 page_count -= pages_per_brw;
1126 pga += pages_per_brw;
1131 static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
1132 struct lov_stripe_md *lsm, obd_count page_count,
1133 struct brw_page *pga, struct ptlrpc_request_set *set,
1134 struct obd_trans_info *oti)
1138 if (cmd == OBD_BRW_CHECK) {
1139 /* The caller just wants to know if there's a chance that this
1140 * I/O can succeed */
1141 struct obd_import *imp = class_exp2cliimp(exp);
1143 if (imp == NULL || imp->imp_invalid)
1148 while (page_count) {
1149 obd_count pages_per_brw;
1152 if (page_count > PTLRPC_MAX_BRW_PAGES)
1153 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
1155 pages_per_brw = page_count;
1157 sort_brw_pages(pga, pages_per_brw);
1158 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1160 rc = async_internal(cmd, exp, oa, lsm, pages_per_brw, pga, set);
1165 page_count -= pages_per_brw;
1166 pga += pages_per_brw;
1171 static void osc_check_rpcs(struct client_obd *cli);
1172 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1174 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
1175 static void lop_update_pending(struct client_obd *cli,
1176 struct loi_oap_pages *lop, int cmd, int delta);
1178 /* this is called when a sync waiter receives an interruption. Its job is to
1179 * get the caller woken as soon as possible. If its page hasn't been put in an
1180 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1181 * desiring interruption which will forcefully complete the rpc once the rpc
1183 static void osc_occ_interrupted(struct oig_callback_context *occ)
1185 struct osc_async_page *oap;
1186 struct loi_oap_pages *lop;
1187 struct lov_oinfo *loi;
1190 /* XXX member_of() */
1191 oap = list_entry(occ, struct osc_async_page, oap_occ);
1193 spin_lock(&oap->oap_cli->cl_loi_list_lock);
1195 oap->oap_interrupted = 1;
1197 /* ok, it's been put in an rpc. */
1198 if (oap->oap_request != NULL) {
1199 ptlrpc_mark_interrupted(oap->oap_request);
1200 ptlrpcd_wake(oap->oap_request);
1204 /* we don't get interruption callbacks until osc_trigger_sync_io()
1205 * has been called and put the sync oaps in the pending/urgent lists.*/
1206 if (!list_empty(&oap->oap_pending_item)) {
1207 list_del_init(&oap->oap_pending_item);
1208 if (oap->oap_async_flags & ASYNC_URGENT)
1209 list_del_init(&oap->oap_urgent_item);
1212 lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
1213 &loi->loi_write_lop : &loi->loi_read_lop;
1214 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1215 loi_list_maint(oap->oap_cli, oap->oap_loi);
1217 oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
1218 oap->oap_oig = NULL;
1222 spin_unlock(&oap->oap_cli->cl_loi_list_lock);
1225 /* this must be called holding the loi list lock to give coverage to exit_cache,
1226 * async_flag maintenance, and oap_request */
1227 static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
1228 struct osc_async_page *oap, int sent, int rc)
1230 osc_exit_cache(cli, oap, sent);
1231 oap->oap_async_flags = 0;
1232 oap->oap_interrupted = 0;
1234 if (oap->oap_request != NULL) {
1235 ptlrpc_req_finished(oap->oap_request);
1236 oap->oap_request = NULL;
1239 if (rc == 0 && oa != NULL)
1240 oap->oap_loi->loi_blocks = oa->o_blocks;
1243 oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
1244 oap->oap_oig = NULL;
1249 oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
1253 static int brw_interpret_oap(struct ptlrpc_request *request,
1254 struct osc_brw_async_args *aa, int rc)
1256 struct osc_async_page *oap;
1257 struct client_obd *cli;
1258 struct list_head *pos, *n;
1262 do_gettimeofday(&now);
1263 rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
1264 aa->aa_nio_count, aa->aa_page_count,
1267 CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);
1270 /* in failout recovery we ignore writeback failure and want
1271 * to just tell llite to unlock the page and continue */
1272 if (request->rq_reqmsg->opc == OST_WRITE &&
1273 (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
1274 CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
1276 cli->cl_import ? cli->cl_import->imp_invalid : -1);
1280 spin_lock(&cli->cl_loi_list_lock);
1282 if (request->rq_reqmsg->opc == OST_WRITE)
1283 lprocfs_stime_record(&cli->cl_write_stime, &now,
1284 &request->rq_rpcd_start);
1286 lprocfs_stime_record(&cli->cl_read_stime, &now,
1287 &request->rq_rpcd_start);
1291 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1292 * is called so we know whether to go to sync BRWs or wait for more
1293 * RPCs to complete */
1294 if (request->rq_reqmsg->opc == OST_WRITE)
1295 cli->cl_w_in_flight--;
1297 cli->cl_r_in_flight--;
1299 /* the caller may re-use the oap after the completion call so
1300 * we need to clean it up a little */
1301 list_for_each_safe(pos, n, &aa->aa_oaps) {
1302 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1304 //CDEBUG(D_INODE, "page %p index %lu oap %p\n",
1305 //oap->oap_page, oap->oap_page->index, oap);
1307 list_del_init(&oap->oap_rpc_item);
1308 osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
1311 osc_wake_cache_waiters(cli);
1312 osc_check_rpcs(cli);
1313 spin_unlock(&cli->cl_loi_list_lock);
1315 obdo_free(aa->aa_oa);
1316 OBD_FREE(aa->aa_pga, aa->aa_page_count * sizeof(struct brw_page));
1321 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
1322 struct list_head *rpc_list,
1323 int page_count, int cmd)
1325 struct ptlrpc_request *req;
1326 struct brw_page *pga = NULL;
1327 int requested_nob, nio_count;
1328 struct osc_brw_async_args *aa;
1329 struct obdo *oa = NULL;
1330 struct obd_async_page_ops *ops = NULL;
1331 void *caller_data = NULL;
1332 struct list_head *pos;
1335 LASSERT(!list_empty(rpc_list));
1337 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1339 RETURN(ERR_PTR(-ENOMEM));
1343 GOTO(out, req = ERR_PTR(-ENOMEM));
1346 list_for_each(pos, rpc_list) {
1347 struct osc_async_page *oap;
1349 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1351 ops = oap->oap_caller_ops;
1352 caller_data = oap->oap_caller_data;
1354 pga[i].disk_offset = oap->oap_obj_off + oap->oap_page_off;
1355 pga[i].page_offset = pga[i].disk_offset;
1356 pga[i].pg = oap->oap_page;
1357 pga[i].count = oap->oap_count;
1358 pga[i].flag = oap->oap_brw_flags;
1359 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1360 pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
1364 /* always get the data for the obdo for the rpc */
1365 LASSERT(ops != NULL);
1366 ops->ap_fill_obdo(caller_data, cmd, oa);
1368 sort_brw_pages(pga, page_count);
1369 rc = osc_brw_prep_request(cmd, cli->cl_import, oa, NULL, page_count,
1370 pga, &requested_nob, &nio_count, &req);
1372 CERROR("prep_req failed: %d\n", rc);
1373 GOTO(out, req = ERR_PTR(rc));
1376 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1377 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1379 aa->aa_requested_nob = requested_nob;
1380 aa->aa_nio_count = nio_count;
1381 aa->aa_page_count = page_count;
1390 OBD_FREE(pga, sizeof(*pga) * page_count);
1395 static void lop_update_pending(struct client_obd *cli,
1396 struct loi_oap_pages *lop, int cmd, int delta)
1398 lop->lop_num_pending += delta;
1399 if (cmd == OBD_BRW_WRITE)
1400 cli->cl_pending_w_pages += delta;
1402 cli->cl_pending_r_pages += delta;
1405 /* the loi lock is held across this function but it's allowed to release
1406 * and reacquire it during its work */
1407 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
1408 int cmd, struct loi_oap_pages *lop)
1410 struct ptlrpc_request *request;
1411 obd_count page_count = 0;
1412 struct list_head *tmp, *pos;
1413 struct osc_async_page *oap = NULL;
1414 struct osc_brw_async_args *aa;
1415 struct obd_async_page_ops *ops;
1416 LIST_HEAD(rpc_list);
1419 /* first we find the pages we're allowed to work with */
1420 list_for_each_safe(pos, tmp, &lop->lop_pending) {
1421 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
1422 ops = oap->oap_caller_ops;
1424 LASSERT(oap->oap_magic == OAP_MAGIC);
1426 /* in llite being 'ready' equates to the page being locked
1427 * until completion unlocks it. commit_write submits a page
1428 * as not ready because its unlock will happen unconditionally
1429 * as the call returns. if we race with commit_write giving
1430 * us that page we don't want to create a hole in the page
1431 * stream, so we stop and leave the rpc to be fired by
1432 * another dirtier or kupdated interval (the not ready page
1433 * will still be on the dirty list). we could call in
1434 * at the end of ll_file_write to process the queue again. */
1435 if (!(oap->oap_async_flags & ASYNC_READY)) {
1436 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
1438 CDEBUG(D_INODE, "oap %p page %p returned %d "
1439 "instead of ready\n", oap,
1443 /* llite is telling us that the page is still
1444 * in commit_write and that we should try
1445 * and put it in an rpc again later. we
1446 * break out of the loop so we don't create
1447 * a hole in the sequence of pages in the rpc
1452 /* the io isn't needed.. tell the checks
1453 * below to complete the rpc with EINTR */
1454 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1455 oap->oap_count = -EINTR;
1458 oap->oap_async_flags |= ASYNC_READY;
1461 LASSERTF(0, "oap %p page %p returned %d "
1462 "from make_ready\n", oap,
1470 * Page submitted for IO has to be locked. Either by
1471 * ->ap_make_ready() or by higher layers.
1473 * XXX nikita: this assertion should be adjusted when lustre
1474 * starts using PG_writeback for pages being written out.
1476 LASSERT(PageLocked(oap->oap_page));
1478 /* take the page out of our book-keeping */
1479 list_del_init(&oap->oap_pending_item);
1480 lop_update_pending(cli, lop, cmd, -1);
1481 list_del_init(&oap->oap_urgent_item);
1483 /* ask the caller for the size of the io as the rpc leaves. */
1484 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
1486 ops->ap_refresh_count(oap->oap_caller_data,cmd);
1487 if (oap->oap_count <= 0) {
1488 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
1490 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
1494 /* now put the page back in our accounting */
1495 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1496 if (++page_count >= cli->cl_max_pages_per_rpc)
1500 osc_wake_cache_waiters(cli);
1502 if (page_count == 0)
1505 loi_list_maint(cli, loi);
1506 spin_unlock(&cli->cl_loi_list_lock);
1508 request = osc_build_req(cli, &rpc_list, page_count, cmd);
1509 if (IS_ERR(request)) {
1510 /* this should happen rarely and is pretty bad, it makes the
1511 * pending list not follow the dirty order */
1512 spin_lock(&cli->cl_loi_list_lock);
1513 list_for_each_safe(pos, tmp, &rpc_list) {
1514 oap = list_entry(pos, struct osc_async_page,
1516 list_del_init(&oap->oap_rpc_item);
1518 /* queued sync pages can be torn down while the pages
1519 * were between the pending list and the rpc */
1520 if (oap->oap_interrupted) {
1521 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
1522 osc_ap_completion(cli, NULL, oap, 0,
1527 /* put the page back in the loi/lop lists */
1528 list_add_tail(&oap->oap_pending_item,
1530 lop_update_pending(cli, lop, cmd, 1);
1531 if (oap->oap_async_flags & ASYNC_URGENT)
1532 list_add(&oap->oap_urgent_item,
1535 loi_list_maint(cli, loi);
1536 RETURN(PTR_ERR(request));
1539 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1540 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1541 INIT_LIST_HEAD(&aa->aa_oaps);
1542 list_splice(&rpc_list, &aa->aa_oaps);
1543 INIT_LIST_HEAD(&rpc_list);
1546 if (cmd == OBD_BRW_READ) {
1547 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1548 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
1550 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1551 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1552 cli->cl_w_in_flight);
1556 spin_lock(&cli->cl_loi_list_lock);
1558 if (cmd == OBD_BRW_READ)
1559 cli->cl_r_in_flight++;
1561 cli->cl_w_in_flight++;
1562 /* queued sync pages can be torn down while the pages
1563 * were between the pending list and the rpc */
1564 list_for_each(pos, &aa->aa_oaps) {
1565 oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
1566 if (oap->oap_interrupted) {
1567 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1569 ptlrpc_mark_interrupted(request);
1574 CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %dr/%dw in flight\n",
1575 request, page_count, aa, cli->cl_r_in_flight,
1576 cli->cl_w_in_flight);
1578 oap->oap_request = ptlrpc_request_addref(request);
1579 request->rq_interpret_reply = brw_interpret_oap;
1580 ptlrpcd_add_req(request);
1584 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1590 if (lop->lop_num_pending == 0)
1593 /* if we have an invalid import we want to drain the queued pages
1594 * by forcing them through rpcs that immediately fail and complete
1595 * the pages. recovery relies on this to empty the queued pages
1596 * before canceling the locks and evicting the llite pages */
1597 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1600 /* stream rpcs in queue order as long as there is an urgent page
1601 * queued. this is our cheap solution for good batching in the case
1602 * where writepage marks some random page in the middle of the file as
1603 * urgent because of, say, memory pressure */
1604 if (!list_empty(&lop->lop_urgent))
1607 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1608 optimal = cli->cl_max_pages_per_rpc;
1609 if (cmd == OBD_BRW_WRITE) {
1610 /* trigger a write rpc stream as long as there are dirtiers
1611 * waiting for space. as they're waiting, they're not going to
1612 * create more pages to coalesce with what's waiting. */
1613 if (!list_empty(&cli->cl_cache_waiters))
1616 /* *2 to avoid triggering rpcs that would want to include pages
1617 * that are being queued but which can't be made ready until
1618 * the queuer finishes with the page. this is a wart for
1619 * llite::commit_write() */
1622 if (lop->lop_num_pending >= optimal)
1628 static void on_list(struct list_head *item, struct list_head *list,
1631 if (list_empty(item) && should_be_on)
1632 list_add_tail(item, list);
1633 else if (!list_empty(item) && !should_be_on)
1634 list_del_init(item);
1637 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1638 * can find pages to build into rpcs quickly */
1639 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1641 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1642 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1643 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1645 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1646 loi->loi_write_lop.lop_num_pending);
1648 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
1649 loi->loi_read_lop.lop_num_pending);
1652 #define LOI_DEBUG(LOI, STR, args...) \
1653 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
1654 !list_empty(&(LOI)->loi_cli_item), \
1655 (LOI)->loi_write_lop.lop_num_pending, \
1656 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
1657 (LOI)->loi_read_lop.lop_num_pending, \
1658 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
1661 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
1664 /* first return all objects which we already know to have
1665 * pages ready to be stuffed into rpcs */
1666 if (!list_empty(&cli->cl_loi_ready_list))
1667 RETURN(list_entry(cli->cl_loi_ready_list.next,
1668 struct lov_oinfo, loi_cli_item));
1670 /* then if we have cache waiters, return all objects with queued
1671 * writes. This is especially important when many small files
1672 * have filled up the cache and not been fired into rpcs because
1673 * they don't pass the nr_pending/object threshold */
1674 if (!list_empty(&cli->cl_cache_waiters) &&
1675 !list_empty(&cli->cl_loi_write_list))
1676 RETURN(list_entry(cli->cl_loi_write_list.next,
1677 struct lov_oinfo, loi_write_item));
1679 /* then return all queued objects when we have an invalid import
1680 * so that they get flushed */
1681 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
1682 if (!list_empty(&cli->cl_loi_write_list))
1683 RETURN(list_entry(cli->cl_loi_write_list.next,
1684 struct lov_oinfo, loi_write_item));
1685 if (!list_empty(&cli->cl_loi_read_list))
1686 RETURN(list_entry(cli->cl_loi_read_list.next,
1687 struct lov_oinfo, loi_read_item));
1692 /* called with the loi list lock held */
1693 static void osc_check_rpcs(struct client_obd *cli)
1695 struct lov_oinfo *loi;
1696 int rc = 0, race_counter = 0;
1699 while ((loi = osc_next_loi(cli)) != NULL) {
1700 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
1702 if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
1705 /* attempt some read/write balancing by alternating between
1706 * reads and writes in an object. The makes_rpc checks here
1707 * would be redundant if we were getting read/write work items
1708 * instead of objects. we don't want send_oap_rpc to drain a
1709 * partial read pending queue when we're given this object to
1710 * do io on writes while there are cache waiters */
1711 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
1712 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
1713 &loi->loi_write_lop);
1721 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
1722 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
1723 &loi->loi_read_lop);
1732 /* attempt some inter-object balancing by issuing rpcs
1733 * for each object in turn */
1734 if (!list_empty(&loi->loi_cli_item))
1735 list_del_init(&loi->loi_cli_item);
1736 if (!list_empty(&loi->loi_write_item))
1737 list_del_init(&loi->loi_write_item);
1738 if (!list_empty(&loi->loi_read_item))
1739 list_del_init(&loi->loi_read_item);
1741 loi_list_maint(cli, loi);
1743 /* send_oap_rpc fails with 0 when make_ready tells it to
1744 * back off. llite's make_ready does this when it tries
1745 * to lock a page queued for write that is already locked.
1746 * we want to try sending rpcs from many objects, but we
1747 * don't want to spin failing with 0. */
1748 if (race_counter == 10)
1754 /* we're trying to queue a page in the osc so we're subject to the
1755 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
1756 * If the osc's queued pages are already at that limit, then we want to sleep
1757 * until there is space in the osc's queue for us. We also may be waiting for
1758 * write credits from the OST if there are RPCs in flight that may return some
1759 * before we fall back to sync writes.
1761 * We need this to know whether our allocation was granted in the presence of signals */
1762 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1766 spin_lock(&cli->cl_loi_list_lock);
1767 rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
1768 spin_unlock(&cli->cl_loi_list_lock);
1772 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
1773 * grant or cache space. */
1774 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
1775 struct osc_async_page *oap)
1777 struct osc_cache_waiter ocw;
1778 struct l_wait_info lwi = { 0 };
1779 struct timeval start, stop;
1781 CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
1782 cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
1783 cli->cl_avail_grant);
1785 if (cli->cl_dirty_max < PAGE_SIZE)
1788 /* Hopefully normal case - cache space and write credits available */
1789 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1790 cli->cl_avail_grant >= PAGE_SIZE) {
1791 /* account for ourselves */
1792 osc_consume_write_grant(cli, oap);
1796 /* Make sure that there are write rpcs in flight to wait for. This
1797 * is a little silly as this object may not have any pending but
1798 * other objects sure might. */
1799 if (cli->cl_w_in_flight) {
1800 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1801 init_waitqueue_head(&ocw.ocw_waitq);
1805 loi_list_maint(cli, loi);
1806 osc_check_rpcs(cli);
1807 spin_unlock(&cli->cl_loi_list_lock);
1809 CDEBUG(0, "sleeping for cache space\n");
1810 do_gettimeofday(&start);
1811 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1812 do_gettimeofday(&stop);
1813 spin_lock(&cli->cl_loi_list_lock);
1814 lprocfs_stime_record(&cli->cl_enter_stime, &stop, &start);
1815 if (!list_empty(&ocw.ocw_entry)) {
1816 list_del(&ocw.ocw_entry);
1825 /* the companion to enter_cache, called when an oap is no longer part of the
1826 * dirty accounting, i.e. writeback completes or a truncate happens before writing
1827 * starts. must be called with the loi lock held. */
1828 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1833 if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
1838 oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
1839 cli->cl_dirty -= PAGE_SIZE;
1841 cli->cl_lost_grant += PAGE_SIZE;
1842 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
1843 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
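/* Async page API overview (a sketch): the upper layer calls
 * osc_prep_async_page() once per page to obtain an oap cookie (*res), then
 * queues the page with osc_queue_async_io() for ordinary writeback, or with
 * osc_queue_group_io() followed by osc_trigger_group_io() for synchronous
 * group IO.  The caller is driven through its obd_async_page_ops callbacks
 * (ap_make_ready, ap_refresh_count, ap_fill_obdo, ap_completion) as rpcs are
 * built and complete, and tears the page down with osc_teardown_async_page(). */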
1849 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
1850 struct lov_oinfo *loi, struct page *page,
1851 obd_off offset, struct obd_async_page_ops *ops,
1852 void *data, void **res)
1854 struct osc_async_page *oap;
1857 OBD_ALLOC(oap, sizeof(*oap));
1861 oap->oap_magic = OAP_MAGIC;
1862 oap->oap_cli = &exp->exp_obd->u.cli;
1865 oap->oap_caller_ops = ops;
1866 oap->oap_caller_data = data;
1868 oap->oap_page = page;
1869 oap->oap_obj_off = offset;
1871 INIT_LIST_HEAD(&oap->oap_pending_item);
1872 INIT_LIST_HEAD(&oap->oap_urgent_item);
1873 INIT_LIST_HEAD(&oap->oap_rpc_item);
1875 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
1877 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
1882 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1883 struct lov_oinfo *loi, void *cookie,
1884 int cmd, obd_off off, int count,
1885 obd_flags brw_flags, enum async_flags async_flags)
1887 struct client_obd *cli = &exp->exp_obd->u.cli;
1888 struct osc_async_page *oap;
1889 struct loi_oap_pages *lop;
1893 oap = OAP_FROM_COOKIE(cookie);
1895 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1898 if (!list_empty(&oap->oap_pending_item) ||
1899 !list_empty(&oap->oap_urgent_item) ||
1900 !list_empty(&oap->oap_rpc_item))
1904 loi = &lsm->lsm_oinfo[0];
1906 spin_lock(&cli->cl_loi_list_lock);
1909 oap->oap_async_flags = async_flags;
1910 oap->oap_page_off = off;
1911 oap->oap_count = count;
1912 oap->oap_brw_flags = brw_flags;
1914 if (cmd == OBD_BRW_WRITE) {
1915 rc = osc_enter_cache(cli, loi, oap);
1917 spin_unlock(&cli->cl_loi_list_lock);
1920 lop = &loi->loi_write_lop;
1922 lop = &loi->loi_read_lop;
1925 if (oap->oap_async_flags & ASYNC_URGENT)
1926 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1927 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1928 lop_update_pending(cli, lop, cmd, 1);
1930 loi_list_maint(cli, loi);
1932 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
1935 osc_check_rpcs(cli);
1936 spin_unlock(&cli->cl_loi_list_lock);
1941 /* aka (~was & now & flag), but this is more clear :) */
1942 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
1944 static int osc_set_async_flags(struct obd_export *exp,
1945 struct lov_stripe_md *lsm,
1946 struct lov_oinfo *loi, void *cookie,
1947 obd_flags async_flags)
1949 struct client_obd *cli = &exp->exp_obd->u.cli;
1950 struct loi_oap_pages *lop;
1951 struct osc_async_page *oap;
1955 oap = OAP_FROM_COOKIE(cookie);
1957 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1961 loi = &lsm->lsm_oinfo[0];
1963 if (oap->oap_cmd == OBD_BRW_WRITE) {
1964 lop = &loi->loi_write_lop;
1966 lop = &loi->loi_read_lop;
1969 spin_lock(&cli->cl_loi_list_lock);
1971 if (list_empty(&oap->oap_pending_item))
1972 GOTO(out, rc = -EINVAL);
1974 if ((oap->oap_async_flags & async_flags) == async_flags)
1977 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
1978 oap->oap_async_flags |= ASYNC_READY;
1980 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
1981 if (list_empty(&oap->oap_rpc_item)) {
1982 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1983 loi_list_maint(cli, loi);
1987 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
1988 oap->oap_async_flags);
1990 osc_check_rpcs(cli);
1991 spin_unlock(&cli->cl_loi_list_lock);
1995 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
1996 struct lov_oinfo *loi,
1997 struct obd_io_group *oig, void *cookie,
1998 int cmd, obd_off off, int count,
1999 obd_flags brw_flags,
2000 obd_flags async_flags)
2002 struct client_obd *cli = &exp->exp_obd->u.cli;
2003 struct osc_async_page *oap;
2004 struct loi_oap_pages *lop;
2007 oap = OAP_FROM_COOKIE(cookie);
2009 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2012 if (!list_empty(&oap->oap_pending_item) ||
2013 !list_empty(&oap->oap_urgent_item) ||
2014 !list_empty(&oap->oap_rpc_item))
2018 loi = &lsm->lsm_oinfo[0];
2020 spin_lock(&cli->cl_loi_list_lock);
2023 oap->oap_page_off = off;
2024 oap->oap_count = count;
2025 oap->oap_brw_flags = brw_flags;
2026 oap->oap_async_flags = async_flags;
2028 if (cmd == OBD_BRW_WRITE)
2029 lop = &loi->loi_write_lop;
2031 lop = &loi->loi_read_lop;
2033 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
2034 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
2036 oig_add_one(oig, &oap->oap_occ);
2039 LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);
2041 spin_unlock(&cli->cl_loi_list_lock);
2046 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
2047 struct loi_oap_pages *lop, int cmd)
2049 struct list_head *pos, *tmp;
2050 struct osc_async_page *oap;
2052 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
2053 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
2054 list_del(&oap->oap_pending_item);
2055 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2056 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2057 lop_update_pending(cli, lop, cmd, 1);
2059 loi_list_maint(cli, loi);
2062 static int osc_trigger_group_io(struct obd_export *exp,
2063 struct lov_stripe_md *lsm,
2064 struct lov_oinfo *loi,
2065 struct obd_io_group *oig)
2067 struct client_obd *cli = &exp->exp_obd->u.cli;
2071 loi = &lsm->lsm_oinfo[0];
2073 spin_lock(&cli->cl_loi_list_lock);
2075 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2076 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2078 osc_check_rpcs(cli);
2079 spin_unlock(&cli->cl_loi_list_lock);
2084 static int osc_teardown_async_page(struct obd_export *exp,
2085 struct lov_stripe_md *lsm,
2086 struct lov_oinfo *loi, void *cookie)
2088 struct client_obd *cli = &exp->exp_obd->u.cli;
2089 struct loi_oap_pages *lop;
2090 struct osc_async_page *oap;
2094 oap = OAP_FROM_COOKIE(cookie);
2097 loi = &lsm->lsm_oinfo[0];
2099 if (oap->oap_cmd == OBD_BRW_WRITE) {
2100 lop = &loi->loi_write_lop;
2102 lop = &loi->loi_read_lop;
2105 spin_lock(&cli->cl_loi_list_lock);
2107 if (!list_empty(&oap->oap_rpc_item))
2108 GOTO(out, rc = -EBUSY);
2110 osc_exit_cache(cli, oap, 0);
2111 osc_wake_cache_waiters(cli);
2113 if (!list_empty(&oap->oap_urgent_item)) {
2114 list_del_init(&oap->oap_urgent_item);
2115 oap->oap_async_flags &= ~ASYNC_URGENT;
2117 if (!list_empty(&oap->oap_pending_item)) {
2118 list_del_init(&oap->oap_pending_item);
2119 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2121 loi_list_maint(cli, loi);
2123 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2125 spin_unlock(&cli->cl_loi_list_lock);
2127 OBD_FREE(oap, sizeof(*oap));
2132 /* Note: caller will lock/unlock, and set uptodate on the pages */
2133 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
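/* SAN variant (2.4 kernels only), in brief: the OST_SAN_READ/OST_SAN_WRITE
 * reply returns niobuf_remote entries whose offsets are block numbers on a
 * shared SAN device (cl_sandev); the client then performs the data IO itself
 * through buffer heads and ll_rw_block() rather than via a bulk transfer. */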
2134 static int sanosc_brw_read(struct obd_export *exp, struct obdo *oa,
2135 struct lov_stripe_md *lsm, obd_count page_count,
2136 struct brw_page *pga)
2138 struct ptlrpc_request *request = NULL;
2139 struct ost_body *body;
2140 struct niobuf_remote *nioptr;
2141 struct obd_ioobj *iooptr;
2142 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2146 /* XXX does not handle 'new' brw protocol */
2148 size[1] = sizeof(struct obd_ioobj);
2149 size[2] = page_count * sizeof(*nioptr);
2151 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2152 OST_SAN_READ, 3, size, NULL);
2156 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
2157 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
2158 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2159 sizeof(*nioptr) * page_count);
2161 memcpy(&body->oa, oa, sizeof(body->oa));
2163 obdo_to_ioobj(oa, iooptr);
2164 iooptr->ioo_bufcnt = page_count;
2166 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2167 LASSERT(PageLocked(pga[mapped].pg));
2168 LASSERT(mapped == 0 ||
2169 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2171 nioptr->offset = pga[mapped].disk_offset;
2172 nioptr->len = pga[mapped].count;
2173 nioptr->flags = pga[mapped].flag;
2176 size[1] = page_count * sizeof(*nioptr);
2177 request->rq_replen = lustre_msg_size(2, size);
2179 rc = ptlrpc_queue_wait(request);
2183 body = lustre_swab_repbuf(request, 0, sizeof(*body),
2184 lustre_swab_ost_body);
2186 CERROR("Can't unpack body\n");
2187 GOTO(out_req, rc = -EPROTO);
2190 memcpy(oa, &body->oa, sizeof(*oa));
2192 swab = lustre_msg_swabbed(request->rq_repmsg);
2193 LASSERT_REPSWAB(request, 1);
2194 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2196 /* nioptr missing or short */
2197 GOTO(out_req, rc = -EPROTO);
2201 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2202 struct page *page = pga[mapped].pg;
2203 struct buffer_head *bh;
2207 lustre_swab_niobuf_remote (nioptr);
2209 /* got san device associated */
2210 LASSERT(exp->exp_obd != NULL);
2211 dev = exp->exp_obd->u.cli.cl_sandev;
2214 if (!nioptr->offset) {
2215 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
2216 page->mapping->host->i_ino,
2218 memset(page_address(page), 0, PAGE_SIZE);
2222 if (!page->buffers) {
2223 create_empty_buffers(page, dev, PAGE_SIZE);
2226 clear_bit(BH_New, &bh->b_state);
2227 set_bit(BH_Mapped, &bh->b_state);
2228 bh->b_blocknr = (unsigned long)nioptr->offset;
2230 clear_bit(BH_Uptodate, &bh->b_state);
2232 ll_rw_block(READ, 1, &bh);
2236 /* if buffer already existed, it must be the
2237 * one we mapped before, check it */
2238 LASSERT(!test_bit(BH_New, &bh->b_state));
2239 LASSERT(test_bit(BH_Mapped, &bh->b_state));
2240 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
2242 /* wait for its io completion */
2243 if (test_bit(BH_Lock, &bh->b_state))
2246 if (!test_bit(BH_Uptodate, &bh->b_state))
2247 ll_rw_block(READ, 1, &bh);
2251 /* must do synchronous write here */
2253 if (!buffer_uptodate(bh)) {
2261 ptlrpc_req_finished(request);
2265 static int sanosc_brw_write(struct obd_export *exp, struct obdo *oa,
2266 struct lov_stripe_md *lsm, obd_count page_count,
2267 struct brw_page *pga)
2269 struct ptlrpc_request *request = NULL;
2270 struct ost_body *body;
2271 struct niobuf_remote *nioptr;
2272 struct obd_ioobj *iooptr;
2273 int rc, size[3] = {sizeof(*body)}, mapped = 0;
2277 size[1] = sizeof(struct obd_ioobj);
2278 size[2] = page_count * sizeof(*nioptr);
2280 request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2281 OST_SAN_WRITE, 3, size, NULL);
2285 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
2286 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
2287 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
2288 sizeof (*nioptr) * page_count);
2290 memcpy(&body->oa, oa, sizeof(body->oa));
2292 obdo_to_ioobj(oa, iooptr);
2293 iooptr->ioo_bufcnt = page_count;
2296 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2297 LASSERT(PageLocked(pga[mapped].pg));
2298 LASSERT(mapped == 0 ||
2299 pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
2301 nioptr->offset = pga[mapped].disk_offset;
2302 nioptr->len = pga[mapped].count;
2303 nioptr->flags = pga[mapped].flag;
2306 size[1] = page_count * sizeof(*nioptr);
2307 request->rq_replen = lustre_msg_size(2, size);
2309 rc = ptlrpc_queue_wait(request);
2313 swab = lustre_msg_swabbed (request->rq_repmsg);
2314 LASSERT_REPSWAB (request, 1);
2315 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
2317 CERROR("absent/short niobuf array\n");
2318 GOTO(out_req, rc = -EPROTO);
2322 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
2323 struct page *page = pga[mapped].pg;
2324 struct buffer_head *bh;
2328 lustre_swab_niobuf_remote (nioptr);
2330 /* get the SAN device associated with this export */
2331 LASSERT(exp->exp_obd != NULL);
2332 dev = exp->exp_obd->u.cli.cl_sandev;
2334 if (!page->buffers) {
2335 create_empty_buffers(page, dev, PAGE_SIZE);
2338 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
2339 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
2340 LASSERT(page->buffers->b_blocknr ==
2341 (unsigned long)nioptr->offset);
2347 /* if the buffer is locked, wait for its I/O completion */
2348 if (test_bit(BH_Lock, &bh->b_state))
2351 clear_bit(BH_New, &bh->b_state);
2352 set_bit(BH_Mapped, &bh->b_state);
2354 /* override the block nr */
2355 bh->b_blocknr = (unsigned long)nioptr->offset;
2357 /* we are about to write it, so mark it uptodate and dirty;
2359 * the page lock should guarantee no race condition here */
2360 set_bit(BH_Uptodate, &bh->b_state);
2361 set_bit(BH_Dirty, &bh->b_state);
2363 ll_rw_block(WRITE, 1, &bh);
2365 /* must do synchronous write here */
2367 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
2375 ptlrpc_req_finished(request);
2379 static int sanosc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
2380 struct lov_stripe_md *lsm, obd_count page_count,
2381 struct brw_page *pga, struct obd_trans_info *oti)
2385 while (page_count) {
2386 obd_count pages_per_brw;
2389 if (page_count > PTLRPC_MAX_BRW_PAGES)
2390 pages_per_brw = PTLRPC_MAX_BRW_PAGES;
2392 pages_per_brw = page_count;
2394 if (cmd & OBD_BRW_WRITE)
2395 rc = sanosc_brw_write(exp, oa, lsm, pages_per_brw,pga);
2397 rc = sanosc_brw_read(exp, oa, lsm, pages_per_brw, pga);
2402 page_count -= pages_per_brw;
2403 pga += pages_per_brw;
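/* Illustrative sketch of the chunking above: a transfer of, say,
 * (2 * PTLRPC_MAX_BRW_PAGES + 100) pages is issued as three
 * OST_SAN_READ/OST_SAN_WRITE RPCs:
 *
 *   rpc 1: pga[0        .. MAX - 1]        (MAX pages)
 *   rpc 2: pga[MAX      .. 2 * MAX - 1]    (MAX pages)
 *   rpc 3: pga[2 * MAX  .. 2 * MAX + 99]   (100 pages)
 *
 * where MAX == PTLRPC_MAX_BRW_PAGES; page_count shrinks and pga advances in
 * lock-step after each call. */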
2410 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data)
2412 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2415 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
2419 lock_res_and_lock(lock);
2421 if (lock->l_ast_data && lock->l_ast_data != data) {
2422 struct inode *new_inode = data;
2423 struct inode *old_inode = lock->l_ast_data;
2424 if (!(old_inode->i_state & I_FREEING))
2425 LDLM_ERROR(lock, "inconsistent l_ast_data found");
2426 LASSERTF(old_inode->i_state & I_FREEING,
2427 "Found existing inode %p/%lu/%u state %lu in lock: "
2428 "setting data to %p/%lu/%u\n", old_inode,
2429 old_inode->i_ino, old_inode->i_generation,
2431 new_inode, new_inode->i_ino, new_inode->i_generation);
2434 lock->l_ast_data = data;
2435 unlock_res_and_lock(lock);
2436 LDLM_LOCK_PUT(lock);
2439 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2440 ldlm_iterator_t replace, void *data)
2442 struct ldlm_res_id res_id = { .name = {0} };
2443 struct obd_device *obd = class_exp2obd(exp);
2445 res_id.name[0] = lsm->lsm_object_id;
2446 res_id.name[2] = lsm->lsm_object_gr;
2447 ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
2451 static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
2452 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2453 int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
2454 void *data, __u32 lvb_len, void *lvb_swabber,
2455 struct lustre_handle *lockh)
2457 struct obd_device *obd = exp->exp_obd;
2458 struct ldlm_res_id res_id = { .name = {0} };
2460 struct ldlm_reply *rep;
2461 struct ptlrpc_request *req = NULL;
2465 res_id.name[0] = lsm->lsm_object_id;
2466 res_id.name[2] = lsm->lsm_object_gr;
2468 /* Filesystem lock extents are extended to page boundaries so that
2469 * dealing with the page cache is a little smoother. */
2470 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2471 policy->l_extent.end |= ~PAGE_MASK;
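/* Worked example of the rounding above (illustration only; assumes
 * PAGE_SIZE == 4096, so ~PAGE_MASK == 4095):
 *
 *   start = 5000:  5000 - (5000 & 4095) = 5000 - 904 = 4096
 *   end   = 6000:  6000 | 4095                       = 8191
 *
 * i.e. the byte range [5000, 6000] widens to the page-aligned extent
 * [4096, 8191] before the lock is matched or enqueued. */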
2473 if (lsm->lsm_oinfo->loi_kms_valid == 0)
2476 /* Next, search for already existing extent locks that will cover us */
2477 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
2480 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2481 /* return immediately if no credential held */
2482 ldlm_lock_decref(lockh, mode);
2486 osc_set_data_with_check(lockh, data);
2487 if (*flags & LDLM_FL_HAS_INTENT) {
2488 /* I would like to be able to ASSERT here that rss <=
2489 * kms, but I can't, for reasons which are explained in
2492 /* We already have a lock, and it's referenced */
2496 /* If we're trying to read, we also search for an existing PW lock. The
2497 * VFS and page cache already protect us locally, so lots of readers/
2498 * writers can share a single PW lock.
2500 * There are problems with conversion deadlocks, so instead of
2501 * converting a read lock to a write lock, we'll just enqueue a new
2504 * At some point we should cancel the read lock instead of making them
2505 * send us a blocking callback, but there are problems with canceling
2506 * locks out from other users right now, too. */
2508 if (mode == LCK_PR) {
2509 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2510 policy, LCK_PW, lockh);
2512 if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
2513 /* return immediately if no credential held */
2514 ldlm_lock_decref(lockh, LCK_PW);
2518 /* FIXME: This is not incredibly elegant, but it might
2519 * be more elegant than adding another parameter to
2520 * lock_match. I want a second opinion. */
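/* The LCK_PW match above left us holding a PW reference, but the caller
 * asked for PR: take a PR reference first, then drop the PW one, so the
 * caller ends up holding the lock in exactly the mode it requested. */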
2521 ldlm_lock_addref(lockh, LCK_PR);
2522 ldlm_lock_decref(lockh, LCK_PW);
2523 osc_set_data_with_check(lockh, data);
2527 if (mode == LCK_PW) {
2528 rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
2529 policy, LCK_PR, lockh);
2531 rc = ldlm_cli_convert(lockh, mode, flags);
2533 /* Update readers/writers accounting */
2534 ldlm_lock_addref(lockh, LCK_PW);
2535 ldlm_lock_decref(lockh, LCK_PR);
2536 osc_set_data_with_check(lockh, data);
2539 /* If the conversion failed, we need to drop the refcount
2540 on the matched lock before we get a new one */
2541 /* XXX Wouldn't it save us some effort to cancel the PR
2542 lock here? We are going to take a PW lock anyway and it
2543 will invalidate the PR lock */
2544 ldlm_lock_decref(lockh, LCK_PR);
2545 if (rc != EDEADLOCK) {
2552 if (*flags & LDLM_FL_HAS_INTENT) {
2553 int size[2] = {0, sizeof(struct ldlm_request)};
2555 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
2556 LDLM_ENQUEUE, 2, size, NULL);
2560 size[0] = sizeof(*rep);
2561 size[1] = sizeof(lvb);
2562 req->rq_replen = lustre_msg_size(2, size);
2564 rc = ldlm_cli_enqueue(exp, req, obd->obd_namespace, res_id, type,
2565 policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
2566 &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
2568 if (rc == ELDLM_LOCK_ABORTED) {
2569 /* swabbed by ldlm_cli_enqueue() */
2570 LASSERT_REPSWABBED(req, 0);
2571 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
2572 LASSERT(rep != NULL);
2573 if (rep->lock_policy_res1)
2574 rc = rep->lock_policy_res1;
2576 ptlrpc_req_finished(req);
2579 if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
2580 CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
2581 lvb.lvb_size, lvb.lvb_blocks);
2582 lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
2583 lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
2589 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
2590 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2591 int *flags, void *data, struct lustre_handle *lockh)
2593 struct ldlm_res_id res_id = { .name = {0} };
2594 struct obd_device *obd = exp->exp_obd;
2598 res_id.name[0] = lsm->lsm_object_id;
2599 res_id.name[2] = lsm->lsm_object_gr;
2601 OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
2603 /* Filesystem lock extents are extended to page boundaries so that
2604 * dealing with the page cache is a little smoother */
2605 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2606 policy->l_extent.end |= ~PAGE_MASK;
2608 /* Next, search for already existing extent locks that will cover us */
2609 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2610 policy, mode, lockh);
2612 // if (!(*flags & LDLM_FL_TEST_LOCK))
2613 osc_set_data_with_check(lockh, data);
2616 /* If we're trying to read, we also search for an existing PW lock. The
2617 * VFS and page cache already protect us locally, so lots of readers/
2618 * writers can share a single PW lock. */
2619 if (mode == LCK_PR) {
2620 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
2621 policy, LCK_PW, lockh);
2622 if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
2623 /* FIXME: This is not incredibly elegant, but it might
2624 * be more elegant than adding another parameter to
2625 * lock_match. I want a second opinion. */
2626 osc_set_data_with_check(lockh, data);
2627 ldlm_lock_addref(lockh, LCK_PR);
2628 ldlm_lock_decref(lockh, LCK_PW);
2634 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
2635 __u32 mode, struct lustre_handle *lockh)
2639 if (mode == LCK_GROUP)
2640 ldlm_lock_decref_and_cancel(lockh, mode);
2642 ldlm_lock_decref(lockh, mode);
2647 static int osc_cancel_unused(struct obd_export *exp,
2648 struct lov_stripe_md *lsm,
2649 int flags, void *opaque)
2651 struct obd_device *obd = class_exp2obd(exp);
2652 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
2655 res_id.name[0] = lsm->lsm_object_id;
2656 res_id.name[2] = lsm->lsm_object_gr;
2660 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
2663 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
2664 unsigned long max_age)
2666 struct obd_statfs *msfs;
2667 struct ptlrpc_request *request;
2668 int rc, size = sizeof(*osfs);
2671 /* We could possibly pass max_age in the request (as an absolute
2672 * timestamp or a "seconds.usec ago") so the target can avoid doing
2673 * extra calls into the filesystem if that isn't necessary (e.g.
2674 * during mount that would help a bit). Having relative timestamps
2675 * is not so great if request processing is slow, while absolute
2676 * timestamps are not ideal because they need time synchronization. */
2677 request = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
2678 OST_STATFS, 0, NULL, NULL);
2682 request->rq_replen = lustre_msg_size(1, &size);
2683 request->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
2685 rc = ptlrpc_queue_wait(request);
2689 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
2690 lustre_swab_obd_statfs);
2692 CERROR("Can't unpack obd_statfs\n");
2693 GOTO(out, rc = -EPROTO);
2696 memcpy(osfs, msfs, sizeof(*osfs));
2700 ptlrpc_req_finished(request);
2704 /* Retrieve object striping information.
2706 * @lump is a pointer to a userspace struct lov_user_md; its
2707 * lmm_stripe_count gives the maximum number of OST indices that will fit
2708 * in the user buffer. lmm_magic must be LOV_USER_MAGIC (we only use one slot here).
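 *
 * Sketch of a hypothetical userspace caller (illustration only; the real
 * call descends through the upper client layers) reaching this function via
 * the LL_IOC_LOV_GETSTRIPE case in osc_iocontrol() below:
 *
 *   struct lov_user_md lum = { .lmm_magic        = LOV_USER_MAGIC,
 *                              .lmm_stripe_count = 1 };
 *   if (ioctl(fd, LL_IOC_LOV_GETSTRIPE, &lum) == 0)
 *           printf("object id %llu\n",
 *                  (unsigned long long)lum.lmm_object_id);
 */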
2710 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2712 struct lov_user_md lum, *lumk;
2719 rc = copy_from_user(&lum, lump, sizeof(lum));
2723 if (lum.lmm_magic != LOV_USER_MAGIC)
2726 if (lum.lmm_stripe_count > 0) {
2727 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
2728 OBD_ALLOC(lumk, lum_size);
2732 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
2733 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
2735 lum_size = sizeof(lum);
2739 lumk->lmm_object_id = lsm->lsm_object_id;
2740 lumk->lmm_object_gr = lsm->lsm_object_gr;
2741 lumk->lmm_stripe_count = 1;
2743 if (copy_to_user(lump, lumk, lum_size))
2747 OBD_FREE(lumk, lum_size);
2752 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2753 void *karg, void *uarg)
2755 struct obd_device *obd = exp->exp_obd;
2756 struct obd_ioctl_data *data = karg;
2760 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2763 if (!try_module_get(THIS_MODULE)) {
2764 CERROR("Can't get module. Is it alive?");
2769 case OBD_IOC_LOV_GET_CONFIG: {
2771 struct lov_desc *desc;
2772 struct obd_uuid uuid;
2776 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
2777 GOTO(out, err = -EINVAL);
2779 data = (struct obd_ioctl_data *)buf;
2781 if (sizeof(*desc) > data->ioc_inllen1) {
2783 GOTO(out, err = -EINVAL);
2786 if (data->ioc_inllen2 < sizeof(uuid)) {
2788 GOTO(out, err = -EINVAL);
2791 if (data->ioc_inllen3 < sizeof(__u32)) {
2793 GOTO(out, err = -EINVAL);
2796 desc = (struct lov_desc *)data->ioc_inlbuf1;
2797 desc->ld_tgt_count = 1;
2798 desc->ld_active_tgt_count = 1;
2799 desc->ld_default_stripe_count = 1;
2800 desc->ld_default_stripe_size = 0;
2801 desc->ld_default_stripe_offset = 0;
2802 desc->ld_pattern = 0;
2803 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2804 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2805 *((__u32 *)data->ioc_inlbuf3) = 1;
2807 err = copy_to_user((void *)uarg, buf, len);
2810 obd_ioctl_freedata(buf, len);
2813 case LL_IOC_LOV_SETSTRIPE:
2814 err = obd_alloc_memmd(exp, karg);
2818 case LL_IOC_LOV_GETSTRIPE:
2819 err = osc_getstripe(karg, uarg);
2821 case OBD_IOC_CLIENT_RECOVER:
2822 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2827 case IOC_OSC_SET_ACTIVE:
2828 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2831 case IOC_OSC_CTL_RECOVERY:
2832 err = ptlrpc_import_control_recovery(obd->u.cli.cl_import,
2836 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
2837 GOTO(out, err = -ENOTTY);
2840 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2843 module_put(THIS_MODULE);
2848 static int osc_get_info(struct obd_export *exp, __u32 keylen,
2849 void *key, __u32 *vallen, void *val)
2852 if (!vallen || !val)
2855 if (keylen > strlen("lock_to_stripe") &&
2856 strcmp(key, "lock_to_stripe") == 0) {
2857 __u32 *stripe = val;
2858 *vallen = sizeof(*stripe);
2861 } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
2862 struct ptlrpc_request *req;
2864 char *bufs[1] = {key};
2866 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2867 OST_GET_INFO, 1, (int *)&keylen, bufs);
2871 req->rq_replen = lustre_msg_size(1, (int *)vallen);
2872 rc = ptlrpc_queue_wait(req);
2876 reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
2877 lustre_swab_ost_last_id);
2878 if (reply == NULL) {
2879 CERROR("Can't unpack OST last ID\n");
2880 GOTO(out, rc = -EPROTO);
2882 *((obd_id *)val) = *reply;
2884 ptlrpc_req_finished(req);
2886 } else if (keylen >= strlen("client_nid") &&
2887 strcmp(key, "client_nid") == 0) {
2888 struct ptlrpc_connection * conn;
2889 ptl_nid_t * nid = val;
2890 *vallen = sizeof(*nid);
2892 conn = class_exp2cliimp(exp)->imp_connection;
2896 *nid = conn->c_peer.peer_id.nid;
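/* Sketch (assumed caller, not taken from this file) of how an upper layer
 * might query the "last_id" key handled above through the generic
 * obd_get_info() entry point, which dispatches to this method:
 *
 *   obd_id last_id;
 *   __u32 vallen = sizeof(last_id);
 *   rc = obd_get_info(exp, strlen("last_id") + 1, "last_id",
 *                     &vallen, &last_id);
 */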
2903 static int osc_set_info(struct obd_export *exp, obd_count keylen,
2904 void *key, obd_count vallen, void *val)
2906 struct obd_device *obd = exp->exp_obd;
2907 struct obd_import *imp = class_exp2cliimp(exp);
2908 struct llog_ctxt *ctxt;
2912 if (keylen == strlen("unlinked") &&
2913 memcmp(key, "unlinked", keylen) == 0) {
2914 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2915 spin_lock(&oscc->oscc_lock);
2916 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
2917 spin_unlock(&oscc->oscc_lock);
2920 if (keylen == strlen("unrecovery") &&
2921 memcmp(key, "unrecovery", keylen) == 0) {
2922 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
2923 spin_lock(&oscc->oscc_lock);
2924 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
2925 spin_unlock(&oscc->oscc_lock);
2928 if (keylen == strlen("initial_recov") &&
2929 memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
2930 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2931 if (vallen != sizeof(int))
2933 imp->imp_initial_recov = *(int *)val;
2934 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
2935 exp->exp_obd->obd_name,
2936 imp->imp_initial_recov);
2940 if (keylen == strlen("async") &&
2941 memcmp(key, "async", keylen) == 0) {
2942 struct client_obd *cl = &obd->u.cli;
2943 if (vallen != sizeof(int))
2945 cl->cl_async = *(int *)val;
2946 CDEBUG(D_HA, "%s: set async = %d\n",
2947 obd->obd_name, cl->cl_async);
2951 if (keylen == 5 && strcmp(key, "audit") == 0) {
2952 struct ptlrpc_request *req;
2953 char *bufs[2] = {key, val};
2954 int size[2] = {keylen, vallen};
2956 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2957 OST_SET_INFO, 2, size, bufs);
2961 req->rq_replen = lustre_msg_size(0, size);
2962 lustre_swab_reqbuf(req, 1, sizeof(struct audit_attr_msg),
2963 lustre_swab_audit_attr);
2964 rc = ptlrpc_queue_wait(req);
2966 ptlrpc_req_finished(req);
2970 if (keylen == 9 && strcmp(key, "audit_obj") == 0) {
2971 struct ptlrpc_request *req;
2972 char *bufs[2] = {key, val};
2973 int size[2] = {keylen, vallen};
2975 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2976 OST_SET_INFO, 2, size, bufs);
2980 req->rq_replen = lustre_msg_size(0, size);
2981 lustre_swab_reqbuf(req, 1, sizeof(struct obdo),
2983 rc = ptlrpc_queue_wait(req);
2985 ptlrpc_req_finished(req);
2989 if (keylen == 8 && memcmp(key, "auditlog", 8) == 0) {
2990 struct ptlrpc_request *req;
2991 char *bufs[2] = {key, val};
2992 int size[2] = {keylen, vallen};
2994 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
2995 OST_SET_INFO, 2, size, bufs);
2999 req->rq_replen = lustre_msg_size(0, size);
3000 lustre_swab_reqbuf(req, 1, sizeof(struct audit_msg),
3001 lustre_swab_audit_msg);
3002 rc = ptlrpc_queue_wait(req);
3004 ptlrpc_req_finished(req);
3008 if (keylen == strlen("sec") &&
3009 memcmp(key, "sec", keylen) == 0) {
3010 struct client_obd *cli = &exp->exp_obd->u.cli;
3012 cli->cl_sec_flavor = ptlrpcs_name2flavor(val);
3013 if (cli->cl_sec_flavor == PTLRPCS_FLVR_INVALID) {
3014 CERROR("unrecognized security flavor %s\n", (char*) val);
3021 if (keylen == strlen("sec_flags") &&
3022 memcmp(key, "sec_flags", keylen) == 0) {
3023 struct client_obd *cli = &exp->exp_obd->u.cli;
3025 cli->cl_sec_flags = *((unsigned long *) val);
3029 if (keylen == strlen("flush_cred") &&
3030 memcmp(key, "flush_cred", keylen) == 0) {
3031 struct client_obd *cli = &exp->exp_obd->u.cli;
3034 ptlrpcs_import_flush_current_creds(cli->cl_import);
3037 if (keylen == strlen("crypto_cb") &&
3038 memcmp(key, "crypto_cb", keylen) == 0) {
3039 LASSERT(vallen == sizeof(crypt_cb_t));
3040 osc_crypt_cb = (crypt_cb_t)val;
3044 if (keylen < strlen("mds_conn") ||
3045 memcmp(key, "mds_conn", keylen) != 0)
3048 ctxt = llog_get_context(&exp->exp_obd->obd_llogs,
3049 LLOG_UNLINK_ORIG_CTXT);
3052 rc = llog_initiator_connect(ctxt);
3054 CERROR("cannot establish the connect for "
3055 "ctxt %p: %d\n", ctxt, rc);
3058 imp->imp_server_timeout = 1;
3059 CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
3060 imp->imp_pingable = 1;
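/* Sketch (assumed caller, not taken from this file) of how the "async" key
 * handled above would be toggled through the generic obd_set_info() entry
 * point, which dispatches to this method:
 *
 *   int on = 1;
 *   rc = obd_set_info(exp, strlen("async"), "async", sizeof(on), &on);
 */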
3066 static struct llog_operations osc_size_repl_logops = {
3067 lop_cancel: llog_obd_repl_cancel
3070 static struct llog_operations osc_unlink_orig_logops;
3072 static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
3073 struct obd_device *tgt, int count,
3074 struct llog_catid *catid)
3079 osc_unlink_orig_logops = llog_lvfs_ops;
3080 osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
3081 osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
3082 osc_unlink_orig_logops.lop_add = llog_catalog_add;
3083 osc_unlink_orig_logops.lop_connect = llog_origin_connect;
3085 rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
3086 &catid->lci_logid, &osc_unlink_orig_logops);
3090 rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
3091 &osc_size_repl_logops);
3095 static int osc_llog_finish(struct obd_device *obd,
3096 struct obd_llogs *llogs, int count)
3101 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
3105 rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
3109 static int osc_connect(struct lustre_handle *exph,
3110 struct obd_device *obd, struct obd_uuid *cluuid,
3111 struct obd_connect_data *data,
3112 unsigned long connect_flags)
3116 rc = client_connect_import(exph, obd, cluuid, data, connect_flags);
3120 static int osc_disconnect(struct obd_export *exp, unsigned long flags)
3122 struct obd_device *obd = class_exp2obd(exp);
3123 struct llog_ctxt *ctxt;
3127 ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
3128 if (obd->u.cli.cl_conn_count == 1)
3129 /* flush any remaining cancel messages out to the target */
3130 llog_sync(ctxt, exp);
3132 rc = client_disconnect_export(exp, flags);
3136 static int osc_import_event(struct obd_device *obd,
3137 struct obd_import *imp,
3138 enum obd_import_event event)
3140 struct client_obd *cli;
3143 LASSERT(imp->imp_obd == obd);
3146 case IMP_EVENT_DISCON: {
3147 /* Only do this on the MDS OSCs */
3148 if (imp->imp_server_timeout) {
3149 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3151 spin_lock(&oscc->oscc_lock);
3152 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3153 spin_unlock(&oscc->oscc_lock);
3157 case IMP_EVENT_INACTIVE: {
3158 if (obd->obd_observer)
3159 rc = obd_notify(obd->obd_observer, obd, 0, 0);
3162 case IMP_EVENT_INVALIDATE: {
3163 struct ldlm_namespace *ns = obd->obd_namespace;
3167 spin_lock(&cli->cl_loi_list_lock);
3168 cli->cl_avail_grant = 0;
3169 cli->cl_lost_grant = 0;
3170 /* all pages go to failing RPCs due to the invalid import */
3171 osc_check_rpcs(cli);
3172 spin_unlock(&cli->cl_loi_list_lock);
3174 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3178 case IMP_EVENT_ACTIVE: {
3179 /* Only do this on the MDS OSCs */
3180 if (imp->imp_server_timeout) {
3181 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3183 spin_lock(&oscc->oscc_lock);
3184 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3185 spin_unlock(&oscc->oscc_lock);
3188 if (obd->obd_observer)
3189 rc = obd_notify(obd->obd_observer, obd, 1, 0);
3193 CERROR("Unknown import event %d\n", event);
3199 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
3201 struct lprocfs_static_vars lvars;
3205 lprocfs_init_vars(osc,&lvars);
3206 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3210 rc = lproc_osc_attach_seqstat(dev);
3212 lprocfs_obd_detach(dev);
3216 ptlrpc_lprocfs_register_obd(dev);
3220 static int osc_detach(struct obd_device *dev)
3222 ptlrpc_lprocfs_unregister_obd(dev);
3223 return lprocfs_obd_detach(dev);
3226 static int osc_setup(struct obd_device *obd, obd_count len, void *buf)
3230 rc = ptlrpcd_addref();
3234 rc = client_obd_setup(obd, len, buf);
3243 static int osc_cleanup(struct obd_device *obd, int flags)
3245 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3248 rc = ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
3249 LDLM_FL_CONFIG_CHANGE, NULL);
3253 spin_lock(&oscc->oscc_lock);
3254 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
3255 oscc->oscc_flags |= OSCC_FLAG_EXITING;
3256 spin_unlock(&oscc->oscc_lock);
3258 rc = client_obd_cleanup(obd, flags);
3263 struct obd_ops osc_obd_ops = {
3264 .o_owner = THIS_MODULE,
3265 .o_attach = osc_attach,
3266 .o_detach = osc_detach,
3267 .o_setup = osc_setup,
3268 .o_cleanup = osc_cleanup,
3269 .o_add_conn = client_import_add_conn,
3270 .o_del_conn = client_import_del_conn,
3271 .o_connect = osc_connect,
3272 .o_disconnect = osc_disconnect,
3273 .o_statfs = osc_statfs,
3274 .o_packmd = osc_packmd,
3275 .o_unpackmd = osc_unpackmd,
3276 .o_create = osc_create,
3277 .o_destroy = osc_destroy,
3278 .o_getattr = osc_getattr,
3279 .o_getattr_async = osc_getattr_async,
3280 .o_setattr = osc_setattr,
3282 .o_brw_async = osc_brw_async,
3283 .o_prep_async_page = osc_prep_async_page,
3284 .o_queue_async_io = osc_queue_async_io,
3285 .o_set_async_flags = osc_set_async_flags,
3286 .o_queue_group_io = osc_queue_group_io,
3287 .o_trigger_group_io = osc_trigger_group_io,
3288 .o_teardown_async_page = osc_teardown_async_page,
3289 .o_punch = osc_punch,
3291 .o_enqueue = osc_enqueue,
3292 .o_match = osc_match,
3293 .o_change_cbdata = osc_change_cbdata,
3294 .o_cancel = osc_cancel,
3295 .o_cancel_unused = osc_cancel_unused,
3296 .o_iocontrol = osc_iocontrol,
3297 .o_get_info = osc_get_info,
3298 .o_set_info = osc_set_info,
3299 .o_import_event = osc_import_event,
3300 .o_llog_init = osc_llog_init,
3301 .o_llog_finish = osc_llog_finish,
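/* Rough sketch (illustrative; based on the generic wrappers in obd_class.h)
 * of how the table above is reached: a caller goes through the generic
 * wrapper, which looks the handler up in the device's method table, e.g.
 *
 *   struct obd_statfs osfs;
 *   rc = obd_statfs(class_exp2obd(exp), &osfs, jiffies - HZ);
 *
 * ends up in osc_statfs() via osc_obd_ops.o_statfs. */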
3304 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3305 struct obd_ops sanosc_obd_ops = {
3306 .o_owner = THIS_MODULE,
3307 .o_attach = osc_attach,
3308 .o_detach = osc_detach,
3309 .o_cleanup = client_obd_cleanup,
3310 .o_add_conn = client_import_add_conn,
3311 .o_del_conn = client_import_del_conn,
3312 .o_connect = osc_connect,
3313 .o_disconnect = client_disconnect_export,
3314 .o_statfs = osc_statfs,
3315 .o_packmd = osc_packmd,
3316 .o_unpackmd = osc_unpackmd,
3317 .o_create = osc_real_create,
3318 .o_destroy = osc_destroy,
3319 .o_getattr = osc_getattr,
3320 .o_getattr_async = osc_getattr_async,
3321 .o_setattr = osc_setattr,
3322 .o_setup = client_sanobd_setup,
3323 .o_brw = sanosc_brw,
3324 .o_punch = osc_punch,
3326 .o_enqueue = osc_enqueue,
3327 .o_match = osc_match,
3328 .o_change_cbdata = osc_change_cbdata,
3329 .o_cancel = osc_cancel,
3330 .o_cancel_unused = osc_cancel_unused,
3331 .o_iocontrol = osc_iocontrol,
3332 .o_import_event = osc_import_event,
3333 .o_llog_init = osc_llog_init,
3334 .o_llog_finish = osc_llog_finish,
3338 int __init osc_init(void)
3340 struct lprocfs_static_vars lvars;
3341 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3342 struct lprocfs_static_vars sanlvars;
3347 lprocfs_init_vars(osc, &lvars);
3348 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3349 lprocfs_init_vars(osc, &sanlvars);
3352 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
3353 OBD_OSC_DEVICENAME);
3357 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3358 rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
3359 OBD_SANOSC_DEVICENAME);
3361 class_unregister_type(OBD_OSC_DEVICENAME);
3368 static void /*__exit*/ osc_exit(void)
3370 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
3371 class_unregister_type(OBD_SANOSC_DEVICENAME);
3373 class_unregister_type(OBD_OSC_DEVICENAME);
3376 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3377 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3378 MODULE_LICENSE("GPL");
3380 module_init(osc_init);
3381 module_exit(osc_exit);