1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * For testing and management it is treated as an obd_device,
23 * although it does not export a full OBD method table (the
24 * requests are coming in over the wire, so object target modules
25 * do not have a full method table.)
30 #define DEBUG_SUBSYSTEM S_OSC
33 # include <linux/version.h>
34 # include <linux/module.h>
35 # include <linux/mm.h>
36 # include <linux/highmem.h>
37 # include <linux/lustre_dlm.h>
38 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
39 # include <linux/workqueue.h>
40 # include <linux/smp_lock.h>
42 # include <linux/locks.h>
44 #else /* __KERNEL__ */
45 # include <liblustre.h>
48 #include <linux/kp30.h>
49 #include <linux/lustre_mds.h> /* for mds_objid */
50 #include <linux/lustre_otree.h>
51 #include <linux/obd_ost.h>
52 #include <linux/lustre_commit_confd.h>
53 #include <linux/obd_lov.h>
56 # include <linux/ctype.h>
57 # include <linux/init.h>
62 #include <linux/lustre_ha.h>
63 #include <linux/obd_support.h> /* for OBD_FAIL_CHECK */
64 #include <linux/lustre_lite.h> /* for ll_i2info */
65 #include <portals/lib-types.h> /* for PTL_MD_MAX_IOV */
66 #include <linux/lprocfs_status.h>
68 static struct llog_cookie zero_cookie = { { 0 } };
/* Register this OSC obd_device's /proc (lprocfs) variables.
 * NB: this dump omits some original lines (stale numbers skip). */
70 static int osc_attach(struct obd_device *dev, obd_count len, void *data)
72 struct lprocfs_static_vars lvars;
74 lprocfs_init_vars(osc,&lvars);
75 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* Tear down the /proc entries created by osc_attach(). */
78 static int osc_detach(struct obd_device *dev)
80 return lprocfs_obd_detach(dev);
83 /* Pack OSC object metadata for disk storage (LE byte order). */
/* If *lmmp is non-NULL the caller's buffer is freed/reused; otherwise a
 * fresh lov_mds_md is allocated.  Only the object id is packed here. */
84 static int osc_packmd(struct lustre_handle *conn, struct lov_mds_md **lmmp,
85 struct lov_stripe_md *lsm)
90 lmm_size = sizeof(**lmmp);
95 OBD_FREE(*lmmp, lmm_size);
101 OBD_ALLOC(*lmmp, lmm_size);
/* An object id of 0 is never valid on the wire or on disk. */
107 LASSERT(lsm->lsm_object_id);
108 (*lmmp)->lmm_object_id = cpu_to_le64 (lsm->lsm_object_id);
114 /* Unpack OSC object metadata from disk storage (LE byte order). */
/* Validates lmm_bytes/object id, allocates a single-stripe lov_stripe_md
 * into *lsmp (freeing it instead when lmm == NULL), and initialises the
 * inline dirty otree for stripe 0. */
115 static int osc_unpackmd(struct lustre_handle *conn, struct lov_stripe_md **lsmp,
116 struct lov_mds_md *lmm, int lmm_bytes)
/* Reject short buffers before touching any lmm field. */
122 if (lmm_bytes < sizeof (*lmm)) {
123 CERROR("lov_mds_md too small: %d, need %d\n",
124 lmm_bytes, (int)sizeof(*lmm));
127 /* XXX LOV_MAGIC etc check? */
129 if (lmm->lmm_object_id == cpu_to_le64(0)) {
130 CERROR("lov_mds_md: zero lmm_object_id\n");
/* OSC is always exactly one stripe. */
135 lsm_size = lov_stripe_md_size(1);
/* lmm == NULL means "free the in-memory md" rather than unpack. */
139 if (*lsmp != NULL && lmm == NULL) {
140 OBD_FREE(*lsmp, lsm_size);
146 OBD_ALLOC(*lsmp, lsm_size);
/* Point the dirty otree at its inline storage and initialise it. */
150 (*lsmp)->lsm_oinfo[0].loi_dirty_ot =
151 &(*lsmp)->lsm_oinfo[0].loi_dirty_ot_inline;
152 ot_init((*lsmp)->lsm_oinfo[0].loi_dirty_ot);
156 /* XXX zero *lsmp? */
157 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
158 LASSERT((*lsmp)->lsm_object_id);
161 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
166 #warning "FIXME: make this be sent from OST"
167 #define OSC_BRW_MAX_SIZE 65536
168 #define OSC_BRW_MAX_IOV min_t(int, PTL_MD_MAX_IOV, OSC_BRW_MAX_SIZE/PAGE_SIZE)
/* Reply-interpret callback for osc_getattr_async(): copy the obdo out of
 * the (byte-swabbed) reply into the caller's buffer.  On unpack failure the
 * obdo's o_valid is cleared so the caller sees no stale attributes. */
170 static int osc_getattr_interpret(struct ptlrpc_request *req,
171 struct osc_getattr_async_args *aa, int rc)
173 struct ost_body *body;
179 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
181 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
182 memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
184 /* This should really be sent by the OST */
185 aa->aa_oa->o_blksize = OSC_BRW_MAX_SIZE;
186 aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
188 CERROR("can't unpack ost_body\n");
/* Invalidate everything rather than hand back garbage attributes. */
190 aa->aa_oa->o_valid = 0;
/* Queue a non-blocking OST_GETATTR on 'set'; the reply is consumed later by
 * osc_getattr_interpret() via rq_interpret_reply.  'oa' must stay valid
 * until the set completes (it is stashed in rq_async_args). */
196 static int osc_getattr_async(struct lustre_handle *conn, struct obdo *oa,
197 struct lov_stripe_md *md,
198 struct ptlrpc_request_set *set)
200 struct ptlrpc_request *request;
201 struct ost_body *body;
202 int size = sizeof(*body);
203 struct osc_getattr_async_args *aa;
206 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_GETATTR, 1,
211 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
212 memcpy(&body->oa, oa, sizeof(*oa));
214 request->rq_replen = lustre_msg_size(1, &size);
215 request->rq_interpret_reply = osc_getattr_interpret;
/* The async args are carved out of the request itself; make sure they fit. */
217 LASSERT (sizeof (*aa) <= sizeof (request->rq_async_args));
218 aa = (struct osc_getattr_async_args *)&request->rq_async_args;
221 ptlrpc_set_add_req (set, request);
/* Synchronous OST_GETATTR: send the obdo, wait, and copy the returned
 * attributes back into 'oa'.  Fakes o_blksize locally (see comment below). */
225 static int osc_getattr(struct lustre_handle *conn, struct obdo *oa,
226 struct lov_stripe_md *md)
228 struct ptlrpc_request *request;
229 struct ost_body *body;
230 int rc, size = sizeof(*body);
233 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_GETATTR, 1,
238 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
239 memcpy(&body->oa, oa, sizeof(*oa));
241 request->rq_replen = lustre_msg_size(1, &size);
243 rc = ptlrpc_queue_wait(request);
245 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
/* Swab the reply in place; NULL means a short or malformed reply. */
249 body = lustre_swab_repbuf(request, 0, sizeof (*body),
250 lustre_swab_ost_body);
252 CERROR ("can't unpack ost_body\n");
253 GOTO (out, rc = -EPROTO);
256 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
257 memcpy(oa, &body->oa, sizeof(*oa));
259 /* This should really be sent by the OST */
260 oa->o_blksize = OSC_BRW_MAX_SIZE;
261 oa->o_valid |= OBD_MD_FLBLKSZ;
265 ptlrpc_req_finished(request);
269 /* The import lock must already be held. */
/* Walk a request list (sending or delayed) and rewrite the file handle
 * embedded in the obdo of every pending request of opcode 'op' that still
 * carries the pre-recovery handle 'old', replacing it with 'new'.  Used
 * after open replay so queued closes target the replayed handle. */
270 static inline void osc_update_body_handle(struct list_head *head,
271 struct lustre_handle *old,
272 struct lustre_handle *new, int op)
274 struct list_head *tmp;
275 struct ost_body *body;
276 struct ptlrpc_request *req;
277 struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
279 list_for_each(tmp, head) {
280 req = list_entry(tmp, struct ptlrpc_request, rq_list);
282 /* XXX ok to remove when bug 1303 resolved - rread 05/27/03 */
283 LASSERT (req != last_req);
/* Only requests of the caller-specified opcode are candidates. */
286 if (req->rq_reqmsg->opc != op)
288 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
/* Skip requests that reference some other file handle. */
289 if (memcmp(obdo_handle(&body->oa), old, sizeof(*old)))
292 DEBUG_REQ(D_HA, req, "updating close body with new fh");
293 memcpy(obdo_handle(&body->oa), new, sizeof(*new));
/* Replay callback for a replayed OST_OPEN: pull the (possibly new) file
 * handle out of the replay reply, store it in the client handle, and patch
 * any queued/delayed OST_CLOSE requests that still use the old handle. */
297 static void osc_replay_open(struct ptlrpc_request *req)
299 struct lustre_handle old;
300 struct ost_body *body;
301 struct obd_client_handle *och = req->rq_replay_data;
302 struct lustre_handle *oa_handle;
305 body = lustre_swab_repbuf (req, 0, sizeof (*body),
306 lustre_swab_ost_body);
307 LASSERT (body != NULL);
309 oa_handle = obdo_handle(&body->oa);
/* Remember the pre-replay handle so pending closes can be rewritten. */
311 memcpy(&old, &och->och_fh, sizeof(old));
312 CDEBUG(D_HA, "updating cookie from "LPD64" to "LPD64"\n",
313 och->och_fh.cookie, oa_handle->cookie);
314 memcpy(&och->och_fh, oa_handle, sizeof(och->och_fh));
316 /* A few frames up, ptlrpc_replay holds the lock, so this is safe. */
317 osc_update_body_handle(&req->rq_import->imp_sending_list, &old,
318 &och->och_fh, OST_CLOSE);
319 osc_update_body_handle(&req->rq_import->imp_delayed_list, &old,
320 &och->och_fh, OST_CLOSE);
/* Synchronous OST_OPEN.  The request is marked for replay and retained
 * (via och->och_req) until the matching close, so recovery can re-open and
 * re-establish the file handle through osc_replay_open(). */
325 static int osc_open(struct lustre_handle *conn, struct obdo *oa,
326 struct lov_stripe_md *md, struct obd_trans_info *oti,
327 struct obd_client_handle *och)
329 struct ptlrpc_request *request;
330 struct ost_body *body;
332 int rc, size = sizeof(*body);
334 LASSERT(och != NULL);
336 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_OPEN, 1, &size,
/* Opens must be replayed after recovery; flag it under rq_lock. */
341 spin_lock_irqsave (&request->rq_lock, flags);
342 request->rq_replay = 1;
343 spin_unlock_irqrestore (&request->rq_lock, flags);
345 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
346 memcpy(&body->oa, oa, sizeof(*oa));
348 request->rq_replen = lustre_msg_size(1, &size);
350 rc = ptlrpc_queue_wait(request);
354 body = lustre_swab_repbuf (request, 0, sizeof (*body),
355 lustre_swab_ost_body);
357 CERROR ("Can't unpack ost_body\n");
358 GOTO (out, rc = -EPROTO);
361 memcpy(oa, &body->oa, sizeof(*oa));
363 /* If the open succeeded, we better have a handle */
364 /* BlueArc OSTs don't send back (o_valid | FLHANDLE). sigh.
365 * Temporary workaround until fixed. -phil 24 Feb 03 */
366 // if ((oa->o_valid & OBD_MD_FLHANDLE) == 0) {
367 // CERROR ("No file handle\n");
368 // GOTO (out, rc = -EPROTO);
370 oa->o_valid |= OBD_MD_FLHANDLE;
372 /* This should really be sent by the OST */
373 oa->o_blksize = OSC_BRW_MAX_SIZE;
374 oa->o_valid |= OBD_MD_FLBLKSZ;
/* Record the handle and keep a reference to the open request for replay;
 * the reference is dropped in osc_close(). */
376 memcpy(&och->och_fh, obdo_handle(oa), sizeof(och->och_fh));
377 request->rq_replay_cb = osc_replay_open;
378 request->rq_replay_data = och;
379 och->och_req = ptlrpc_request_addref(request);
380 och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
384 ptlrpc_req_finished(request);
/* Synchronous OST_CLOSE.  Recovers the obd_client_handle from oa->o_inline,
 * un-flags the paired open request for replay (retaining this close for
 * replay instead when the open had a transno), and drops the open's extra
 * request reference taken in osc_open(). */
388 static int osc_close(struct lustre_handle *conn, struct obdo *oa,
389 struct lov_stripe_md *md, struct obd_trans_info *oti)
391 struct obd_import *import = class_conn2cliimp(conn);
392 struct ptlrpc_request *request;
393 struct ost_body *body;
394 struct obd_client_handle *och;
396 int rc, size = sizeof(*body);
/* The client handle travels inside the obdo's inline area. */
400 och = (struct obd_client_handle *)&oa->o_inline;
401 if (och->och_magic == 0) {
402 /* Zero magic means that this file was never opened on this
403 * OST--almost certainly because the OST was inactive at
407 LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
409 request = ptlrpc_prep_req(import, OST_CLOSE, 1, &size, NULL);
413 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
414 memcpy(&body->oa, oa, sizeof(*oa));
416 request->rq_replen = lustre_msg_size(1, &size);
418 rc = ptlrpc_queue_wait(request);
420 CDEBUG(D_HA, "Suppressing close error %d\n", rc); // bug 1036
422 /* och_req == NULL can't happen any more, right? --phik */
423 if (och->och_req != NULL) {
424 spin_lock_irqsave(&import->imp_lock, flags);
425 spin_lock (&och->och_req->rq_lock);
/* The open no longer needs replaying once it is closed. */
426 och->och_req->rq_replay = 0;
427 spin_unlock (&och->och_req->rq_lock);
428 /* see comments in llite/file.c:ll_mdc_close() */
429 if (och->och_req->rq_transno) {
430 /* this can't happen yet, because the OSTs don't yet
431 * issue transnos for OPEN requests -phik 21 Apr 2003 */
433 if (!request->rq_transno && import->imp_replayable) {
/* Inherit the open's transno so this close is replayed in order. */
434 request->rq_transno = och->och_req->rq_transno;
435 ptlrpc_retain_replayable_request(request,
438 spin_unlock_irqrestore(&import->imp_lock, flags);
440 spin_unlock_irqrestore(&import->imp_lock, flags);
/* Drop the reference osc_open() took on the open request. */
443 ptlrpc_req_finished(och->och_req);
447 body = lustre_swab_repbuf (request, 0, sizeof (*body),
448 lustre_swab_ost_body);
451 CDEBUG(D_HA, "Suppressing close error %d\n", rc); // bug 1036
453 memcpy(oa, &body->oa, sizeof(*oa));
456 ptlrpc_req_finished(request);
/* Synchronous OST_SETATTR: ship the caller's obdo to the OST and wait.
 * The reply body is not copied back here. */
460 static int osc_setattr(struct lustre_handle *conn, struct obdo *oa,
461 struct lov_stripe_md *md, struct obd_trans_info *oti)
463 struct ptlrpc_request *request;
464 struct ost_body *body;
465 int rc, size = sizeof(*body);
468 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SETATTR, 1,
473 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
474 memcpy(&body->oa, oa, sizeof(*oa));
476 request->rq_replen = lustre_msg_size(1, &size);
478 rc = ptlrpc_queue_wait(request);
480 ptlrpc_req_finished(request);
/* Synchronous OST_CREATE: allocate an in-memory stripe md, create the
 * object on the OST, record the new object id and (if supplied) the llog
 * unlink cookie and transno in 'oti' for the caller. */
484 static int osc_create(struct lustre_handle *conn, struct obdo *oa,
485 struct lov_stripe_md **ea, struct obd_trans_info *oti)
487 struct ptlrpc_request *request;
488 struct ost_body *body;
489 struct lov_stripe_md *lsm;
490 int rc, size = sizeof(*body);
498 rc = obd_alloc_memmd(conn, &lsm);
503 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CREATE, 1, &size,
506 GOTO(out, rc = -ENOMEM);
508 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
509 memcpy(&body->oa, oa, sizeof(body->oa));
511 request->rq_replen = lustre_msg_size(1, &size);
513 rc = ptlrpc_queue_wait(request);
517 body = lustre_swab_repbuf(request, 0, sizeof(*body),
518 lustre_swab_ost_body);
520 CERROR ("can't unpack ost_body\n");
521 GOTO (out_req, rc = -EPROTO);
/* The OST assigned the object id; pull the whole obdo back. */
524 memcpy(oa, &body->oa, sizeof(*oa));
526 /* This should really be sent by the OST */
527 oa->o_blksize = OSC_BRW_MAX_SIZE;
528 oa->o_valid |= OBD_MD_FLBLKSZ;
530 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
531 * have valid lsm_oinfo data structs, so don't go touching that.
532 * This needs to be fixed in a big way.
534 lsm->lsm_object_id = oa->o_id;
538 oti->oti_transno = request->rq_repmsg->transno;
/* The OST may hand back an llog cookie to pair with a later unlink. */
540 if (oa->o_valid & OBD_MD_FLCOOKIE) {
541 if (!oti->oti_logcookies)
542 oti_alloc_cookies(oti, 1);
543 memcpy(oti->oti_logcookies, obdo_logcookie(oa),
544 sizeof(oti->oti_onecookie));
548 CDEBUG(D_HA, "transno: "LPD64"\n", request->rq_repmsg->transno);
551 ptlrpc_req_finished(request);
/* Error path: release the memmd we allocated above. */
554 obd_free_memmd(conn, &lsm);
/* Synchronous OST_PUNCH (truncate/hole-punch) of [start, end) on one
 * object.  The byte range is smuggled through the obdo's size/blocks
 * fields (see overload comment below). */
558 static int osc_punch(struct lustre_handle *conn, struct obdo *oa,
559 struct lov_stripe_md *md, obd_size start,
560 obd_size end, struct obd_trans_info *oti)
562 struct ptlrpc_request *request;
563 struct ost_body *body;
564 int rc, size = sizeof(*body);
572 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_PUNCH, 1, &size,
577 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
578 memcpy(&body->oa, oa, sizeof(*oa));
580 /* overload the size and blocks fields in the oa with start/end */
581 body->oa.o_size = start;
582 body->oa.o_blocks = end;
583 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
585 request->rq_replen = lustre_msg_size(1, &size);
587 rc = ptlrpc_queue_wait(request);
591 body = lustre_swab_repbuf (request, 0, sizeof (*body),
592 lustre_swab_ost_body);
594 CERROR ("can't unpack ost_body\n");
595 GOTO (out, rc = -EPROTO);
598 memcpy(oa, &body->oa, sizeof(*oa));
602 ptlrpc_req_finished(request);
/* Synchronous OST_DESTROY of one object.  If the caller supplied an llog
 * cookie (from create) it is packed into the obdo so the OST can cancel
 * the matching unlink log record. */
606 static int osc_destroy(struct lustre_handle *conn, struct obdo *oa,
607 struct lov_stripe_md *ea, struct obd_trans_info *oti)
609 struct ptlrpc_request *request;
610 struct ost_body *body;
611 int rc, size = sizeof(*body);
618 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_DESTROY, 1,
623 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
624 memcpy(&body->oa, oa, sizeof(*oa));
/* Consume one cookie per destroyed object, advancing the cursor. */
626 if (oti && oa->o_valid & OBD_MD_FLCOOKIE) {
627 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
628 sizeof(*oti->oti_logcookies));
629 oti->oti_logcookies++;
632 request->rq_replen = lustre_msg_size(1, &size);
634 rc = ptlrpc_queue_wait(request);
638 body = lustre_swab_repbuf(request, 0, sizeof(*body),
639 lustre_swab_ost_body);
641 CERROR ("Can't unpack body\n");
642 GOTO (out, rc = -EPROTO);
645 memcpy(oa, &body->oa, sizeof(*oa));
649 ptlrpc_req_finished(request);
/* Piggy-back this client's dirty-cache accounting onto an outgoing BRW
 * body.  o_blocks/o_rdev are overloaded to carry dirty/granted byte counts
 * (matching osc_update_grant() on the reply side). */
653 static void osc_announce_cached(struct client_obd *cli, struct ost_body *body)
655 obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLRDEV;
/* The caller must not already be using these overloaded fields. */
657 LASSERT(!(body->oa.o_valid & bits));
659 body->oa.o_valid |= bits;
/* cl_dirty_sem serialises readers/writers of the dirty counters. */
660 down(&cli->cl_dirty_sem);
661 body->oa.o_blocks = cli->cl_dirty;
662 body->oa.o_rdev = cli->cl_dirty_granted;
663 up(&cli->cl_dirty_sem);
664 CDEBUG(D_INODE, "announcing "LPU64" dirty "LPU64" granted\n",
665 cli->cl_dirty, cli->cl_dirty_granted);
/* Reply-side counterpart of osc_announce_cached(): absorb the OST's grant
 * (carried in the overloaded o_rdev field) into cl_dirty_granted.  An OST
 * that never sets OBD_MD_FLRDEV is remembered as unable to grant. */
668 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
670 if(!(body->oa.o_valid & OBD_MD_FLRDEV)) {
671 if (cli->cl_ost_can_grant) {
672 CDEBUG(D_INODE, "%s can't grant\n",
673 cli->cl_import->imp_target_uuid.uuid);
675 cli->cl_ost_can_grant = 0;
679 CDEBUG(D_ERROR, "got "LPU64" grant\n", body->oa.o_rdev);
680 down(&cli->cl_dirty_sem);
681 cli->cl_dirty_granted = body->oa.o_rdev;
682 /* XXX check for over-run and wake up the io thread that
683 * doesn't exist yet */
684 up(&cli->cl_dirty_sem);
687 /* We assume that the reason this OSC got a short read is because it read
688 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
689 * via the LOV, and it _knows_ it's reading inside the file, it's just that
690 * this stripe never got written at or beyond this stripe offset yet. */
/* Zero-fill the unread tail: partially-read page first, then every page
 * past the number of bytes actually returned. */
691 static void handle_short_read(int nob_read, obd_count page_count,
692 struct brw_page *pga)
696 /* skip bytes read OK */
697 while (nob_read > 0) {
698 LASSERT (page_count > 0);
700 if (pga->count > nob_read) {
701 /* EOF inside this page */
702 ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
703 memset(ptr + nob_read, 0, pga->count - nob_read);
710 nob_read -= pga->count;
715 /* zero remaining pages */
716 while (page_count-- > 0) {
717 ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
718 memset(ptr, 0, pga->count);
/* Validate the per-niobuf return-code vector in an OST_WRITE reply:
 * swab if needed, propagate the first negative rc, and reject any
 * nonzero positive rc as a protocol violation. */
724 static int check_write_rcs(struct ptlrpc_request *request, int niocount,
725 obd_count page_count, struct brw_page *pga)
730 /* return error if any niobuf was in error */
731 remote_rcs = lustre_swab_repbuf(request, 1,
732 sizeof(*remote_rcs) * niocount, NULL);
733 if (remote_rcs == NULL) {
734 CERROR ("Missing/short RC vector on BRW_WRITE reply\n");
/* Raw __u32 array: no per-type swabber, so swab each element by hand. */
737 if (lustre_msg_swabbed (request->rq_repmsg))
738 for (i = 0; i < niocount; i++)
739 __swab32s (&remote_rcs[i]);
741 for (i = 0; i < niocount; i++) {
742 if (remote_rcs[i] < 0)
743 return (remote_rcs[i]);
745 if (remote_rcs[i] != 0) {
746 CERROR ("rc[%d] invalid (%d) req %p\n",
747 i, remote_rcs[i], request);
/* Two brw_pages can share one remote niobuf iff they carry identical flags
 * and are byte-contiguous in object offset. */
755 static inline int can_merge_pages (struct brw_page *p1, struct brw_page *p2)
757 if (p1->flag != p2->flag) {
758 /* XXX we don't make much use of 'flag' right now
759 * but this will warn about usage when we do */
760 CERROR ("different flags set %d, %d\n",
765 return (p1->off + p1->count == p2->off);
/* Accumulate an ost_checksum over up to 'nob' bytes spread across the
 * page array (each page contributes min(count, remaining nob) bytes
 * starting at its in-page offset). */
769 static obd_count cksum_pages(int nob, obd_count page_count,
770 struct brw_page *pga)
777 LASSERT (page_count > 0);
779 ptr = kmap (pga->pg);
780 ost_checksum (&cksum, ptr + (pga->off & (PAGE_SIZE - 1)),
781 pga->count > nob ? nob : pga->count);
/* Build (but do not send) a bulk OST_READ/OST_WRITE request for 'page_count'
 * sorted pages: counts mergeable niobufs, allocates the 3-buffer request
 * (body, ioobj, niobuf array), registers every page with the bulk
 * descriptor, and sizes the expected reply.  Outputs: *requested_nobp
 * (total bytes), *niocountp (remote niobufs), *reqp (the request,
 * flagged no-resend because bulk cannot be safely resent). */
793 static int osc_brw_prep_request(int cmd, struct obd_import *imp,struct obdo *oa,
794 struct lov_stripe_md *lsm, obd_count page_count,
795 struct brw_page *pga, int *requested_nobp,
796 int *niocountp, struct ptlrpc_request **reqp)
798 struct ptlrpc_request *req;
799 struct ptlrpc_bulk_desc *desc;
800 struct client_obd *cli = &imp->imp_obd->u.cli;
801 struct ost_body *body;
802 struct obd_ioobj *ioobj;
803 struct niobuf_remote *niobuf;
812 opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
/* One niobuf per run of contiguous, same-flag pages. */
814 for (niocount = i = 1; i < page_count; i++)
815 if (!can_merge_pages (&pga[i - 1], &pga[i]))
818 size[0] = sizeof(*body);
819 size[1] = sizeof(*ioobj);
820 size[2] = niocount * sizeof(*niobuf);
822 req = ptlrpc_prep_req(imp, opc, 3, size, NULL);
/* Writes are GET_SOURCE (server pulls), reads PUT_SINK (server pushes). */
826 if (opc == OST_WRITE)
827 desc = ptlrpc_prep_bulk_imp(req, BULK_GET_SOURCE,
830 desc = ptlrpc_prep_bulk_imp(req, BULK_PUT_SINK,
833 GOTO(out, rc = -ENOMEM);
834 /* NB request now owns desc and will free it when it gets freed */
836 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
837 ioobj = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*ioobj));
838 niobuf = lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf));
840 memcpy(&body->oa, oa, sizeof(*oa));
842 ioobj->ioo_id = oa->o_id;
/* NOTE(review): 'o_valid & 0' is always 0, so ioo_gr is always 0 —
 * looks like a missing flag constant (group support disabled?); confirm. */
843 ioobj->ioo_gr = oa->o_valid & 0 ? oa->o_gr : 0;
844 ioobj->ioo_type = oa->o_mode;
845 ioobj->ioo_bufcnt = niocount;
847 LASSERT (page_count > 0);
848 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
849 struct brw_page *pg = &pga[i];
850 struct brw_page *pg_prev = pg - 1;
852 LASSERT(pg->count > 0);
/* Each brw_page must fit within one page, and pages must be sorted. */
853 LASSERT((pg->off & ~PAGE_MASK) + pg->count <= PAGE_SIZE);
854 LASSERT(i == 0 || pg->off > pg_prev->off);
856 rc = ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~PAGE_MASK,
861 requested_nob += pg->count;
/* Extend the previous niobuf when contiguous; else start a new one. */
863 if (i > 0 && can_merge_pages(pg_prev, pg)) {
865 niobuf->len += pg->count;
867 niobuf->offset = pg->off;
868 niobuf->len = pg->count;
869 niobuf->flags = pg->flag;
/* Sanity: we filled exactly the niobuf array we allocated. */
873 LASSERT((void *)(niobuf - niocount) ==
874 lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
876 body->oa.o_valid |= OBD_MD_FLCKSUM;
/* NOTE(review): comparison uses OST_BRW_WRITE while 'opc' is set from
 * OST_WRITE above — verify these constants are the same value. */
877 if (opc == OST_BRW_WRITE)
878 body->oa.o_nlink = cksum_pages(requested_nob, page_count, pga);
880 osc_announce_cached(cli, body);
/* Bulk transfers cannot be transparently resent. */
881 spin_lock_irqsave(&req->rq_lock, flags);
882 req->rq_no_resend = 1;
883 spin_unlock_irqrestore(&req->rq_lock, flags);
885 /* size[0] still sizeof (*body) */
886 if (opc == OST_WRITE) {
887 /* 1 RC per niobuf */
888 size[1] = sizeof(__u32) * niocount;
889 req->rq_replen = lustre_msg_size(2, size);
891 /* 1 RC for the whole I/O */
892 req->rq_replen = lustre_msg_size(1, size);
895 *niocountp = niocount;
896 *requested_nobp = requested_nob;
/* Error path: release the request (and the desc it owns). */
901 ptlrpc_req_finished (req);
/* Post-process a completed BRW: unpack/swab the body, absorb the grant,
 * then for writes validate the per-niobuf rc vector and for reads handle
 * short reads, copy attributes back, and verify the optional checksum. */
905 static int osc_brw_fini_request(struct ptlrpc_request *req, struct obdo *oa,
906 int requested_nob, int niocount,
907 obd_count page_count, struct brw_page *pga,
910 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
911 struct ost_body *body;
916 body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
918 CERROR ("Can't unpack body\n");
922 osc_update_grant(cli, body);
924 if (req->rq_reqmsg->opc == OST_WRITE) {
/* Writes report status only via the rc vector, never a byte count. */
926 CERROR ("Unexpected +ve rc %d\n", rc);
930 return(check_write_rcs(req, niocount, page_count, pga));
/* Read path: rc is the byte count actually transferred. */
933 if (rc > requested_nob) {
934 CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
938 if (rc < requested_nob)
939 handle_short_read(rc, page_count, pga);
941 memcpy(oa, &body->oa, sizeof(*oa));
/* Server checksum travels in the overloaded o_nlink field. */
944 if (oa->o_valid & OBD_MD_FLCKSUM) {
945 static int cksum_counter;
946 obd_count server_cksum = oa->o_nlink;
947 obd_count cksum = cksum_pages(rc, page_count, pga);
950 if (server_cksum != cksum) {
951 CERROR("Bad checksum: server "LPX64", client "LPX64
952 ", server NID "LPX64"\n", server_cksum, cksum,
953 imp->imp_connection->c_peer.peer_nid);
/* Log only at power-of-two counts to limit console noise. */
956 } else if ((cksum_counter & (-cksum_counter)) == cksum_counter)
957 CERROR("Checksum %u from "LPX64" OK: %x\n",
959 imp->imp_connection->c_peer.peer_nid, cksum);
961 static int cksum_missed;
963 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
964 CERROR("Request checksum %u from "LPX64", no reply\n",
966 imp->imp_connection->c_peer.peer_nid);
/* One synchronous BRW round trip: prep (no-resend), queue-and-wait, and
 * finish.  A bulk timeout with rq_resend set is logged and the request
 * retried by the surrounding (not visible here) restart logic. */
972 static int osc_brw_internal(int cmd, struct lustre_handle *conn,struct obdo *oa,
973 struct lov_stripe_md *lsm,
974 obd_count page_count, struct brw_page *pga)
978 struct ptlrpc_request *request;
983 rc = osc_brw_prep_request(cmd, class_conn2cliimp(conn), oa, lsm,
984 page_count, pga, &requested_nob, &niocount,
986 /* NB ^ sets rq_no_resend */
991 rc = ptlrpc_queue_wait(request);
993 if (rc == -ETIMEDOUT && request->rq_resend) {
994 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
995 ptlrpc_req_finished(request);
999 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
1000 page_count, pga, rc);
1002 ptlrpc_req_finished(request);
/* Reply-interpret callback for async BRW: unpack the saved async args and
 * run the common finish path.  Bulk-timeout resend is not implemented for
 * the async path yet (bug 937) — it LBUG()s instead. */
1006 static int brw_interpret(struct ptlrpc_request *request,
1007 struct osc_brw_async_args *aa, int rc)
1009 struct obdo *oa = aa->aa_oa;
1010 int requested_nob = aa->aa_requested_nob;
1011 int niocount = aa->aa_nio_count;
1012 obd_count page_count = aa->aa_page_count;
1013 struct brw_page *pga = aa->aa_pga;
1016 /* XXX bug 937 here */
1017 if (rc == -ETIMEDOUT && request->rq_resend) {
1018 DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
1019 LBUG(); /* re-send. later. */
1020 //goto restart_bulk;
1023 rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
1024 page_count, pga, rc);
/* Async analogue of osc_brw_internal(): prep the BRW request, stash the
 * completion parameters in rq_async_args for brw_interpret(), and add the
 * request to the caller's set instead of waiting. */
1028 static int async_internal(int cmd, struct lustre_handle *conn, struct obdo *oa,
1029 struct lov_stripe_md *lsm, obd_count page_count,
1030 struct brw_page *pga, struct ptlrpc_request_set *set)
1032 struct ptlrpc_request *request;
1035 struct osc_brw_async_args *aa;
1039 rc = osc_brw_prep_request(cmd, class_conn2cliimp(conn), oa, lsm,
1040 page_count, pga, &requested_nob, &nio_count,
1042 /* NB ^ sets rq_no_resend */
/* Async args live inside the request; they must fit. */
1045 LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
1046 aa = (struct osc_brw_async_args *)&request->rq_async_args;
1048 aa->aa_requested_nob = requested_nob;
1049 aa->aa_nio_count = nio_count;
1050 aa->aa_page_count = page_count;
1053 request->rq_interpret_reply = brw_interpret;
1054 ptlrpc_set_add_req(set, request);
1060 #define min_t(type,x,y) \
1061 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
1065 * ugh, we want disk allocation on the target to happen in offset order. we'll
1066 * follow sedgewicks advice and stick to the dead simple shellsort -- it'll do
1067 * fine for our small page arrays and doesn't require allocation. its an
1068 * insertion sort that swaps elements that are strides apart, shrinking the
1069 * stride down until its '1' and the array is sorted.
/* In-place shellsort of 'array' by ascending brw_page.off. */
1071 static void sort_brw_pages(struct brw_page *array, int num)
1074 struct brw_page tmp;
/* Knuth's 3h+1 gap sequence, grown first, then shrunk inside the loop. */
1078 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1083 for (i = stride ; i < num ; i++) {
1086 while (j >= stride && array[j - stride].off > tmp.off) {
1087 array[j] = array[j - stride];
1092 } while (stride > 1);
1095 /* make sure we the regions we're passing to elan don't violate its '4
1096 * fragments' constraint. portal headers are a fragment, all full
1097 * PAGE_SIZE long pages count as 1 fragment, and each partial page
1098 * counts as a fragment. I think. see bug 934. */
/* Returns how many leading pages of 'pg' fit within the Elan fragment
 * budget; the caller sends only that prefix this round. */
1099 static obd_count check_elan_limit(struct brw_page *pg, obd_count pages)
1102 int saw_whole_frag = 0;
1105 for (i = 0 ; frags_left && i < pages ; pg++, i++) {
/* All full pages together consume a single fragment. */
1106 if (pg->count == PAGE_SIZE) {
1107 if (!saw_whole_frag) {
/* Top-level synchronous BRW entry point: OBD_BRW_CHECK just probes import
 * validity; otherwise the page array is chunked to OSC_BRW_MAX_IOV, sorted
 * by offset, clipped to the Elan fragment limit, and sent chunk by chunk. */
1118 static int osc_brw(int cmd, struct lustre_handle *conn, struct obdo *oa,
1119 struct lov_stripe_md *md, obd_count page_count,
1120 struct brw_page *pga, struct obd_trans_info *oti)
1124 if (cmd == OBD_BRW_CHECK) {
1125 /* The caller just wants to know if there's a chance that this
1126 * I/O can succeed */
1127 struct obd_import *imp = class_conn2cliimp(conn);
1129 if (imp == NULL || imp->imp_invalid)
1134 while (page_count) {
1135 obd_count pages_per_brw;
1138 if (page_count > OSC_BRW_MAX_IOV)
1139 pages_per_brw = OSC_BRW_MAX_IOV;
1141 pages_per_brw = page_count;
/* Sort before clipping so the chunk is offset-ordered on the wire. */
1143 sort_brw_pages(pga, pages_per_brw);
1144 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1146 rc = osc_brw_internal(cmd, conn, oa, md, pages_per_brw, pga);
1151 page_count -= pages_per_brw;
1152 pga += pages_per_brw;
/* Async twin of osc_brw(): identical chunking/sorting/clipping, but each
 * chunk is queued on 'set' via async_internal() instead of waited on. */
1157 static int osc_brw_async(int cmd, struct lustre_handle *conn, struct obdo *oa,
1158 struct lov_stripe_md *md, obd_count page_count,
1159 struct brw_page *pga, struct ptlrpc_request_set *set,
1160 struct obd_trans_info *oti)
1164 if (cmd == OBD_BRW_CHECK) {
1165 /* The caller just wants to know if there's a chance that this
1166 * I/O can succeed */
1167 struct obd_import *imp = class_conn2cliimp(conn);
1169 if (imp == NULL || imp->imp_invalid)
1174 while (page_count) {
1175 obd_count pages_per_brw;
1178 if (page_count > OSC_BRW_MAX_IOV)
1179 pages_per_brw = OSC_BRW_MAX_IOV;
1181 pages_per_brw = page_count;
1183 sort_brw_pages(pga, pages_per_brw);
1184 pages_per_brw = check_elan_limit(pga, pages_per_brw);
1186 rc = async_internal(cmd, conn, oa, md, pages_per_brw, pga, set);
1191 page_count -= pages_per_brw;
1192 pga += pages_per_brw;
1198 /* Note: caller will lock/unlock, and set uptodate on the pages */
1199 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* SAN read path (2.4 kernels only): ask the OST (OST_SAN_READ) for the
 * on-disk block numbers of each page, then read the blocks directly from
 * the shared SAN block device via buffer heads.  A zero offset in the
 * reply marks a hole and the page is zero-filled locally. */
1200 static int sanosc_brw_read(struct lustre_handle *conn, struct obdo *oa,
1201 struct lov_stripe_md *lsm, obd_count page_count,
1202 struct brw_page *pga)
1204 struct ptlrpc_request *request = NULL;
1205 struct ost_body *body;
1206 struct niobuf_remote *nioptr;
1207 struct obd_ioobj *iooptr;
1208 int rc, size[3] = {sizeof(*body)}, mapped = 0;
1212 /* XXX does not handle 'new' brw protocol */
1214 size[1] = sizeof(struct obd_ioobj);
1215 size[2] = page_count * sizeof(*nioptr);
1217 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SAN_READ, 3,
1222 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof(*body));
1223 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof(*iooptr));
1224 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
1225 sizeof(*nioptr) * page_count);
1227 memcpy(&body->oa, oa, sizeof(body->oa));
1229 iooptr->ioo_id = oa->o_id;
/* NOTE(review): '& 0' makes ioo_gr always 0 — same oddity as the BRW
 * prep path; confirm intended. */
1230 iooptr->ioo_gr = oa->o_valid & 0 ? oa->o_gr : 0;
1231 iooptr->ioo_type = oa->o_mode;
1232 iooptr->ioo_bufcnt = page_count;
/* One niobuf per page (no merging in the SAN protocol). */
1234 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
1235 LASSERT(PageLocked(pga[mapped].pg));
1236 LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);
1238 nioptr->offset = pga[mapped].off;
1239 nioptr->len = pga[mapped].count;
1240 nioptr->flags = pga[mapped].flag;
1243 size[1] = page_count * sizeof(*nioptr);
1244 request->rq_replen = lustre_msg_size(2, size);
1246 rc = ptlrpc_queue_wait(request);
1250 body = lustre_swab_repbuf(request, 0, sizeof(*body),
1251 lustre_swab_ost_body);
1253 CERROR("Can't unpack body\n");
1254 GOTO(out_req, rc = -EPROTO);
1257 memcpy(oa, &body->oa, sizeof(*oa));
/* The reply niobufs now carry physical block numbers in 'offset'. */
1259 swab = lustre_msg_swabbed(request->rq_repmsg);
1260 LASSERT_REPSWAB(request, 1);
1261 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
1263 /* nioptr missing or short */
1264 GOTO(out_req, rc = -EPROTO);
1268 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
1269 struct page *page = pga[mapped].pg;
1270 struct buffer_head *bh;
1274 lustre_swab_niobuf_remote (nioptr);
1276 /* got san device associated */
1277 LASSERT(class_conn2obd(conn));
1278 dev = class_conn2obd(conn)->u.cli.cl_sandev;
/* Block 0 in the reply means a hole: zero the page, no disk I/O. */
1281 if (!nioptr->offset) {
1282 CDEBUG(D_PAGE, "hole at ino %lu; index %ld\n",
1283 page->mapping->host->i_ino,
1285 memset(page_address(page), 0, PAGE_SIZE);
1289 if (!page->buffers) {
1290 create_empty_buffers(page, dev, PAGE_SIZE);
/* Freshly created buffer: map it to the reported block and read it. */
1293 clear_bit(BH_New, &bh->b_state);
1294 set_bit(BH_Mapped, &bh->b_state);
1295 bh->b_blocknr = (unsigned long)nioptr->offset;
1297 clear_bit(BH_Uptodate, &bh->b_state);
1299 ll_rw_block(READ, 1, &bh);
1303 /* if buffer already existed, it must be the
1304 * one we mapped before, check it */
1305 LASSERT(!test_bit(BH_New, &bh->b_state));
1306 LASSERT(test_bit(BH_Mapped, &bh->b_state));
1307 LASSERT(bh->b_blocknr == (unsigned long)nioptr->offset);
1309 /* wait for its io completion */
1310 if (test_bit(BH_Lock, &bh->b_state))
1313 if (!test_bit(BH_Uptodate, &bh->b_state))
1314 ll_rw_block(READ, 1, &bh);
1318 /* must do synchronous read here */
1320 if (!buffer_uptodate(bh)) {
1328 ptlrpc_req_finished(request);
/* SAN write path (2.4 kernels only): ask the OST (OST_SAN_WRITE) to
 * allocate/report block numbers for each page, then write the pages
 * synchronously to the shared SAN block device through buffer heads. */
1332 static int sanosc_brw_write(struct lustre_handle *conn, struct obdo *oa,
1333 struct lov_stripe_md *lsm, obd_count page_count,
1334 struct brw_page *pga)
1336 struct ptlrpc_request *request = NULL;
1337 struct ost_body *body;
1338 struct niobuf_remote *nioptr;
1339 struct obd_ioobj *iooptr;
1340 int rc, size[3] = {sizeof(*body)}, mapped = 0;
1344 size[1] = sizeof(struct obd_ioobj);
1345 size[2] = page_count * sizeof(*nioptr);
1347 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SAN_WRITE,
1352 body = lustre_msg_buf(request->rq_reqmsg, 0, sizeof (*body));
1353 iooptr = lustre_msg_buf(request->rq_reqmsg, 1, sizeof (*iooptr));
1354 nioptr = lustre_msg_buf(request->rq_reqmsg, 2,
1355 sizeof (*nioptr) * page_count);
1357 memcpy(&body->oa, oa, sizeof(body->oa));
1359 iooptr->ioo_id = oa->o_id;
/* NOTE(review): '& 0' makes ioo_gr always 0 — confirm intended. */
1360 iooptr->ioo_gr = oa->o_valid & 0 ? oa->o_gr : 0;
1361 iooptr->ioo_type = oa->o_mode;
1362 iooptr->ioo_bufcnt = page_count;
/* One niobuf per page, pages pre-sorted by offset. */
1365 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
1366 LASSERT(PageLocked(pga[mapped].pg));
1367 LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);
1369 nioptr->offset = pga[mapped].off;
1370 nioptr->len = pga[mapped].count;
1371 nioptr->flags = pga[mapped].flag;
1374 size[1] = page_count * sizeof(*nioptr);
1375 request->rq_replen = lustre_msg_size(2, size);
1377 rc = ptlrpc_queue_wait(request);
/* Reply niobufs carry the physical block numbers to write to. */
1381 swab = lustre_msg_swabbed (request->rq_repmsg);
1382 LASSERT_REPSWAB (request, 1);
1383 nioptr = lustre_msg_buf(request->rq_repmsg, 1, size[1]);
1385 CERROR("absent/short niobuf array\n");
1386 GOTO(out_req, rc = -EPROTO);
1390 for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
1391 struct page *page = pga[mapped].pg;
1392 struct buffer_head *bh;
1396 lustre_swab_niobuf_remote (nioptr);
1398 /* got san device associated */
1399 LASSERT(class_conn2obd(conn));
1400 dev = class_conn2obd(conn)->u.cli.cl_sandev;
1402 if (!page->buffers) {
1403 create_empty_buffers(page, dev, PAGE_SIZE);
/* Existing buffer must match the mapping established earlier. */
1406 LASSERT(!test_bit(BH_New, &page->buffers->b_state));
1407 LASSERT(test_bit(BH_Mapped, &page->buffers->b_state));
1408 LASSERT(page->buffers->b_blocknr ==
1409 (unsigned long)nioptr->offset);
1415 /* if buffer locked, wait for its io completion */
1416 if (test_bit(BH_Lock, &bh->b_state))
1419 clear_bit(BH_New, &bh->b_state);
1420 set_bit(BH_Mapped, &bh->b_state);
1422 /* override the block nr */
1423 bh->b_blocknr = (unsigned long)nioptr->offset;
1425 /* we are about to write it, so set it
1427 * page lock should guarantee no race condition here */
1428 set_bit(BH_Uptodate, &bh->b_state);
1429 set_bit(BH_Dirty, &bh->b_state);
1431 ll_rw_block(WRITE, 1, &bh);
1433 /* must do synchronous write here */
1435 if (!buffer_uptodate(bh) || test_bit(BH_Dirty, &bh->b_state)) {
1443 ptlrpc_req_finished(request);
/*
 * sanosc_brw(): top-level SAN brw entry point.
 *
 * Splits the page array into chunks of at most OSC_BRW_MAX_IOV pages
 * and dispatches each chunk to sanosc_brw_write() or sanosc_brw_read()
 * depending on whether OBD_BRW_WRITE is set in @cmd.
 */
1447 static int sanosc_brw(int cmd, struct lustre_handle *conn, struct obdo *oa,
1448 struct lov_stripe_md *lsm, obd_count page_count,
1449 struct brw_page *pga, struct obd_trans_info *oti)
1453 while (page_count) {
1454 obd_count pages_per_brw;
/* cap each RPC at OSC_BRW_MAX_IOV pages */
1457 if (page_count > OSC_BRW_MAX_IOV)
1458 pages_per_brw = OSC_BRW_MAX_IOV;
1460 pages_per_brw = page_count;
1462 if (cmd & OBD_BRW_WRITE)
1463 rc = sanosc_brw_write(conn, oa, lsm, pages_per_brw,pga);
1465 rc = sanosc_brw_read(conn, oa, lsm, pages_per_brw, pga);
/* advance to the next chunk */
1470 page_count -= pages_per_brw;
1471 pga += pages_per_brw;
/*
 * osc_mark_page_dirty(): account one page at @offset as dirty in the
 * stripe's dirty offset tree (stripe 0 only -- loi_dirty_ot of
 * lsm_oinfo[0]).
 *
 * Fails with -EDQUOT when the OST grant accounting is enabled and
 * adding one more page would meet or exceed the granted dirty limit.
 * cl_dirty_sem serializes all dirty accounting for this client.
 */
1478 static int osc_mark_page_dirty(struct lustre_handle *conn,
1479 struct lov_stripe_md *lsm, unsigned long offset)
1481 struct client_obd *cli = &class_conn2obd(conn)->u.cli;
1482 struct otree *dirty_ot = lsm->lsm_oinfo[0].loi_dirty_ot;
1486 down(&cli->cl_dirty_sem);
/* enforce the server-granted dirty limit, if any */
1489 if (cli->cl_ost_can_grant &&
1490 (cli->cl_dirty + PAGE_CACHE_SIZE >= cli->cl_dirty_granted)) {
1491 CDEBUG(D_INODE, "granted "LPU64" < "LPU64"\n",
1492 cli->cl_dirty_granted, cli->cl_dirty + PAGE_CACHE_SIZE);
1493 GOTO(out, rc = -EDQUOT);
1497 rc = ot_mark_offset(dirty_ot, offset);
1501 cli->cl_dirty += PAGE_CACHE_SIZE;
1502 CDEBUG(D_INODE, "dirtied off %lu, now "LPU64" bytes dirty\n",
1503 offset, cli->cl_dirty);
1505 up(&cli->cl_dirty_sem);
/*
 * osc_clear_dirty_pages(): clear the [start, end] extent from the
 * stripe's dirty offset tree and reduce the client's dirty-byte count
 * accordingly.
 *
 * @cleared (out): number of pages that were actually cleared, computed
 * as the difference in marked-page counts before and after the clear.
 * Protected by cl_dirty_sem like the rest of the dirty accounting.
 */
1509 static int osc_clear_dirty_pages(struct lustre_handle *conn,
1510 struct lov_stripe_md *lsm,
1511 unsigned long start, unsigned long end,
1512 unsigned long *cleared)
1514 struct client_obd *cli = &class_conn2obd(conn)->u.cli;
1515 struct otree *dirty_ot = lsm->lsm_oinfo[0].loi_dirty_ot;
1516 unsigned long old_marked, new_marked;
1520 down(&cli->cl_dirty_sem);
1522 old_marked = ot_num_marked(dirty_ot);
1524 rc = ot_clear_extent(dirty_ot, start, end);
1528 new_marked = ot_num_marked(dirty_ot);
/* sanity: clearing can only shrink the marked set, and accounting
 * must never go negative */
1530 LASSERT(new_marked <= old_marked);
1531 LASSERT(old_marked * PAGE_CACHE_SIZE <= cli->cl_dirty);
1532 *cleared = old_marked - new_marked;
1533 cli->cl_dirty -= (__u64)*cleared << PAGE_CACHE_SHIFT;
1534 CDEBUG(D_INODE, "cleared [%lu,%lu], now "LPU64" bytes dirty\n",
1535 start, end, cli->cl_dirty);
1538 up(&cli->cl_dirty_sem);
/*
 * osc_last_dirty_offset(): report the highest dirty page offset for
 * stripe 0 via @offset (delegates to ot_last_marked on the dirty tree).
 */
1542 static int osc_last_dirty_offset(struct lustre_handle *conn,
1543 struct lov_stripe_md *lsm,
1544 unsigned long *offset)
1546 struct otree *dirty_ot = lsm->lsm_oinfo[0].loi_dirty_ot;
1550 rc = ot_last_marked(dirty_ot, offset);
/*
 * osc_enqueue(): take an extent lock on the object named by @lsm.
 *
 * First widens the requested extent to page boundaries, then tries to
 * match an already-held lock (same mode, then PW when PR was asked
 * for) before falling back to a real ldlm_cli_enqueue() RPC.  On a
 * successful match the lock handle is returned referenced in @lockh.
 */
1554 static int osc_enqueue(struct lustre_handle *connh, struct lov_stripe_md *lsm,
1555 struct lustre_handle *parent_lock,
1556 __u32 type, void *extentp, int extent_len, __u32 mode,
1557 int *flags, void *callback, void *data,
1558 struct lustre_handle *lockh)
1560 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
1561 struct obd_device *obd = class_conn2obd(connh);
1562 struct ldlm_extent *extent = extentp;
1566 /* Filesystem lock extents are extended to page boundaries so that
1567 * dealing with the page cache is a little smoother. */
1568 extent->start -= extent->start & ~PAGE_MASK;
1569 extent->end |= ~PAGE_MASK;
1571 /* Next, search for already existing extent locks that will cover us */
/* NOTE(review): sizeof(extent) is the size of the *pointer*, not of
 * struct ldlm_extent; sizeof(*extent) looks intended -- confirm
 * against ldlm_lock_match()'s cookielen contract.  Same pattern
 * repeats below and in osc_match(). */
1572 rc = ldlm_lock_match(obd->obd_namespace, LDLM_FL_MATCH_DATA, &res_id,
1573 type, extent, sizeof(extent), mode, data, lockh);
1575 /* We already have a lock, and it's referenced */
1578 /* If we're trying to read, we also search for an existing PW lock. The
1579 * VFS and page cache already protect us locally, so lots of readers/
1580 * writers can share a single PW lock.
1582 * There are problems with conversion deadlocks, so instead of
1583 * converting a read lock to a write lock, we'll just enqueue a new
1586 * At some point we should cancel the read lock instead of making them
1587 * send us a blocking callback, but there are problems with canceling
1588 * locks out from other users right now, too. */
1590 if (mode == LCK_PR) {
1591 rc = ldlm_lock_match(obd->obd_namespace, LDLM_FL_MATCH_DATA,
1592 &res_id, type, extent, sizeof(extent),
1593 LCK_PW, data, lockh)
1595 /* FIXME: This is not incredibly elegant, but it might
1596 * be more elegant than adding another parameter to
1597 * lock_match. I want a second opinion. */
/* convert the matched PW reference into the PR reference the
 * caller expects: add PR, drop the PW we just took */
1598 ldlm_lock_addref(lockh, LCK_PR);
1599 ldlm_lock_decref(lockh, LCK_PW);
/* no usable existing lock: enqueue a new one on the server */
1605 rc = ldlm_cli_enqueue(connh, NULL, obd->obd_namespace, parent_lock,
1606 res_id, type, extent, sizeof(extent), mode, flags,
1607 ldlm_completion_ast, callback, data, lockh);
/*
 * osc_match(): match-only variant of osc_enqueue() -- look for an
 * already-held compatible extent lock but never send an enqueue RPC.
 *
 * Same page-boundary widening and same PR-can-reuse-PW trick as
 * osc_enqueue().
 */
1611 static int osc_match(struct lustre_handle *connh, struct lov_stripe_md *lsm,
1612 __u32 type, void *extentp, int extent_len, __u32 mode,
1613 int *flags, void *data, struct lustre_handle *lockh)
1615 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
1616 struct obd_device *obd = class_conn2obd(connh);
1617 struct ldlm_extent *extent = extentp;
1621 /* Filesystem lock extents are extended to page boundaries so that
1622 * dealing with the page cache is a little smoother */
1623 extent->start -= extent->start & ~PAGE_MASK;
1624 extent->end |= ~PAGE_MASK;
1626 /* Next, search for already existing extent locks that will cover us */
/* NOTE(review): sizeof(extent) is the pointer size, not the struct
 * size -- same suspected bug as in osc_enqueue(); confirm. */
1627 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
1628 extent, sizeof(extent), mode, data, lockh);
1632 /* If we're trying to read, we also search for an existing PW lock. The
1633 * VFS and page cache already protect us locally, so lots of readers/
1634 * writers can share a single PW lock. */
1635 if (mode == LCK_PR) {
1636 rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id,
1637 type, extent, sizeof(extent), LCK_PW,
1640 /* FIXME: This is not incredibly elegant, but it might
1641 * be more elegant than adding another parameter to
1642 * lock_match. I want a second opinion. */
/* matched a PW lock while asking for PR: take a PR reference
 * and drop the PW reference the match added */
1643 ldlm_lock_addref(lockh, LCK_PR);
1644 ldlm_lock_decref(lockh, LCK_PW);
/*
 * osc_cancel(): release one reference on the lock in @lockh at the
 * given @mode (the DLM cancels the lock itself once unreferenced).
 */
1650 static int osc_cancel(struct lustre_handle *oconn, struct lov_stripe_md *md,
1651 __u32 mode, struct lustre_handle *lockh)
1655 ldlm_lock_decref(lockh, mode);
/*
 * osc_cancel_unused(): cancel all unused locks on the resource named
 * by lsm->lsm_object_id; thin wrapper over ldlm_cli_cancel_unused().
 */
1660 static int osc_cancel_unused(struct lustre_handle *connh,
1661 struct lov_stripe_md *lsm, int flags, void *opaque)
1663 struct obd_device *obd = class_conn2obd(connh);
1664 struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
1666 return ldlm_cli_cancel_unused(obd->obd_namespace, &res_id, flags,
/*
 * osc_statfs(): fetch filesystem statistics from the OST via an
 * OST_STATFS RPC and copy the (byte-swab-checked) reply into @osfs.
 *
 * NOTE(review): @max_age is not used anywhere in the visible code --
 * the comment below explains it is a possible future optimization.
 */
1670 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1671 unsigned long max_age)
1673 struct obd_statfs *msfs;
1674 struct ptlrpc_request *request;
1675 int rc, size = sizeof(*osfs);
1678 /* We could possibly pass max_age in the request (as an absolute
1679 * timestamp or a "seconds.usec ago") so the target can avoid doing
1680 * extra calls into the filesystem if that isn't necessary (e.g.
1681 * during mount that would help a bit). Having relative timestamps
1682 * is not so great if request processing is slow, while absolute
1683 * timestamps are not ideal because they need time synchronization. */
1684 request = ptlrpc_prep_req(obd->u.cli.cl_import, OST_STATFS,0,NULL,NULL);
1688 request->rq_replen = lustre_msg_size(1, &size);
1690 rc = ptlrpc_queue_wait(request);
1692 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
/* unpack (and byte-swap if needed) the obd_statfs reply buffer */
1696 msfs = lustre_swab_repbuf(request, 0, sizeof(*msfs),
1697 lustre_swab_obd_statfs);
1699 CERROR("Can't unpack obd_statfs\n");
1700 GOTO(out, rc = -EPROTO);
1703 memcpy(osfs, msfs, sizeof(*osfs));
1707 ptlrpc_req_finished(request);
1711 /* Retrieve object striping information.
1713 * @lmmu is a pointer to an in-core struct with lmm_ost_count indicating
1714 * the maximum number of OST indices which will fit in the user buffer.
1715 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
/* An OSC has exactly one stripe, so a single-object lov_mds_md is
 * built in kernel memory and copied back to the user buffer. */
1717 static int osc_getstripe(struct lustre_handle *conn, struct lov_stripe_md *lsm,
1718 struct lov_mds_md *lmmu)
1720 struct lov_mds_md lmm, *lmmk;
/* read the user's header to validate magic and capacity */
1727 rc = copy_from_user(&lmm, lmmu, sizeof(lmm));
1731 if (lmm.lmm_magic != LOV_MAGIC)
1734 if (lmm.lmm_ost_count < 1)
/* header plus exactly one object entry */
1737 lmm_size = sizeof(lmm) + sizeof(lmm.lmm_objects[0]);
1738 OBD_ALLOC(lmmk, lmm_size);
1742 lmmk->lmm_stripe_count = 1;
1743 lmmk->lmm_ost_count = 1;
1744 lmmk->lmm_object_id = lsm->lsm_object_id;
1745 lmmk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
1747 if (copy_to_user(lmmu, lmmk, lmm_size))
1750 OBD_FREE(lmmk, lmm_size);
/*
 * osc_iocontrol(): ioctl dispatcher for the OSC obd device.
 *
 * Handled commands (visible in this excerpt):
 *   IOC_OSC_REGISTER_LOV   - record the containing LOV device (once)
 *   OBD_IOC_LOV_GET_CONFIG - synthesize a single-target lov_desc +
 *                            uuid list and copy it back to user space
 *   LL_IOC_LOV_SETSTRIPE   - allocate in-memory stripe md from karg
 *   LL_IOC_LOV_GETSTRIPE   - osc_getstripe()
 *   OBD_IOC_CLIENT_RECOVER - trigger import recovery
 *   IOC_OSC_SET_ACTIVE     - toggle import active state
 * Anything else returns -ENOTTY.
 */
1755 static int osc_iocontrol(unsigned int cmd, struct lustre_handle *conn, int len,
1756 void *karg, void *uarg)
1758 struct obd_device *obd = class_conn2obd(conn);
1759 struct obd_ioctl_data *data = karg;
1764 case IOC_OSC_REGISTER_LOV: {
1765 if (obd->u.cli.cl_containing_lov)
1766 GOTO(out, err = -EALREADY);
1767 obd->u.cli.cl_containing_lov = (struct obd_device *)karg;
1770 case OBD_IOC_LOV_GET_CONFIG: {
1772 struct lov_desc *desc;
1773 struct obd_uuid uuid;
/* pull the user's ioctl data into a kernel buffer */
1777 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
1778 GOTO(out, err = -EINVAL);
1780 data = (struct obd_ioctl_data *)buf;
/* inlbuf1 must hold a lov_desc, inlbuf2 a uuid */
1782 if (sizeof(*desc) > data->ioc_inllen1) {
1784 GOTO(out, err = -EINVAL);
1787 if (data->ioc_inllen2 < sizeof(uuid)) {
1789 GOTO(out, err = -EINVAL);
/* an OSC looks like a 1-target, 1-stripe LOV to callers */
1792 desc = (struct lov_desc *)data->ioc_inlbuf1;
1793 desc->ld_tgt_count = 1;
1794 desc->ld_active_tgt_count = 1;
1795 desc->ld_default_stripe_count = 1;
1796 desc->ld_default_stripe_size = 0;
1797 desc->ld_default_stripe_offset = 0;
1798 desc->ld_pattern = 0;
1799 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
1801 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
1803 err = copy_to_user((void *)uarg, buf, len);
1806 obd_ioctl_freedata(buf, len);
1809 case LL_IOC_LOV_SETSTRIPE:
1810 err = obd_alloc_memmd(conn, karg);
1814 case LL_IOC_LOV_GETSTRIPE:
1815 err = osc_getstripe(conn, karg, uarg);
1817 case OBD_IOC_CLIENT_RECOVER:
1818 err = ptlrpc_recover_import(obd->u.cli.cl_import,
1821 case IOC_OSC_SET_ACTIVE:
1822 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
1826 CERROR("unrecognised ioctl %#x by %s\n", cmd, current->comm);
1827 GOTO(out, err = -ENOTTY);
/*
 * osc_get_info(): key/value query interface.  The only key handled in
 * the visible code is "lock_to_stripe", which returns a __u32 stripe
 * number (an OSC is single-striped).
 *
 * Note the keylen test: keylen must exceed strlen("lock_to_stripe"),
 * i.e. include the terminating NUL, and the key must compare equal.
 */
1833 static int osc_get_info(struct lustre_handle *conn, obd_count keylen,
1834 void *key, __u32 *vallen, void *val)
1837 if (!vallen || !val)
1840 if (keylen > strlen("lock_to_stripe") &&
1841 strcmp(key, "lock_to_stripe") == 0) {
1842 __u32 *stripe = val;
1843 *vallen = sizeof(*stripe);
/*
 * osc_set_info(): key/value set interface.  Only keys beginning with
 * "mds_conn" are accepted; the key is forwarded to the OST in an
 * OST_SET_INFO RPC with an empty reply.
 */
1850 static int osc_set_info(struct lustre_handle *conn, obd_count keylen,
1851 void *key, obd_count vallen, void *val)
1853 struct ptlrpc_request *req;
1854 int rc, size = keylen;
1855 char *bufs[1] = {key};
/* reject keys too short to be "mds_conn" or not prefixed by it */
1858 if (keylen < strlen("mds_conn") ||
1859 memcmp(key, "mds_conn", strlen("mds_conn")) != 0)
1862 req = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SET_INFO, 1,
/* no reply payload expected */
1867 req->rq_replen = lustre_msg_size(0, NULL);
1868 rc = ptlrpc_queue_wait(req);
1869 ptlrpc_req_finished(req);
/*
 * osc_log_cancel(): batch llog cancel cookies into a per-client
 * llog_commit_data page and hand the page to the commit daemon when it
 * fills up or OBD_LLOG_FL_SENDNOW is set.
 *
 * Calling with no cookies (count == 0, NULL, or a zero cookie) is the
 * "flush only" form: it sends any pending llcd when SENDNOW is set and
 * otherwise does nothing.  (Locking lines around cl_llcd are not
 * visible in this excerpt.)
 */
1873 static int osc_log_cancel(struct lustre_handle *conn, struct lov_stripe_md *lsm,
1874 int count, struct llog_cookie *cookies, int flags)
1876 struct obd_device *obd = class_conn2obd(conn);
1877 struct llog_commit_data *llcd;
1878 struct client_obd *cli;
/* flush-only path: no real cookie supplied */
1883 if ((count == 0 || cookies == NULL ||
1884 memcmp(cookies, &zero_cookie, sizeof(*cookies)) == 0)) {
1886 if (cli->cl_llcd == NULL || !(flags & OBD_LLOG_FL_SENDNOW))
1889 llcd = cli->cl_llcd;
1894 llcd = cli->cl_llcd;
/* no current llcd and none could be allocated: drop the cookie */
1898 CERROR("couldn't get an llcd - dropped "LPX64":%x+%u\n",
1899 cookies->lgc_lgl.lgl_oid,
1900 cookies->lgc_lgl.lgl_ogen, cookies->lgc_index);
1901 GOTO(out, rc = -ENOMEM);
1903 llcd->llcd_import = cli->cl_import;
1904 cli->cl_llcd = llcd;
/* append this cookie to the batch */
1907 memcpy(llcd->llcd_cookies + llcd->llcd_cookiebytes, cookies,
1909 llcd->llcd_cookiebytes += sizeof(*cookies);
1911 /* If we can't fit any more cookies into the page, we need to send it */
1913 if ((PAGE_SIZE - llcd->llcd_cookiebytes < sizeof(*cookies) ||
1914 flags & OBD_LLOG_FL_SENDNOW)) {
1915 cli->cl_llcd = NULL;
/*
 * osc_disconnect(): flush any batched llog cancel cookies to the
 * target, then perform the normal client import disconnect.
 */
1924 static int osc_disconnect(struct lustre_handle *conn, int flags)
1926 struct obd_device *obd = class_conn2obd(conn);
1928 /* flush any remaining cancel messages out to the target */
1929 if (obd->u.cli.cl_llcd)
1930 osc_log_cancel(conn, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
1932 return client_import_disconnect(conn, flags);
/*
 * osc_log_add(): append a record to the llog catalog and store its
 * cookie(s) in @logcookies.
 *
 * NOTE(review): the return value of llog_add_record() is discarded
 * here, so a failed append is silently ignored -- confirm intended.
 */
1935 static int osc_log_add(struct lustre_handle *conn,
1936 struct llog_handle *cathandle,
1937 struct llog_trans_hdr *rec, struct lov_stripe_md *lsm,
1938 struct llog_cookie *logcookies, int numcookies)
1941 LASSERT(logcookies && numcookies > 0);
1943 llog_add_record(cathandle, rec, logcookies);
/* Method table for the regular (network RPC) OSC obd type.  Generic
 * client_obd_* helpers handle setup/teardown/connect; osc_* methods
 * implement the OST protocol operations. */
1948 struct obd_ops osc_obd_ops = {
1949 o_owner: THIS_MODULE,
1950 o_attach: osc_attach,
1951 o_detach: osc_detach,
1952 o_setup: client_obd_setup,
1953 o_cleanup: client_obd_cleanup,
1954 o_connect: client_import_connect,
1955 o_disconnect: osc_disconnect,
1956 o_statfs: osc_statfs,
1957 o_packmd: osc_packmd,
1958 o_unpackmd: osc_unpackmd,
1959 o_create: osc_create,
1960 o_destroy: osc_destroy,
1961 o_getattr: osc_getattr,
1962 o_getattr_async:osc_getattr_async,
1963 o_setattr: osc_setattr,
1967 o_brw_async: osc_brw_async,
1969 o_enqueue: osc_enqueue,
1971 o_cancel: osc_cancel,
1972 o_cancel_unused:osc_cancel_unused,
1973 o_iocontrol: osc_iocontrol,
1974 o_get_info: osc_get_info,
1975 o_set_info: osc_set_info,
1976 o_log_cancel: osc_log_cancel,
1977 o_log_add: osc_log_add,
1978 o_mark_page_dirty: osc_mark_page_dirty,
1979 o_clear_dirty_pages: osc_clear_dirty_pages,
1980 o_last_dirty_offset: osc_last_dirty_offset,
/* Method table for the SAN variant of the OSC (shared block device
 * data path); only built on 2.4 kernels.  Differs from osc_obd_ops
 * mainly in o_setup (client_sanobd_setup) and in using the plain
 * client_import_disconnect (no llog flush). */
1983 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1984 struct obd_ops sanosc_obd_ops = {
1985 o_owner: THIS_MODULE,
1986 o_attach: osc_attach,
1987 o_detach: osc_detach,
1988 o_cleanup: client_obd_cleanup,
1989 o_connect: client_import_connect,
1990 o_disconnect: client_import_disconnect,
1991 o_statfs: osc_statfs,
1992 o_packmd: osc_packmd,
1993 o_unpackmd: osc_unpackmd,
1994 o_create: osc_create,
1995 o_destroy: osc_destroy,
1996 o_getattr: osc_getattr,
1997 o_getattr_async: osc_getattr_async,
1998 o_setattr: osc_setattr,
2001 o_setup: client_sanobd_setup,
2004 o_enqueue: osc_enqueue,
2006 o_cancel: osc_cancel,
2007 o_cancel_unused: osc_cancel_unused,
2008 o_iocontrol: osc_iocontrol,
2009 o_log_cancel: osc_log_cancel,
2010 o_log_add: osc_log_add,
2011 o_mark_page_dirty: osc_mark_page_dirty,
2012 o_clear_dirty_pages: osc_clear_dirty_pages,
2013 o_last_dirty_offset: osc_last_dirty_offset,
/*
 * osc_init(): module init.  Registers the "osc" obd type and, on 2.4
 * kernels, the "sanosc" type as well; if the second registration
 * fails, the first is rolled back.
 */
2017 int __init osc_init(void)
2019 struct lprocfs_static_vars lvars, sanlvars;
/* handle structures must fit in the fixed-size slots they travel in */
2023 LASSERT(sizeof(struct obd_client_handle) <= FD_OSTDATA_SIZE);
2024 LASSERT(sizeof(struct obd_client_handle) <= OBD_INLINESZ);
2026 lprocfs_init_vars(osc,&lvars);
2027 lprocfs_init_vars(osc,&sanlvars);
2029 rc = class_register_type(&osc_obd_ops, lvars.module_vars,
2034 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2035 rc = class_register_type(&sanosc_obd_ops, sanlvars.module_vars,
2036 LUSTRE_SANOSC_NAME);
/* sanosc registration failed: undo the osc registration */
2038 class_unregister_type(LUSTRE_OSC_NAME);
/*
 * osc_exit(): module teardown -- unregister the sanosc (2.4 only) and
 * osc obd types, in reverse order of registration.
 */
2044 static void /*__exit*/ osc_exit(void)
2046 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
2047 class_unregister_type(LUSTRE_SANOSC_NAME);
2049 class_unregister_type(LUSTRE_OSC_NAME);
/* Kernel module metadata and entry/exit hookup. */
2053 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2054 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
2055 MODULE_LICENSE("GPL");
2057 module_init(osc_init);
2058 module_exit(osc_exit);