1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
6 * This code is issued under the GNU General Public License.
7 * See the file COPYING in this distribution
9 * Author: Peter Braam <braam@clusterfs.com>
11 * This server is single threaded at present (but can easily be multi
12 * threaded). For testing and management it is treated as an
13 * obd_device, although it does not export a full OBD method table
14 * (the requests are coming in over the wire, so object target
15 * modules do not have a full method table.)
20 #define DEBUG_SUBSYSTEM S_OSC
22 #include <linux/version.h>
23 #include <linux/module.h>
25 #include <linux/highmem.h>
26 #include <linux/lustre_dlm.h>
27 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
28 #include <linux/workqueue.h>
30 #include <linux/kp30.h>
31 #include <linux/lustre_mds.h> /* for mds_objid */
32 #include <linux/obd_ost.h>
33 #include <linux/obd_lov.h>
34 #include <linux/ctype.h>
35 #include <linux/init.h>
36 #include <linux/lustre_ha.h>
37 #include <linux/obd_support.h> /* for OBD_FAIL_CHECK */
38 #include <linux/lustre_lite.h> /* for ll_i2info */
39 #include <portals/lib-types.h> /* for PTL_MD_MAX_IOV */
40 #include <linux/lprocfs_status.h>
42 extern lprocfs_vars_t status_var_nm_1[];
43 extern lprocfs_vars_t status_class_var[];
/*
 * osc_getattr: fetch the attributes of one object from the OST.
 *
 * conn: client connection handle to the OST.
 * oa:   obdo, in/out — identifies the object on the way in; on success the
 *       server's attributes are copied back into it.
 * md:   stripe descriptor (not referenced by the visible statements).
 *
 * NOTE(review): this excerpt is missing lines (braces, NULL checks, RETURN
 * paths were elided), so only the visible statements are documented.
 */
45 static int osc_getattr(struct lustre_handle *conn, struct obdo *oa,
46 struct lov_stripe_md *md)
48 struct ptlrpc_request *request;
49 struct ost_body *body;
50 int rc, size = sizeof(*body);
/* Build a one-buffer OST_GETATTR request on this connection's import. */
53 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_GETATTR, 1,
58 body = lustre_msg_buf(request->rq_reqmsg, 0);
59 #warning FIXME: pack only valid fields instead of memcpy, endianness
/* Whole obdo copied wholesale; see the #warning above re: endianness. */
60 memcpy(&body->oa, oa, sizeof(*oa));
/* The reply carries a single ost_body too. */
62 request->rq_replen = lustre_msg_size(1, &size);
64 rc = ptlrpc_queue_wait(request);
65 rc = ptlrpc_check_status(request, rc);
67 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
/* Success path: copy the server's attributes back to the caller. */
71 body = lustre_msg_buf(request->rq_repmsg, 0);
72 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
74 memcpy(oa, &body->oa, sizeof(*oa));
/* Drop our reference on the request in all cases. */
78 ptlrpc_req_finished(request);
/*
 * osc_open: send an OST_OPEN RPC for the object described by oa.
 *
 * conn: client connection handle; oa: obdo in/out (server's copy is
 * memcpy'd back on success); md: stripe descriptor (unused in the
 * visible statements).
 *
 * NOTE(review): error-handling lines are elided from this view.
 */
82 static int osc_open(struct lustre_handle *conn, struct obdo *oa,
83 struct lov_stripe_md *md)
85 struct ptlrpc_request *request;
86 struct ost_body *body;
87 int rc, size = sizeof(*body);
90 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_OPEN, 1, &size,
95 body = lustre_msg_buf(request->rq_reqmsg, 0);
96 #warning FIXME: pack only valid fields instead of memcpy, endianness
97 memcpy(&body->oa, oa, sizeof(*oa));
/* Reply is a single ost_body of the same size. */
99 request->rq_replen = lustre_msg_size(1, &size);
101 rc = ptlrpc_queue_wait(request);
102 rc = ptlrpc_check_status(request, rc);
/* Copy the post-open attributes back out to the caller. */
106 body = lustre_msg_buf(request->rq_repmsg, 0);
107 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
109 memcpy(oa, &body->oa, sizeof(*oa));
113 ptlrpc_req_finished(request);
/*
 * osc_close: send an OST_CLOSE RPC for the object described by oa.
 * Mirrors osc_open: obdo is packed whole, reply attributes are copied
 * back into *oa. md is not referenced by the visible statements.
 *
 * NOTE(review): error-handling lines are elided from this view.
 */
117 static int osc_close(struct lustre_handle *conn, struct obdo *oa,
118 struct lov_stripe_md *md)
120 struct ptlrpc_request *request;
121 struct ost_body *body;
122 int rc, size = sizeof(*body);
125 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CLOSE, 1, &size,
130 body = lustre_msg_buf(request->rq_reqmsg, 0);
131 #warning FIXME: pack only valid fields instead of memcpy, endianness
132 memcpy(&body->oa, oa, sizeof(*oa));
134 request->rq_replen = lustre_msg_size(1, &size);
136 rc = ptlrpc_queue_wait(request);
137 rc = ptlrpc_check_status(request, rc);
/* Copy the post-close attributes back out to the caller. */
141 body = lustre_msg_buf(request->rq_repmsg, 0);
142 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
144 memcpy(oa, &body->oa, sizeof(*oa));
148 ptlrpc_req_finished(request);
/*
 * osc_setattr: push the attributes in *oa to the OST via OST_SETATTR.
 * Unlike getattr/open/close, nothing is copied back from the reply —
 * the request is simply queued, status-checked, and released.
 * md is not referenced by the visible statements.
 */
152 static int osc_setattr(struct lustre_handle *conn, struct obdo *oa,
153 struct lov_stripe_md *md)
155 struct ptlrpc_request *request;
156 struct ost_body *body;
157 int rc, size = sizeof(*body);
160 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SETATTR, 1,
165 body = lustre_msg_buf(request->rq_reqmsg, 0);
166 memcpy(&body->oa, oa, sizeof(*oa));
168 request->rq_replen = lustre_msg_size(1, &size);
170 rc = ptlrpc_queue_wait(request);
171 rc = ptlrpc_check_status(request, rc);
173 ptlrpc_req_finished(request);
/*
 * osc_create: create a new object on the OST and build a fresh
 * lov_stripe_md describing it.
 *
 * conn: client connection handle.
 * oa:   obdo, in/out; oa->o_easize sizes the lsm allocation, and the
 *       server-assigned object id is read back into oa then recorded in
 *       the new lsm.
 * ea:   out — presumably *ea receives the allocated lsm; the assignment
 *       is not visible in this excerpt, so confirm against full source.
 *
 * NOTE(review): allocation-failure checks and RETURN paths are elided
 * from this view.
 */
177 static int osc_create(struct lustre_handle *conn, struct obdo *oa,
178 struct lov_stripe_md **ea)
180 struct ptlrpc_request *request;
181 struct ost_body *body;
182 struct lov_stripe_md *lsm;
183 int rc, size = sizeof(*body);
191 // XXX check oa->o_valid & OBD_MD_FLEASIZE first...
/* Allocate the stripe md the caller will own; sized by the MDS EA size. */
192 OBD_ALLOC(lsm, oa->o_easize);
195 lsm->lsm_mds_easize = oa->o_easize;
198 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CREATE, 1, &size,
201 GOTO(out, rc = -ENOMEM);
203 body = lustre_msg_buf(request->rq_reqmsg, 0);
204 memcpy(&body->oa, oa, sizeof(*oa));
206 request->rq_replen = lustre_msg_size(1, &size);
208 rc = ptlrpc_queue_wait(request);
209 rc = ptlrpc_check_status(request, rc);
/* Pull the server-assigned id out of the reply and stamp the lsm. */
213 body = lustre_msg_buf(request->rq_repmsg, 0);
214 memcpy(oa, &body->oa, sizeof(*oa));
216 lsm->lsm_object_id = oa->o_id;
217 lsm->lsm_stripe_count = 0;
221 ptlrpc_req_finished(request);
/* Error path: release the lsm we allocated above. */
224 OBD_FREE(lsm, oa->o_easize);
/*
 * osc_punch: truncate/punch a byte range of an object on the OST.
 * The [start, end] range is smuggled through the obdo's o_size and
 * o_blocks fields (see comment at line 251). Reply attributes are
 * copied back into *oa.
 *
 * NOTE(review): OR-ing HTON__u32(OBD_MD_FLSIZE|OBD_MD_FLBLOCKS) into
 * o_valid mixes host- and network-order bits in one field on
 * little-endian hosts — looks wrong; confirm against the wire format.
 */
228 static int osc_punch(struct lustre_handle *conn, struct obdo *oa,
229 struct lov_stripe_md *md, obd_size start,
232 struct ptlrpc_request *request;
233 struct ost_body *body;
234 int rc, size = sizeof(*body);
242 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_PUNCH, 1, &size,
247 body = lustre_msg_buf(request->rq_reqmsg, 0);
248 #warning FIXME: pack only valid fields instead of memcpy, endianness, valid
249 memcpy(&body->oa, oa, sizeof(*oa));
251 /* overload the size and blocks fields in the oa with start/end */
252 body->oa.o_size = HTON__u64(start);
253 body->oa.o_blocks = HTON__u64(end);
254 body->oa.o_valid |= HTON__u32(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
256 request->rq_replen = lustre_msg_size(1, &size);
258 rc = ptlrpc_queue_wait(request);
259 rc = ptlrpc_check_status(request, rc);
/* Copy the post-punch attributes back out to the caller. */
263 body = lustre_msg_buf(request->rq_repmsg, 0);
264 memcpy(oa, &body->oa, sizeof(*oa));
268 ptlrpc_req_finished(request);
/*
 * osc_destroy: delete the object described by oa on the OST via
 * OST_DESTROY. Reply attributes are copied back into *oa. ea is not
 * referenced by the visible statements.
 *
 * NOTE(review): error-handling lines are elided from this view.
 */
272 static int osc_destroy(struct lustre_handle *conn, struct obdo *oa,
273 struct lov_stripe_md *ea)
275 struct ptlrpc_request *request;
276 struct ost_body *body;
277 int rc, size = sizeof(*body);
284 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_DESTROY, 1,
289 body = lustre_msg_buf(request->rq_reqmsg, 0);
290 #warning FIXME: pack only valid fields instead of memcpy, endianness
291 memcpy(&body->oa, oa, sizeof(*oa));
293 request->rq_replen = lustre_msg_size(1, &size);
295 rc = ptlrpc_queue_wait(request);
296 rc = ptlrpc_check_status(request, rc);
300 body = lustre_msg_buf(request->rq_repmsg, 0);
301 memcpy(oa, &body->oa, sizeof(*oa));
305 ptlrpc_req_finished(request);
/*
 * Per-BRW completion context handed to brw_finish via bd_cb_data.
 * Later code also uses fields cb_data, obd_data and obd_size, which are
 * not visible in this excerpt — the struct shown here is truncated.
 */
309 struct osc_brw_cb_data {
310 brw_callback_t callback;
316 /* Our bulk-unmapping bottom half. */
/*
 * Runs from process context (scheduled via schedule_work in brw_finish)
 * because kunmap() may not be called from interrupt context: kunmap
 * every page attached to the bulk descriptor, then drop our reference
 * on the descriptor itself.
 */
317 static void unmap_and_decref_bulk_desc(void *data)
319 struct ptlrpc_bulk_desc *desc = data;
320 struct list_head *tmp;
323 /* This feels wrong to me. */
324 list_for_each(tmp, &desc->bd_page_list) {
325 struct ptlrpc_bulk_page *bulk;
326 bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
328 kunmap(bulk->bp_page);
/* Matches the reference taken when the bulk was registered/sent. */
331 ptlrpc_bulk_decref(desc);
/*
 * brw_finish: bulk-completion callback (may run in interrupt context).
 * Translates timeout/interrupt flags into an errno, invokes the
 * caller's CB_PHASE_FINISH callback, frees the per-BRW context, and
 * defers the kunmap/decref work to process context via a work item.
 */
335 static void brw_finish(struct ptlrpc_bulk_desc *desc, void *data)
337 struct osc_brw_cb_data *cb_data = data;
341 if (desc->bd_flags & PTL_RPC_FL_TIMEOUT) {
/* Interrupted waits become -ERESTARTSYS; plain timeouts get the
 * (elided) alternative value. */
342 err = (desc->bd_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
346 if (cb_data->callback)
347 cb_data->callback(cb_data->cb_data, err, CB_PHASE_FINISH);
/* Free the niobuf_local array (write path) and the context itself. */
349 if (cb_data->obd_data)
350 OBD_FREE(cb_data->obd_data, cb_data->obd_size);
351 OBD_FREE(cb_data, sizeof(*cb_data));
353 /* We can't kunmap the desc from interrupt context, so we do it from
354 * the bottom half above. */
355 prepare_work(&desc->bd_queue, unmap_and_decref_bulk_desc, desc);
356 schedule_work(&desc->bd_queue);
/*
 * osc_brw_read: read page_count pages from the OST into the pages in
 * pga[] using a PtlRPC bulk transfer.
 *
 * Flow: build a 3-buffer OST_READ request (ost_body, obd_ioobj,
 * niobuf_remote array), attach one bulk page per brw_page (kmapped),
 * register the bulk sink BEFORE queueing the RPC so out-of-order bulk
 * data has somewhere to land, then hand off to the async callback.
 * Cleanup after success happens in brw_finish; on error we unwind here.
 *
 * NOTE(review): many lines (declarations of xid/mapped, brace closures,
 * failure branches, RETURN paths) are elided from this excerpt.
 */
361 static int osc_brw_read(struct lustre_handle *conn, struct lov_stripe_md *lsm,
362 obd_count page_count, struct brw_page *pga,
363 brw_callback_t callback, struct io_cb_data *data)
365 struct ptlrpc_connection *connection =
366 client_conn2cli(conn)->cl_import.imp_connection;
367 struct ptlrpc_request *request = NULL;
368 struct ptlrpc_bulk_desc *desc = NULL;
369 struct ost_body *body;
370 struct osc_brw_cb_data *cb_data = NULL;
371 int rc, size[3] = {sizeof(*body)};
372 void *iooptr, *nioptr;
/* Request layout: [0] ost_body, [1] one obd_ioobj, [2] remote niobufs. */
377 size[1] = sizeof(struct obd_ioobj);
378 size[2] = page_count * sizeof(struct niobuf_remote);
380 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_READ, 3, size,
385 body = lustre_msg_buf(request->rq_reqmsg, 0);
387 desc = ptlrpc_prep_bulk(connection);
389 GOTO(out_req, rc = -ENOMEM);
390 desc->bd_portal = OST_BULK_PORTAL;
/* brw_finish frees cb_data and schedules unmap on bulk completion. */
391 desc->bd_cb = brw_finish;
392 OBD_ALLOC(cb_data, sizeof(*cb_data));
394 GOTO(out_desc, rc = -ENOMEM);
396 cb_data->callback = callback;
397 cb_data->cb_data = data;
398 CDEBUG(D_PAGE, "data(%p)->desc = %p\n", data, desc);
400 desc->bd_cb_data = cb_data;
402 iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
403 nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
404 ost_pack_ioo(&iooptr, lsm, page_count);
405 /* end almost identical to brw_write case */
/* Reserve one transfer id under the connection lock. */
407 spin_lock(&connection->c_lock);
408 xid = ++connection->c_xid_out; /* single xid for all pages */
409 spin_unlock(&connection->c_lock);
/* Attach and kmap every destination page; pages stay mapped until the
 * bottom half runs. */
411 for (mapped = 0; mapped < page_count; mapped++) {
412 struct ptlrpc_bulk_page *bulk = ptlrpc_prep_bulk_page(desc);
414 GOTO(out_unmap, rc = -ENOMEM);
416 bulk->bp_xid = xid; /* single xid for all pages */
418 bulk->bp_buf = kmap(pga[mapped].pg);
419 bulk->bp_page = pga[mapped].pg;
420 bulk->bp_buflen = PAGE_SIZE;
421 ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
422 pga[mapped].flag, bulk->bp_xid);
426 * Register the bulk first, because the reply could arrive out of order,
427 * and we want to be ready for the bulk data.
429 * The reference is released when brw_finish is complete.
431 * On error, we never do the brw_finish, so we handle all decrefs.
/* Fault-injection point: skip the bulk registration entirely. */
433 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_READ_BULK)) {
434 CERROR("obd_fail_loc=%x, skipping register_bulk\n",
435 OBD_FAIL_OSC_BRW_READ_BULK);
437 rc = ptlrpc_register_bulk(desc);
442 request->rq_replen = lustre_msg_size(1, size);
443 rc = ptlrpc_queue_wait(request);
444 rc = ptlrpc_check_status(request, rc);
447 * XXX: If there is an error during the processing of the callback,
448 * such as a timeout in a sleep that it performs, brw_finish
449 * will never get called, and we'll leak the desc, fail to kunmap
450 * things, cats will live with dogs. One solution would be to
451 * export brw_finish as osc_brw_finish, so that the timeout case
452 * and its kin could call it for proper cleanup. An alternative
453 * would be for an error return from the callback to cause us to
454 * clean up, but that doesn't help the truly async cases (like
455 * LOV), which will immediately return from their PHASE_START
456 * callback, before any such cleanup-requiring error condition can
462 /* Callbacks cause asynchronous handling. */
463 rc = callback(data, 0, CB_PHASE_START);
466 ptlrpc_req_finished(request);
469 /* Clean up on error. */
/* Unwind: unmap whatever we kmapped, free the context, drop the desc. */
472 kunmap(pga[mapped].pg);
473 OBD_FREE(cb_data, sizeof(*cb_data));
475 ptlrpc_bulk_decref(desc);
/*
 * osc_brw_write: write page_count pages from pga[] to the OST.
 *
 * Flow: send a 3-buffer OST_WRITE request describing the I/O, wait for
 * the server's reply (which returns per-page niobuf_remote descriptors
 * with the xids to use), then build the bulk source from the kmapped
 * pages and push the data with ptlrpc_send_bulk. The niobuf_local
 * array is stashed in cb_data so brw_finish can free it.
 *
 * NOTE(review): many lines (declaration of mapped, brace closures,
 * failure branches, RETURN paths) are elided from this excerpt.
 */
479 static int osc_brw_write(struct lustre_handle *conn, struct lov_stripe_md *md,
480 obd_count page_count, struct brw_page *pga,
481 brw_callback_t callback, struct io_cb_data *data)
483 struct ptlrpc_connection *connection =
484 client_conn2cli(conn)->cl_import.imp_connection;
485 struct ptlrpc_request *request = NULL;
486 struct ptlrpc_bulk_desc *desc = NULL;
487 struct ost_body *body;
488 struct niobuf_local *local = NULL;
489 struct niobuf_remote *remote;
490 struct osc_brw_cb_data *cb_data = NULL;
491 int rc, j, size[3] = {sizeof(*body)};
492 void *iooptr, *nioptr;
/* Request layout: [0] ost_body, [1] one obd_ioobj, [2] remote niobufs. */
496 size[1] = sizeof(struct obd_ioobj);
497 size[2] = page_count * sizeof(*remote);
499 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_WRITE, 3, size,
504 body = lustre_msg_buf(request->rq_reqmsg, 0);
506 desc = ptlrpc_prep_bulk(connection);
508 GOTO(out_req, rc = -ENOMEM);
509 desc->bd_portal = OSC_BULK_PORTAL;
510 desc->bd_cb = brw_finish;
511 OBD_ALLOC(cb_data, sizeof(*cb_data));
513 GOTO(out_desc, rc = -ENOMEM);
515 cb_data->callback = callback;
516 cb_data->cb_data = data;
517 CDEBUG(D_PAGE, "data(%p)->desc = %p\n", data, desc);
519 desc->bd_cb_data = cb_data;
521 iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
522 nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
523 ost_pack_ioo(&iooptr, md, page_count);
524 /* end almost identical to brw_read case */
/* Track the kmapped source buffers; ownership passes to cb_data so
 * brw_finish can free the array after the bulk completes. */
526 OBD_ALLOC(local, page_count * sizeof(*local));
528 GOTO(out_cb, rc = -ENOMEM);
530 cb_data->obd_data = local;
531 cb_data->obd_size = page_count * sizeof(*local);
533 for (mapped = 0; mapped < page_count; mapped++) {
534 local[mapped].addr = kmap(pga[mapped].pg);
536 CDEBUG(D_INFO, "kmap(pg) = %p ; pg->flags = %lx ; pg->count = "
537 "%d ; page %d of %d\n",
538 local[mapped].addr, pga[mapped].pg->flags,
539 page_count(pga[mapped].pg),
540 mapped, page_count - 1);
542 local[mapped].offset = pga[mapped].off;
543 local[mapped].len = pga[mapped].count;
/* xid 0 here: the real xids arrive in the server's reply below. */
544 ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
545 pga[mapped].flag, 0);
/* size[1] is reused: the reply layout is [0] ost_body, [1] remote
 * niobuf array (the obd_ioobj slot is not echoed back). */
548 size[1] = page_count * sizeof(*remote);
549 request->rq_replen = lustre_msg_size(2, size);
550 rc = ptlrpc_queue_wait(request);
551 rc = ptlrpc_check_status(request, rc);
/* Validate the reply's niobuf buffer before trusting its contents. */
555 nioptr = lustre_msg_buf(request->rq_repmsg, 1);
557 GOTO(out_unmap, rc = -EINVAL);
559 if (request->rq_repmsg->buflens[1] != size[1]) {
560 CERROR("buffer length wrong (%d vs. %d)\n",
561 request->rq_repmsg->buflens[1], size[1]);
562 GOTO(out_unmap, rc = -EINVAL);
/* Build the bulk source pages, pairing each local page with the xid
 * the server handed back. */
565 for (j = 0; j < page_count; j++) {
566 struct ptlrpc_bulk_page *bulk;
568 ost_unpack_niobuf(&nioptr, &remote);
570 bulk = ptlrpc_prep_bulk_page(desc);
572 GOTO(out_unmap, rc = -ENOMEM);
574 bulk->bp_buf = (void *)(unsigned long)local[j].addr;
575 bulk->bp_buflen = local[j].len;
576 bulk->bp_xid = remote->xid;
577 bulk->bp_page = pga[j].pg;
/* Sanity: every page must have made it onto the descriptor. */
580 if (desc->bd_page_count != page_count)
/* Fault-injection point: pretend success without sending the bulk. */
583 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_WRITE_BULK))
584 GOTO(out_unmap, rc = 0);
586 /* Our reference is released when brw_finish is complete. */
587 rc = ptlrpc_send_bulk(desc);
589 /* XXX: Mike, same question as in osc_brw_read. */
593 /* Callbacks cause asynchronous handling. */
594 rc = callback(data, 0, CB_PHASE_START);
597 ptlrpc_req_finished(request);
600 /* Clean up on error. */
/* Unwind: unmap kmapped pages, free the local array and context,
 * drop the bulk descriptor reference. */
603 kunmap(pga[mapped].pg);
605 OBD_FREE(local, page_count * sizeof(*local));
607 OBD_FREE(cb_data, sizeof(*cb_data));
609 ptlrpc_bulk_decref(desc);
/*
 * osc_brw: top-level bulk read/write entry point. Splits a large I/O
 * into chunks of at most PTL_MD_MAX_IOV pages (the Portals MD iovec
 * limit) and dispatches each chunk to osc_brw_write or osc_brw_read
 * depending on the OBD_BRW_WRITE bit in cmd.
 *
 * NOTE(review): the enclosing loop construct is elided from this view;
 * the decrement/advance at the bottom implies iteration over chunks.
 */
613 static int osc_brw(int cmd, struct lustre_handle *conn,
614 struct lov_stripe_md *md, obd_count page_count,
615 struct brw_page *pga, brw_callback_t callback,
616 struct io_cb_data *data)
621 obd_count pages_per_brw;
/* Clamp each chunk to the Portals iovec limit. */
624 if (page_count > PTL_MD_MAX_IOV)
625 pages_per_brw = PTL_MD_MAX_IOV;
627 pages_per_brw = page_count;
629 if (cmd & OBD_BRW_WRITE)
630 rc = osc_brw_write(conn, md, pages_per_brw, pga,
633 rc = osc_brw_read(conn, md, pages_per_brw, pga,
/* Advance to the next chunk of pages. */
639 page_count -= pages_per_brw;
640 pga += pages_per_brw;
/*
 * osc_enqueue: take a DLM extent lock on the object named by lsm.
 *
 * Order of attempts: (1) round the extent to page boundaries (unless it
 * is a file-size lock ending at OBD_OBJECT_EOF), (2) match an existing
 * compatible lock, (3) for reads, also try to reuse an existing PW
 * lock, (4) fall back to a fresh ldlm_cli_enqueue.
 *
 * NOTE(review): both ldlm_lock_match calls and the enqueue pass
 * sizeof(extent) — the size of the *pointer*, not of the
 * ldlm_extent it points to. That looks like it should be
 * sizeof(*extent); confirm against the ldlm API before relying on it.
 */
645 static int osc_enqueue(struct lustre_handle *connh, struct lov_stripe_md *lsm,
646 struct lustre_handle *parent_lock,
647 __u32 type, void *extentp, int extent_len, __u32 mode,
648 int *flags, void *callback, void *data, int datalen,
649 struct lustre_handle *lockh)
651 __u64 res_id[RES_NAME_SIZE] = { lsm->lsm_object_id };
652 struct obd_device *obddev = class_conn2obd(connh);
653 struct ldlm_extent *extent = extentp;
657 /* Filesystem locks are given a bit of special treatment: if
658 * this is not a file size lock (which has end == -1), we
659 * fixup the lock to start and end on page boundaries. */
660 if (extent->end != OBD_OBJECT_EOF) {
661 extent->start &= PAGE_MASK;
662 extent->end = (extent->end + PAGE_SIZE - 1) & PAGE_MASK;
665 /* Next, search for already existing extent locks that will cover us */
666 rc = ldlm_lock_match(obddev->obd_namespace, res_id, type, extent,
667 sizeof(extent), mode, lockh);
669 /* We already have a lock, and it's referenced */
672 /* If we're trying to read, we also search for an existing PW lock. The
673 * VFS and page cache already protect us locally, so lots of readers/
674 * writers can share a single PW lock.
676 * There are problems with conversion deadlocks, so instead of
677 * converting a read lock to a write lock, we'll just enqueue a new
680 * At some point we should cancel the read lock instead of making them
681 * send us a blocking callback, but there are problems with canceling
682 * locks out from other users right now, too. */
684 if (mode == LCK_PR) {
685 rc = ldlm_lock_match(obddev->obd_namespace, res_id, type,
686 extent, sizeof(extent), LCK_PW, lockh)
688 /* FIXME: This is not incredibly elegant, but it might
689 * be more elegant than adding another parameter to
690 * lock_match. I want a second opinion. */
/* Re-reference the matched PW lock under PR mode and drop the PW
 * reference lock_match took, so the caller holds a PR reference. */
691 ldlm_lock_addref(lockh, LCK_PR);
692 ldlm_lock_decref(lockh, LCK_PW);
/* No existing lock fits: enqueue a new one with the server. */
698 rc = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, parent_lock,
699 res_id, type, extent, sizeof(extent), mode, flags,
700 ldlm_completion_ast, callback, data, datalen,
/*
 * osc_cancel: release one reference on the lock in *lockh at the given
 * mode. md is not referenced by the visible statements.
 */
705 static int osc_cancel(struct lustre_handle *oconn, struct lov_stripe_md *md,
706 __u32 mode, struct lustre_handle *lockh)
710 ldlm_lock_decref(lockh, mode);
/*
 * osc_cancel_unused: cancel all unused DLM locks on the resource named
 * by lsm's object id, passing flags through to the ldlm layer.
 */
715 static int osc_cancel_unused(struct lustre_handle *connh,
716 struct lov_stripe_md *lsm, int flags)
718 struct obd_device *obddev = class_conn2obd(connh);
719 __u64 res_id[RES_NAME_SIZE] = { lsm->lsm_object_id };
721 return ldlm_cli_cancel_unused(obddev->obd_namespace, res_id, flags);
/*
 * osc_statfs: query filesystem statistics from the OST. Sends a
 * body-less OST_STATFS request and unpacks the reply buffer into
 * *osfs on success.
 */
724 static int osc_statfs(struct lustre_handle *conn, struct obd_statfs *osfs)
726 struct ptlrpc_request *request;
727 int rc, size = sizeof(*osfs);
730 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_STATFS, 0, NULL,
735 request->rq_replen = lustre_msg_size(1, &size);
737 rc = ptlrpc_queue_wait(request);
738 rc = ptlrpc_check_status(request, rc);
740 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
/* Decode the wire-format statfs into the caller's struct. */
744 obd_statfs_unpack(osfs, lustre_msg_buf(request->rq_repmsg, 0));
748 ptlrpc_req_finished(request);
/*
 * osc_iocontrol: ioctl dispatcher for the OSC device. Visible commands:
 *   IOC_LDLM_TEST           - run the ldlm self-test
 *   IOC_LDLM_REGRESS_START  - start the ldlm regression test, parsing up
 *                             to four whitespace-separated integers
 *                             (threads, held, resources, extents) from
 *                             inlbuf1, each defaulting to the values below
 *   IOC_LDLM_REGRESS_STOP   - stop the regression test
 *   IOC_OSC_REGISTER_LOV    - record the containing LOV device (once)
 * Unknown commands return -ENOTTY.
 */
752 static int osc_iocontrol(long cmd, struct lustre_handle *conn, int len,
753 void *karg, void *uarg)
755 struct obd_device *obddev = class_conn2obd(conn);
756 struct obd_ioctl_data *data = karg;
761 case IOC_LDLM_TEST: {
762 err = ldlm_test(obddev, conn);
763 CERROR("-- done err %d\n", err);
766 case IOC_LDLM_REGRESS_START: {
/* Defaults used when the corresponding number is absent. */
767 unsigned int numthreads = 1;
768 unsigned int numheld = 10;
769 unsigned int numres = 10;
770 unsigned int numext = 10;
773 if (data->ioc_inllen1) {
774 parse = data->ioc_inlbuf1;
/* Each clause skips leading whitespace, parses one unsigned
 * integer, and skips trailing whitespace before the next. */
775 if (*parse != '\0') {
776 while(isspace(*parse)) parse++;
777 numthreads = simple_strtoul(parse, &parse, 0);
778 while(isspace(*parse)) parse++;
780 if (*parse != '\0') {
781 while(isspace(*parse)) parse++;
782 numheld = simple_strtoul(parse, &parse, 0);
783 while(isspace(*parse)) parse++;
785 if (*parse != '\0') {
786 while(isspace(*parse)) parse++;
787 numres = simple_strtoul(parse, &parse, 0);
788 while(isspace(*parse)) parse++;
790 if (*parse != '\0') {
791 while(isspace(*parse)) parse++;
792 numext = simple_strtoul(parse, &parse, 0);
793 while(isspace(*parse)) parse++;
797 err = ldlm_regression_start(obddev, conn, numthreads,
798 numheld, numres, numext);
800 CERROR("-- done err %d\n", err);
803 case IOC_LDLM_REGRESS_STOP: {
804 err = ldlm_regression_stop();
805 CERROR("-- done err %d\n", err);
808 case IOC_OSC_REGISTER_LOV: {
/* Only one containing LOV may ever be registered. */
809 if (obddev->u.cli.cl_containing_lov)
810 GOTO(out, err = -EALREADY);
811 obddev->u.cli.cl_containing_lov = (struct obd_device *)karg;
816 GOTO(out, err = -ENOTTY);
/*
 * osc_attach: register this obd_device's /proc status variables
 * (status_var_nm_1) with lprocfs when the device is attached.
 */
822 int osc_attach(struct obd_device *dev,
823 obd_count len, void *data)
826 rc = lprocfs_reg_obd(dev, (lprocfs_vars_t*)status_var_nm_1, (void*)dev);
/*
 * osc_detach: unregister the device's lprocfs entries; counterpart of
 * osc_attach.
 */
830 int osc_detach(struct obd_device *dev)
833 rc = lprocfs_dereg_obd(dev);
/*
 * OBD method table exported by the OSC. Connection setup/teardown is
 * delegated to the generic client_obd_* helpers; the rest map to the
 * osc_* implementations above. (Some entries — e.g. open/close/punch/
 * brw — are elided from this view of the file; do not assume this
 * table is complete.)
 */
837 struct obd_ops osc_obd_ops = {
838 o_attach: osc_attach,
839 o_detach: osc_detach,
840 o_setup: client_obd_setup,
841 o_cleanup: client_obd_cleanup,
842 o_statfs: osc_statfs,
843 o_create: osc_create,
844 o_destroy: osc_destroy,
845 o_getattr: osc_getattr,
846 o_setattr: osc_setattr,
849 o_connect: client_obd_connect,
850 o_disconnect: client_obd_disconnect,
853 o_enqueue: osc_enqueue,
854 o_cancel: osc_cancel,
855 o_cancel_unused: osc_cancel_unused,
856 o_iocontrol: osc_iocontrol
/*
 * Module init: register the OSC obd type (with its method table and
 * lprocfs class variables) with the obd class subsystem.
 */
859 static int __init osc_init(void)
863 rc = class_register_type(&osc_obd_ops,
864 (lprocfs_vars_t*)status_class_var,
/* Module exit: unregister the OSC obd type; counterpart of osc_init. */
872 static void __exit osc_exit(void)
874 class_unregister_type(LUSTRE_OSC_NAME);
/* Standard kernel module metadata and entry/exit point registration. */
877 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
878 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC) v1.0");
879 MODULE_LICENSE("GPL");
881 module_init(osc_init);
882 module_exit(osc_exit);