1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
6 * This code is issued under the GNU General Public License.
7 * See the file COPYING in this distribution
9 * Author: Peter Braam <braam@clusterfs.com>
11 * This server is single threaded at present (but can easily be multi
12 * threaded). For testing and management it is treated as an
13 * obd_device, although it does not export a full OBD method table
14 * (the requests are coming in over the wire, so object target
15 * modules do not have a full method table.)
20 #define DEBUG_SUBSYSTEM S_OSC
22 #include <linux/module.h>
24 #include <linux/highmem.h>
25 #include <linux/lustre_dlm.h>
26 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
27 #include <linux/workqueue.h>
29 #include <linux/kp30.h>
30 #include <linux/lustre_mds.h> /* for mds_objid */
31 #include <linux/obd_ost.h>
32 #include <linux/obd_lov.h>
33 #include <linux/ctype.h>
34 #include <linux/init.h>
35 #include <linux/lustre_ha.h>
36 #include <linux/obd_support.h> /* for OBD_FAIL_CHECK */
37 #include <linux/lustre_lite.h> /* for ll_i2info */
/*
 * osc_getattr - read the attributes of one OST object over the wire.
 *
 * A copy of @oa is packed into an OST_GETATTR request; on success the
 * obdo from the reply overwrites @oa.  @md is not used by this call.
 *
 * NOTE(review): several original lines (braces, allocation checks,
 * RETURN statements) are elided from this excerpt, so the error paths
 * are only partly visible here.
 */
39 static int osc_getattr(struct lustre_handle *conn, struct obdo *oa,
40 struct lov_stripe_md *md)
42 struct ptlrpc_request *request;
43 struct ost_body *body;
44 int rc, size = sizeof(*body);
47 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_GETATTR, 1,
52 body = lustre_msg_buf(request->rq_reqmsg, 0);
53 #warning FIXME: pack only valid fields instead of memcpy, endianness
54 memcpy(&body->oa, oa, sizeof(*oa));
/* reply is expected to carry a single ost_body */
56 request->rq_replen = lustre_msg_size(1, &size);
58 rc = ptlrpc_queue_wait(request);
59 rc = ptlrpc_check_status(request, rc);
61 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
65 body = lustre_msg_buf(request->rq_repmsg, 0);
66 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
/* copy the server's view of the attributes back to the caller */
68 memcpy(oa, &body->oa, sizeof(*oa));
72 ptlrpc_req_finished(request);
/*
 * osc_open - open an object on the OST via an OST_OPEN RPC.
 *
 * Same wire pattern as osc_getattr: @oa is copied wholesale into the
 * request and refreshed from the reply body.  @md is not used here.
 */
76 static int osc_open(struct lustre_handle *conn, struct obdo *oa,
77 struct lov_stripe_md *md)
79 struct ptlrpc_request *request;
80 struct ost_body *body;
81 int rc, size = sizeof(*body);
84 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_OPEN, 1, &size,
89 body = lustre_msg_buf(request->rq_reqmsg, 0);
90 #warning FIXME: pack only valid fields instead of memcpy, endianness
91 memcpy(&body->oa, oa, sizeof(*oa));
93 request->rq_replen = lustre_msg_size(1, &size);
95 rc = ptlrpc_queue_wait(request);
96 rc = ptlrpc_check_status(request, rc);
100 body = lustre_msg_buf(request->rq_repmsg, 0);
101 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
/* server may have updated attributes (e.g. open count) -- take its copy */
103 memcpy(oa, &body->oa, sizeof(*oa));
107 ptlrpc_req_finished(request);
/*
 * osc_close - close an object on the OST via an OST_CLOSE RPC.
 *
 * Mirror image of osc_open: sends @oa, waits, and copies the reply
 * obdo back into @oa.  @md is not used here.
 */
111 static int osc_close(struct lustre_handle *conn, struct obdo *oa,
112 struct lov_stripe_md *md)
114 struct ptlrpc_request *request;
115 struct ost_body *body;
116 int rc, size = sizeof(*body);
119 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CLOSE, 1, &size,
124 body = lustre_msg_buf(request->rq_reqmsg, 0);
125 #warning FIXME: pack only valid fields instead of memcpy, endianness
126 memcpy(&body->oa, oa, sizeof(*oa));
128 request->rq_replen = lustre_msg_size(1, &size);
130 rc = ptlrpc_queue_wait(request);
131 rc = ptlrpc_check_status(request, rc);
135 body = lustre_msg_buf(request->rq_repmsg, 0);
136 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
138 memcpy(oa, &body->oa, sizeof(*oa));
142 ptlrpc_req_finished(request);
/*
 * osc_setattr - push attribute changes in @oa to the OST (OST_SETATTR).
 *
 * Unlike the getattr/open/close siblings above, the reply body is not
 * copied back into @oa -- the caller's obdo is treated as the source
 * of truth.  @md is not used here.
 */
146 static int osc_setattr(struct lustre_handle *conn, struct obdo *oa,
147 struct lov_stripe_md *md)
149 struct ptlrpc_request *request;
150 struct ost_body *body;
151 int rc, size = sizeof(*body);
154 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SETATTR, 1,
159 body = lustre_msg_buf(request->rq_reqmsg, 0);
160 memcpy(&body->oa, oa, sizeof(*oa));
162 request->rq_replen = lustre_msg_size(1, &size);
164 rc = ptlrpc_queue_wait(request);
165 rc = ptlrpc_check_status(request, rc);
167 ptlrpc_req_finished(request);
/*
 * osc_create - create a new object on the OST and build a stripe md.
 *
 * Allocates a lov_stripe_md sized by oa->o_easize, sends OST_CREATE,
 * and on success records the new object id in the lsm.  The lsm is
 * freed again on the error path (line 218); presumably on success it
 * is handed back through @ea -- the assignment is among the elided
 * lines, so confirm against the full source.
 */
171 static int osc_create(struct lustre_handle *conn, struct obdo *oa,
172 struct lov_stripe_md **ea)
174 struct ptlrpc_request *request;
175 struct ost_body *body;
176 struct lov_stripe_md *lsm;
177 int rc, size = sizeof(*body);
185 // XXX check oa->o_valid & OBD_MD_FLEASIZE first...
186 OBD_ALLOC(lsm, oa->o_easize);
189 lsm->lsm_mds_easize = oa->o_easize;
192 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CREATE, 1, &size,
195 GOTO(out, rc = -ENOMEM);
197 body = lustre_msg_buf(request->rq_reqmsg, 0);
198 memcpy(&body->oa, oa, sizeof(*oa));
200 request->rq_replen = lustre_msg_size(1, &size);
202 rc = ptlrpc_queue_wait(request);
203 rc = ptlrpc_check_status(request, rc);
207 body = lustre_msg_buf(request->rq_repmsg, 0);
/* reply carries the allocated object id inside the returned obdo */
208 memcpy(oa, &body->oa, sizeof(*oa));
210 lsm->lsm_object_id = oa->o_id;
211 lsm->lsm_stripe_count = 0;
215 ptlrpc_req_finished(request);
/* error path: give back the stripe md we allocated above */
218 OBD_FREE(lsm, oa->o_easize);
/*
 * osc_punch - punch (truncate) the byte range [start, end] of an object.
 *
 * The range is smuggled to the server inside the obdo: o_blocks carries
 * @start and o_size carries @end (see the FIXME below about settling
 * that convention).  The reply obdo is copied back into @oa.
 */
222 static int osc_punch(struct lustre_handle *conn, struct obdo *oa,
223 struct lov_stripe_md *md, obd_size start,
226 struct ptlrpc_request *request;
227 struct ost_body *body;
228 int rc, size = sizeof(*body);
236 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_PUNCH, 1, &size,
241 body = lustre_msg_buf(request->rq_reqmsg, 0);
242 #warning FIXME: pack only valid fields instead of memcpy, endianness, valid
243 memcpy(&body->oa, oa, sizeof(*oa));
245 /* overload the blocks and size fields in the oa with start/end */
246 #warning FIXME: endianness, size=start, blocks=end?
247 body->oa.o_blocks = start;
248 body->oa.o_size = end;
249 body->oa.o_valid |= OBD_MD_FLBLOCKS | OBD_MD_FLSIZE;
251 request->rq_replen = lustre_msg_size(1, &size);
253 rc = ptlrpc_queue_wait(request);
254 rc = ptlrpc_check_status(request, rc);
258 body = lustre_msg_buf(request->rq_repmsg, 0);
259 memcpy(oa, &body->oa, sizeof(*oa));
263 ptlrpc_req_finished(request);
/*
 * osc_destroy - destroy the object named by @oa on the OST (OST_DESTROY).
 *
 * Standard single-body RPC round trip; the reply obdo is copied back
 * into @oa.  @ea is not used in the visible lines.
 */
267 static int osc_destroy(struct lustre_handle *conn, struct obdo *oa,
268 struct lov_stripe_md *ea)
270 struct ptlrpc_request *request;
271 struct ost_body *body;
272 int rc, size = sizeof(*body);
279 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_DESTROY, 1,
284 body = lustre_msg_buf(request->rq_reqmsg, 0);
285 #warning FIXME: pack only valid fields instead of memcpy, endianness
286 memcpy(&body->oa, oa, sizeof(*oa));
288 request->rq_replen = lustre_msg_size(1, &size);
290 rc = ptlrpc_queue_wait(request);
291 rc = ptlrpc_check_status(request, rc);
295 body = lustre_msg_buf(request->rq_repmsg, 0);
296 memcpy(oa, &body->oa, sizeof(*oa));
300 ptlrpc_req_finished(request);
/* Per-BRW completion context attached to a bulk descriptor (bd_cb_data)
 * and consumed by brw_finish().  Only the callback member is visible in
 * this excerpt; cb_data, obd_data and obd_size are referenced later in
 * the file, so the remaining fields are among the elided lines. */
304 struct osc_brw_cb_data {
305 brw_callback_t callback;
311 /* Our bulk-unmapping bottom half. */
/*
 * Runs in process context (scheduled from brw_finish) because kunmap
 * must not be called from interrupt context.  Walks every page on the
 * descriptor's page list, unmaps it, then drops our reference on the
 * descriptor itself.
 */
312 static void unmap_and_decref_bulk_desc(void *data)
314 struct ptlrpc_bulk_desc *desc = data;
315 struct list_head *tmp;
318 /* This feels wrong to me. */
319 list_for_each(tmp, &desc->bd_page_list) {
320 struct ptlrpc_bulk_page *bulk;
321 bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
323 kunmap(bulk->bp_page);
/* release the reference taken when the bulk was set up */
326 ptlrpc_bulk_decref(desc);
/*
 * brw_finish - bulk-I/O completion callback (bd_cb) for both the read
 * and write paths.
 *
 * Translates descriptor flags into an error code, fires the caller's
 * CB_PHASE_FINISH callback, frees the per-BRW context, and defers the
 * kunmap/decref work to unmap_and_decref_bulk_desc via the work queue
 * (this may run in interrupt context, where kunmap is forbidden).
 */
330 static void brw_finish(struct ptlrpc_bulk_desc *desc, void *data)
332 struct osc_brw_cb_data *cb_data = data;
336 if (desc->bd_flags & PTL_RPC_FL_TIMEOUT) {
337 err = (desc->bd_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
341 if (cb_data->callback)
342 cb_data->callback(cb_data->cb_data, err, CB_PHASE_FINISH);
/* obd_data/obd_size only set by the write path (niobuf_local array) */
344 if (cb_data->obd_data)
345 OBD_FREE(cb_data->obd_data, cb_data->obd_size);
346 OBD_FREE(cb_data, sizeof(*cb_data));
348 /* We can't kunmap the desc from interrupt context, so we do it from
349 * the bottom half above. */
350 prepare_work(&desc->bd_queue, unmap_and_decref_bulk_desc, desc);
351 schedule_work(&desc->bd_queue);
/*
 * osc_brw_read - issue a bulk READ of @page_count pages from the OST.
 *
 * Builds an OST_READ request with three buffers (ost_body, obd_ioobj,
 * niobuf_remote array), maps each destination page and attaches it to a
 * bulk descriptor as a sink, registers the bulk *before* queueing the
 * request (the bulk data may arrive before the RPC reply), and finally
 * invokes the caller's CB_PHASE_START callback.  Completion and cleanup
 * happen asynchronously in brw_finish.
 *
 * NOTE(review): the elided lines include the local declarations of
 * 'xid' and 'mapped' plus most error-branch lines; the cleanup labels
 * visible at the bottom handle unmapping and freeing on failure.
 */
356 static int osc_brw_read(struct lustre_handle *conn, struct lov_stripe_md *lsm,
357 obd_count page_count, struct brw_page *pga,
358 brw_callback_t callback, struct io_cb_data *data)
360 struct ptlrpc_connection *connection =
361 client_conn2cli(conn)->cl_import.imp_connection;
362 struct ptlrpc_request *request = NULL;
363 struct ptlrpc_bulk_desc *desc = NULL;
364 struct ost_body *body;
365 struct osc_brw_cb_data *cb_data = NULL;
366 int rc, size[3] = {sizeof(*body)};
367 void *iooptr, *nioptr;
372 size[1] = sizeof(struct obd_ioobj);
373 size[2] = page_count * sizeof(struct niobuf_remote);
375 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_READ, 3, size,
380 body = lustre_msg_buf(request->rq_reqmsg, 0);
382 desc = ptlrpc_prep_bulk(connection);
384 GOTO(out_req, rc = -ENOMEM);
385 desc->bd_portal = OST_BULK_PORTAL;
386 desc->bd_cb = brw_finish;
387 OBD_ALLOC(cb_data, sizeof(*cb_data));
389 GOTO(out_desc, rc = -ENOMEM);
391 cb_data->callback = callback;
392 cb_data->cb_data = data;
394 desc->bd_cb_data = cb_data;
396 iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
397 nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
398 ost_pack_ioo(&iooptr, lsm, page_count);
399 /* end almost identical to brw_write case */
401 spin_lock(&connection->c_lock);
402 xid = ++connection->c_xid_out; /* single xid for all pages */
403 spin_unlock(&connection->c_lock);
405 for (mapped = 0; mapped < page_count; mapped++) {
406 struct ptlrpc_bulk_page *bulk = ptlrpc_prep_bulk_page(desc);
408 GOTO(out_unmap, rc = -ENOMEM);
410 bulk->bp_xid = xid; /* single xid for all pages */
/* kmap here; the matching kunmap is deferred to the bottom half */
412 bulk->bp_buf = kmap(pga[mapped].pg);
413 bulk->bp_page = pga[mapped].pg;
414 bulk->bp_buflen = PAGE_SIZE;
415 ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
416 pga[mapped].flag, bulk->bp_xid);
420 * Register the bulk first, because the reply could arrive out of order,
421 * and we want to be ready for the bulk data.
423 * The reference is released when brw_finish is complete.
425 * On error, we never do the brw_finish, so we handle all decrefs.
427 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_READ_BULK)) {
428 CERROR("obd_fail_loc=%x, skipping register_bulk\n",
429 OBD_FAIL_OSC_BRW_READ_BULK);
431 rc = ptlrpc_register_bulk(desc);
436 request->rq_replen = lustre_msg_size(1, size);
437 rc = ptlrpc_queue_wait(request);
438 rc = ptlrpc_check_status(request, rc);
441 * XXX: If there is an error during the processing of the callback,
442 * such as a timeout in a sleep that it performs, brw_finish
443 * will never get called, and we'll leak the desc, fail to kunmap
444 * things, cats will live with dogs. One solution would be to
445 * export brw_finish as osc_brw_finish, so that the timeout case
446 * and its kin could call it for proper cleanup. An alternative
447 * would be for an error return from the callback to cause us to
448 * clean up, but that doesn't help the truly async cases (like
449 * LOV), which will immediately return from their PHASE_START
450 * callback, before any such cleanup-requiring error condition can
456 /* Callbacks cause asynchronous handling. */
457 rc = callback(data, 0, CB_PHASE_START);
460 ptlrpc_req_finished(request);
463 /* Clean up on error. */
466 kunmap(pga[mapped].pg);
467 OBD_FREE(cb_data, sizeof(*cb_data));
469 ptlrpc_bulk_decref(desc);
/*
 * osc_brw_write - issue a bulk WRITE of @page_count pages to the OST.
 *
 * Almost identical setup to osc_brw_read, but the ordering differs:
 * the OST_WRITE request is queued *first*; the reply supplies a
 * niobuf_remote array containing the transfer xids, which are copied
 * onto the bulk pages before ptlrpc_send_bulk() pushes the data.
 * A niobuf_local array is allocated to remember the kmapped addresses
 * and is freed by brw_finish via cb_data->obd_data/obd_size.
 *
 * NOTE(review): as in the read path, the declarations of 'mapped' and
 * several error-branch lines are elided from this excerpt.
 */
473 static int osc_brw_write(struct lustre_handle *conn, struct lov_stripe_md *md,
474 obd_count page_count, struct brw_page *pga,
475 brw_callback_t callback, struct io_cb_data *data)
477 struct ptlrpc_connection *connection =
478 client_conn2cli(conn)->cl_import.imp_connection;
479 struct ptlrpc_request *request = NULL;
480 struct ptlrpc_bulk_desc *desc = NULL;
481 struct ost_body *body;
482 struct niobuf_local *local = NULL;
483 struct niobuf_remote *remote;
484 struct osc_brw_cb_data *cb_data = NULL;
485 int rc, j, size[3] = {sizeof(*body)};
486 void *iooptr, *nioptr;
490 size[1] = sizeof(struct obd_ioobj);
491 size[2] = page_count * sizeof(*remote);
493 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_WRITE, 3, size,
498 body = lustre_msg_buf(request->rq_reqmsg, 0);
500 desc = ptlrpc_prep_bulk(connection);
502 GOTO(out_req, rc = -ENOMEM);
/* note: write sources go out on the OSC portal, reads sink on OST's */
503 desc->bd_portal = OSC_BULK_PORTAL;
504 desc->bd_cb = brw_finish;
505 OBD_ALLOC(cb_data, sizeof(*cb_data));
507 GOTO(out_desc, rc = -ENOMEM);
509 cb_data->callback = callback;
510 cb_data->cb_data = data;
512 desc->bd_cb_data = cb_data;
514 iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
515 nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
516 ost_pack_ioo(&iooptr, md, page_count);
517 /* end almost identical to brw_read case */
519 OBD_ALLOC(local, page_count * sizeof(*local));
521 GOTO(out_cb, rc = -ENOMEM);
/* ownership of 'local' passes to brw_finish through cb_data */
523 cb_data->obd_data = local;
524 cb_data->obd_size = page_count * sizeof(*local);
526 for (mapped = 0; mapped < page_count; mapped++) {
527 local[mapped].addr = kmap(pga[mapped].pg);
529 CDEBUG(D_INFO, "kmap(pg) = %p ; pg->flags = %lx ; pg->count = "
530 "%d ; page %d of %d\n",
531 local[mapped].addr, pga[mapped].pg->flags,
532 page_count(pga[mapped].pg),
533 mapped, page_count - 1);
535 local[mapped].offset = pga[mapped].off;
536 local[mapped].len = pga[mapped].count;
537 ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
538 pga[mapped].flag, 0);
/* reply: ost_body plus the server's niobuf_remote array */
541 size[1] = page_count * sizeof(*remote);
542 request->rq_replen = lustre_msg_size(2, size);
543 rc = ptlrpc_queue_wait(request);
544 rc = ptlrpc_check_status(request, rc);
548 nioptr = lustre_msg_buf(request->rq_repmsg, 1);
550 GOTO(out_unmap, rc = -EINVAL);
552 if (request->rq_repmsg->buflens[1] != size[1]) {
553 CERROR("buffer length wrong (%d vs. %d)\n",
554 request->rq_repmsg->buflens[1], size[1]);
555 GOTO(out_unmap, rc = -EINVAL);
/* pair each local page with the xid the server assigned it */
558 for (j = 0; j < page_count; j++) {
559 struct ptlrpc_bulk_page *bulk;
561 ost_unpack_niobuf(&nioptr, &remote);
563 bulk = ptlrpc_prep_bulk_page(desc);
565 GOTO(out_unmap, rc = -ENOMEM);
567 bulk->bp_buf = (void *)(unsigned long)local[j].addr;
568 bulk->bp_buflen = local[j].len;
569 bulk->bp_xid = remote->xid;
570 bulk->bp_page = pga[j].pg;
573 if (desc->bd_page_count != page_count)
576 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_WRITE_BULK))
577 GOTO(out_unmap, rc = 0);
579 /* Our reference is released when brw_finish is complete. */
580 rc = ptlrpc_send_bulk(desc);
582 /* XXX: Mike, same question as in osc_brw_read. */
586 /* Callbacks cause asynchronous handling. */
587 rc = callback(data, 0, CB_PHASE_START);
590 ptlrpc_req_finished(request);
593 /* Clean up on error. */
596 kunmap(pga[mapped].pg);
598 OBD_FREE(local, page_count * sizeof(*local));
600 OBD_FREE(cb_data, sizeof(*cb_data));
602 ptlrpc_bulk_decref(desc);
/*
 * osc_brw - bulk read/write entry point; dispatches on the OBD_BRW_WRITE
 * bit of @cmd to the matching worker above.
 */
606 static int osc_brw(int cmd, struct lustre_handle *conn,
607 struct lov_stripe_md *md, obd_count page_count,
608 struct brw_page *pga, brw_callback_t callback,
609 struct io_cb_data *data)
611 if (cmd & OBD_BRW_WRITE)
612 return osc_brw_write(conn, md, page_count, pga, callback, data);
614 return osc_brw_read(conn, md, page_count, pga, callback, data);
/*
 * osc_enqueue - take a DLM extent lock on the object in @lsm.
 *
 * Page-aligns the requested extent (unless it is a file-size lock
 * ending at OBD_OBJECT_EOF), first tries to match an already-granted
 * lock of the requested mode, then -- for PR requests -- an existing
 * PW lock (readers can piggyback on a writer's lock), and only
 * enqueues a fresh lock if neither match succeeds.
 *
 * NOTE(review): ldlm_lock_match and ldlm_cli_enqueue are passed
 * sizeof(extent), the size of a *pointer*, where sizeof(*extent)
 * (the ldlm_extent struct) looks intended -- verify against the ldlm
 * API before changing; left untouched here.
 */
617 static int osc_enqueue(struct lustre_handle *connh, struct lov_stripe_md *lsm,
618 struct lustre_handle *parent_lock,
619 __u32 type, void *extentp, int extent_len, __u32 mode,
620 int *flags, void *callback, void *data, int datalen,
621 struct lustre_handle *lockh)
623 __u64 res_id[RES_NAME_SIZE] = { lsm->lsm_object_id };
624 struct obd_device *obddev = class_conn2obd(connh);
625 struct ldlm_extent *extent = extentp;
629 /* Filesystem locks are given a bit of special treatment: if
630 * this is not a file size lock (which has end == -1), we
631 * fixup the lock to start and end on page boundaries. */
632 if (extent->end != OBD_OBJECT_EOF) {
633 extent->start &= PAGE_MASK;
634 extent->end = (extent->end + PAGE_SIZE - 1) & PAGE_MASK;
637 /* Next, search for already existing extent locks that will cover us */
638 rc = ldlm_lock_match(obddev->obd_namespace, res_id, type, extent,
639 sizeof(extent), mode, lockh);
641 RETURN(0); /* We already have a lock, and it's referenced */
643 /* If we're trying to read, we also search for an existing PW lock. The
644 * VFS and page cache already protect us locally, so lots of readers/
645 * writers can share a single PW lock.
647 * There are problems with conversion deadlocks, so instead of
648 * converting a read lock to a write lock, we'll just enqueue a new
651 * At some point we should cancel the read lock instead of making them
652 * send us a blocking callback, but there are problems with canceling
653 * locks out from other users right now, too. */
655 if (mode == LCK_PR) {
656 rc = ldlm_lock_match(obddev->obd_namespace, res_id, type,
657 extent, sizeof(extent), LCK_PW, lockh)
659 /* FIXME: This is not incredibly elegant, but it might
660 * be more elegant than adding another parameter to
661 * lock_match. I want a second opinion. */
662 ldlm_lock_addref(lockh, LCK_PR);
663 ldlm_lock_decref(lockh, LCK_PW);
669 rc = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, parent_lock,
670 res_id, type, extent, sizeof(extent), mode, flags,
671 ldlm_completion_ast, callback, data, datalen,
/*
 * osc_cancel - release one reference on the lock in @lockh at @mode.
 * @oconn and @md are unused in the visible lines.
 */
676 static int osc_cancel(struct lustre_handle *oconn, struct lov_stripe_md *md,
677 __u32 mode, struct lustre_handle *lockh)
681 ldlm_lock_decref(lockh, mode);
/*
 * osc_cancel_unused - cancel all unused DLM locks on the resource named
 * by lsm->lsm_object_id; @local selects local-only cancellation.
 */
686 static int osc_cancel_unused(struct lustre_handle *connh,
687 struct lov_stripe_md *lsm, int local)
689 struct obd_device *obddev = class_conn2obd(connh);
690 __u64 res_id[RES_NAME_SIZE] = { lsm->lsm_object_id };
692 return ldlm_cli_cancel_unused(obddev->obd_namespace, res_id, local);
/*
 * osc_statfs - fetch filesystem statistics from the OST (OST_STATFS).
 * The zero-buffer request expects an obd_statfs reply, which is
 * byte-order-unpacked into @osfs.
 */
695 static int osc_statfs(struct lustre_handle *conn, struct obd_statfs *osfs)
697 struct ptlrpc_request *request;
698 int rc, size = sizeof(*osfs);
701 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_STATFS, 0, NULL,
706 request->rq_replen = lustre_msg_size(1, &size);
708 rc = ptlrpc_queue_wait(request);
709 rc = ptlrpc_check_status(request, rc);
711 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
715 obd_statfs_unpack(osfs, lustre_msg_buf(request->rq_repmsg, 0));
719 ptlrpc_req_finished(request);
/*
 * osc_iocontrol - ioctl handler exposing the LDLM test/regression hooks.
 *
 * Rejects anything outside the IOC_LDLM_* range, then dispatches:
 *   IOC_LDLM_TEST           - one-shot ldlm_test()
 *   IOC_LDLM_REGRESS_START  - start regression threads; up to four
 *                             whitespace-separated counts (threads,
 *                             held, resources, extents) are parsed
 *                             from ioc_inlbuf1, each defaulting as
 *                             initialized below
 *   IOC_LDLM_REGRESS_STOP   - stop the regression test
 */
723 static int osc_iocontrol(long cmd, struct lustre_handle *conn, int len,
724 void *karg, void *uarg)
726 struct obd_device *obddev = class_conn2obd(conn);
727 struct obd_ioctl_data *data = karg;
731 if (_IOC_TYPE(cmd) != IOC_LDLM_TYPE ||
732 _IOC_NR(cmd) < IOC_LDLM_MIN_NR || _IOC_NR(cmd) > IOC_LDLM_MAX_NR) {
733 CDEBUG(D_IOCTL, "invalid ioctl (type %ld, nr %ld, size %ld)\n",
734 _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
739 case IOC_LDLM_TEST: {
740 err = ldlm_test(obddev, conn);
741 CERROR("-- done err %d\n", err);
744 case IOC_LDLM_REGRESS_START: {
745 unsigned int numthreads = 1;
746 unsigned int numheld = 10;
747 unsigned int numres = 10;
748 unsigned int numext = 10;
751 if (data->ioc_inllen1) {
752 parse = data->ioc_inlbuf1;
753 if (*parse != '\0') {
754 while(isspace(*parse)) parse++;
755 numthreads = simple_strtoul(parse, &parse, 0);
756 while(isspace(*parse)) parse++;
758 if (*parse != '\0') {
759 while(isspace(*parse)) parse++;
760 numheld = simple_strtoul(parse, &parse, 0);
761 while(isspace(*parse)) parse++;
763 if (*parse != '\0') {
764 while(isspace(*parse)) parse++;
765 numres = simple_strtoul(parse, &parse, 0);
766 while(isspace(*parse)) parse++;
768 if (*parse != '\0') {
769 while(isspace(*parse)) parse++;
770 numext = simple_strtoul(parse, &parse, 0);
771 while(isspace(*parse)) parse++;
775 err = ldlm_regression_start(obddev, conn, numthreads,
776 numheld, numres, numext);
778 CERROR("-- done err %d\n", err);
781 case IOC_LDLM_REGRESS_STOP: {
782 err = ldlm_regression_stop();
783 CERROR("-- done err %d\n", err);
787 GOTO(out, err = -EINVAL);
/*
 * OBD method table for the OSC: setup/teardown and connect/disconnect
 * come from the generic client_obd_* helpers; the object, lock and
 * statfs operations are the local functions above.  (Some entries,
 * e.g. o_brw and o_punch, are among the elided lines of this excerpt.)
 */
793 struct obd_ops osc_obd_ops = {
794 o_setup: client_obd_setup,
795 o_cleanup: client_obd_cleanup,
796 o_statfs: osc_statfs,
797 o_create: osc_create,
798 o_destroy: osc_destroy,
799 o_getattr: osc_getattr,
800 o_setattr: osc_setattr,
803 o_connect: client_obd_connect,
804 o_disconnect: client_obd_disconnect,
807 o_enqueue: osc_enqueue,
808 o_cancel: osc_cancel,
809 o_cancel_unused: osc_cancel_unused,
810 o_iocontrol: osc_iocontrol
/* Module init: register the OSC device type with the OBD class layer. */
813 static int __init osc_init(void)
815 return class_register_type(&osc_obd_ops, LUSTRE_OSC_NAME);
/* Module exit: unregister the OSC device type. */
818 static void __exit osc_exit(void)
820 class_unregister_type(LUSTRE_OSC_NAME);
823 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
824 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC) v1.0");
825 MODULE_LICENSE("GPL");
827 module_init(osc_init);
828 module_exit(osc_exit);