1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
6 * This code is issued under the GNU General Public License.
7 * See the file COPYING in this distribution
9 * Author: Peter Braam <braam@clusterfs.com>
11 * This server is single threaded at present (but can easily be multi
12 * threaded). For testing and management it is treated as an
13 * obd_device, although it does not export a full OBD method table
14 * (the requests are coming in over the wire, so object target
15 * modules do not have a full method table.)
20 #define DEBUG_SUBSYSTEM S_OSC
22 #include <linux/module.h>
24 #include <linux/highmem.h>
25 #include <linux/lustre_dlm.h>
26 #include <linux/lustre_mds.h> /* for mds_objid */
27 #include <linux/obd_ost.h>
28 #include <linux/obd_lov.h>
29 #include <linux/ctype.h>
30 #include <linux/init.h>
31 #include <linux/lustre_ha.h>
32 #include <linux/obd_support.h> /* for OBD_FAIL_CHECK */
33 #include <linux/lustre_lite.h> /* for ll_i2info */
/*
 * osc_getattr(): fetch the attributes of one OST object.
 * Packs the caller's obdo into a single-buffer OST_GETATTR request, waits
 * synchronously for the reply, and copies the returned obdo back over *oa.
 * NOTE(review): this listing has lines elided by extraction (braces,
 * NULL checks on request/allocation, RETURN paths) -- compare against the
 * original source before editing.
 */
35 static int osc_getattr(struct lustre_handle *conn, struct obdo *oa,
36 struct lov_stripe_md *md)
38 struct ptlrpc_request *request;
39 struct ost_body *body;
40 int rc, size = sizeof(*body);
/* Build a one-buffer OST_GETATTR request on this connection's import. */
43 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_GETATTR, 1,
48 body = lustre_msg_buf(request->rq_reqmsg, 0);
49 #warning FIXME: pack only valid fields instead of memcpy, endianness
/* Whole-obdo copy into the wire buffer; not endian-safe (see #warning). */
50 memcpy(&body->oa, oa, sizeof(*oa));
52 request->rq_replen = lustre_msg_size(1, &size);
/* Send and wait; fold transport rc and server reply status together. */
54 rc = ptlrpc_queue_wait(request);
55 rc = ptlrpc_check_status(request, rc);
57 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
/* Success: copy the server's view of the attributes back to the caller. */
61 body = lustre_msg_buf(request->rq_repmsg, 0);
62 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
64 memcpy(oa, &body->oa, sizeof(*oa));
/* Request is freed on all paths (this is the common-exit cleanup). */
68 ptlrpc_free_req(request);
/*
 * osc_open(): open an object on the OST.
 * Same synchronous RPC pattern as osc_getattr(), with opcode OST_OPEN:
 * pack *oa into the request, wait, copy the reply obdo back over *oa.
 * NOTE(review): error-check and brace lines appear elided in this listing.
 */
72 static int osc_open(struct lustre_handle *conn, struct obdo *oa,
73 struct lov_stripe_md *md)
75 struct ptlrpc_request *request;
76 struct ost_body *body;
77 int rc, size = sizeof(*body);
80 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_OPEN, 1, &size,
85 body = lustre_msg_buf(request->rq_reqmsg, 0);
86 #warning FIXME: pack only valid fields instead of memcpy, endianness
/* Whole-obdo copy into the wire buffer; not endian-safe (see #warning). */
87 memcpy(&body->oa, oa, sizeof(*oa));
89 request->rq_replen = lustre_msg_size(1, &size);
/* Send and wait; fold transport rc and server reply status together. */
91 rc = ptlrpc_queue_wait(request);
92 rc = ptlrpc_check_status(request, rc);
/* Success: propagate the server-updated obdo back to the caller. */
96 body = lustre_msg_buf(request->rq_repmsg, 0);
97 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
99 memcpy(oa, &body->oa, sizeof(*oa));
103 ptlrpc_free_req(request);
/*
 * osc_close(): close an object on the OST.
 * Mirror image of osc_open() with opcode OST_CLOSE; same pack/wait/copy
 * sequence. NOTE(review): lines elided in this listing (see osc_getattr).
 */
107 static int osc_close(struct lustre_handle *conn, struct obdo *oa,
108 struct lov_stripe_md *md)
110 struct ptlrpc_request *request;
111 struct ost_body *body;
112 int rc, size = sizeof(*body);
115 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CLOSE, 1, &size,
120 body = lustre_msg_buf(request->rq_reqmsg, 0);
121 #warning FIXME: pack only valid fields instead of memcpy, endianness
/* Whole-obdo copy into the wire buffer; not endian-safe (see #warning). */
122 memcpy(&body->oa, oa, sizeof(*oa));
124 request->rq_replen = lustre_msg_size(1, &size);
/* Send and wait; fold transport rc and server reply status together. */
126 rc = ptlrpc_queue_wait(request);
127 rc = ptlrpc_check_status(request, rc);
/* Success: propagate the server-updated obdo back to the caller. */
131 body = lustre_msg_buf(request->rq_repmsg, 0);
132 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
134 memcpy(oa, &body->oa, sizeof(*oa));
138 ptlrpc_free_req(request);
/*
 * osc_setattr(): push attribute changes in *oa to the OST (OST_SETATTR).
 * Unlike the getattr/open/close paths, nothing from the reply body is
 * copied back -- only the status code matters.
 * NOTE(review): lines elided in this listing (braces, NULL checks, RETURN).
 */
142 static int osc_setattr(struct lustre_handle *conn, struct obdo *oa,
143 struct lov_stripe_md *md)
145 struct ptlrpc_request *request;
146 struct ost_body *body;
147 int rc, size = sizeof(*body);
148 /* Prep a one-buffer request, ship the whole obdo (endianness caveat
   applies here too, same as the other memcpy-based packers). */
150 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_SETATTR, 1,
155 body = lustre_msg_buf(request->rq_reqmsg, 0);
156 memcpy(&body->oa, oa, sizeof(*oa));
158 request->rq_replen = lustre_msg_size(1, &size);
160 rc = ptlrpc_queue_wait(request);
161 rc = ptlrpc_check_status(request, rc);
163 ptlrpc_free_req(request);
/*
 * osc_create(): create a new object on the OST and allocate the caller's
 * stripe metadata (*ea).  On success the new object id from the reply obdo
 * is recorded in (*ea)->lsm_object_id.
 * Ownership: *ea is OBD_ALLOC'd here (oa->o_easize bytes); presumably the
 * caller frees it -- TODO confirm against callers.
 * NOTE(review): allocation-failure checks and RETURN paths are elided in
 * this listing.
 */
167 static int osc_create(struct lustre_handle *conn, struct obdo *oa,
168 struct lov_stripe_md **ea)
170 struct ptlrpc_request *request;
171 struct ost_body *body;
172 int rc, size = sizeof(*body);
185 // XXX check oa->o_valid & OBD_MD_FLEASIZE first...
186 OBD_ALLOC(*ea, oa->o_easize);
189 (*ea)->lsm_mds_easize = oa->o_easize;
/* Standard one-buffer OST RPC: pack obdo, wait, check status. */
192 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_CREATE, 1, &size,
197 body = lustre_msg_buf(request->rq_reqmsg, 0);
198 memcpy(&body->oa, oa, sizeof(*oa));
200 request->rq_replen = lustre_msg_size(1, &size);
202 rc = ptlrpc_queue_wait(request);
203 rc = ptlrpc_check_status(request, rc);
/* Success: copy reply obdo back, then record the new object id in *ea. */
207 body = lustre_msg_buf(request->rq_repmsg, 0);
208 memcpy(oa, &body->oa, sizeof(*oa));
210 (*ea)->lsm_object_id = oa->o_id;
211 (*ea)->lsm_stripe_count = 0;
214 ptlrpc_free_req(request);
/*
 * osc_punch(): punch (truncate) the byte range [start, end] of an object.
 * The range is smuggled through the obdo: o_blocks carries 'start' and
 * o_size carries 'end' (the in-code #warning questions this mapping).
 * NOTE(review): the 'end' parameter and several check/brace lines are
 * elided in this listing.
 */
218 static int osc_punch(struct lustre_handle *conn, struct obdo *oa,
219 struct lov_stripe_md *md, obd_size start,
222 struct ptlrpc_request *request;
223 struct ost_body *body;
224 int rc, size = sizeof(*body);
232 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_PUNCH, 1, &size,
237 body = lustre_msg_buf(request->rq_reqmsg, 0);
238 #warning FIXME: pack only valid fields instead of memcpy, endianness, valid
239 memcpy(&body->oa, oa, sizeof(*oa));
241 /* overload the blocks and size fields in the oa with start/end */
242 #warning FIXME: endianness, size=start, blocks=end?
243 body->oa.o_blocks = start;
244 body->oa.o_size = end;
/* Mark the overloaded fields valid so the server will look at them. */
245 body->oa.o_valid |= OBD_MD_FLBLOCKS | OBD_MD_FLSIZE;
247 request->rq_replen = lustre_msg_size(1, &size);
/* Send and wait; fold transport rc and server reply status together. */
249 rc = ptlrpc_queue_wait(request);
250 rc = ptlrpc_check_status(request, rc);
/* Success: copy the server's post-punch obdo back to the caller. */
254 body = lustre_msg_buf(request->rq_repmsg, 0);
255 memcpy(oa, &body->oa, sizeof(*oa));
259 ptlrpc_free_req(request);
/*
 * osc_destroy(): destroy an object on the OST (OST_DESTROY).
 * Same synchronous one-buffer RPC shape as osc_getattr(); the reply obdo
 * is copied back over *oa on success.
 * NOTE(review): lines elided in this listing (braces, NULL checks, RETURN).
 */
263 static int osc_destroy(struct lustre_handle *conn, struct obdo *oa,
264 struct lov_stripe_md *ea)
266 struct ptlrpc_request *request;
267 struct ost_body *body;
268 int rc, size = sizeof(*body);
275 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_DESTROY, 1,
280 body = lustre_msg_buf(request->rq_reqmsg, 0);
281 #warning FIXME: pack only valid fields instead of memcpy, endianness
/* Whole-obdo copy into the wire buffer; not endian-safe (see #warning). */
282 memcpy(&body->oa, oa, sizeof(*oa))
284 request->rq_replen = lustre_msg_size(1, &size);
/* Send and wait; fold transport rc and server reply status together. */
286 rc = ptlrpc_queue_wait(request);
287 rc = ptlrpc_check_status(request, rc);
/* Success: copy the reply obdo back to the caller. */
291 body = lustre_msg_buf(request->rq_repmsg, 0);
292 memcpy(oa, &body->oa, sizeof(*oa));
296 ptlrpc_free_req(request);
/*
 * Per-BRW completion context, attached to the bulk descriptor via
 * bd_cb_data and consumed/freed by brw_finish().
 * NOTE(review): the remaining members (cb_data, obd_data, obd_size --
 * all referenced by brw_finish and osc_brw_write) are elided in this
 * listing.
 */
300 struct osc_brw_cb_data {
301 brw_callback_t callback;
307 /* Our bulk-unmapping bottom half. */
/*
 * Runs from task-queue (process) context because kunmap() cannot be
 * called from the interrupt context in which bulk completion fires
 * (see brw_finish, which schedules this).  Walks every page attached
 * to the descriptor, unmaps it, then drops the descriptor reference.
 */
308 static void unmap_and_decref_bulk_desc(void *data)
310 struct ptlrpc_bulk_desc *desc = data;
311 struct list_head *tmp;
314 /* This feels wrong to me. */
315 list_for_each(tmp, &desc->bd_page_list) {
316 struct ptlrpc_bulk_page *bulk;
317 bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
/* Undo the kmap() done when the page was queued for bulk I/O. */
319 kunmap(bulk->bp_page);
/* Drop the reference taken for the bulk transfer; may free desc. */
322 ptlrpc_bulk_decref(desc);
/*
 * brw_finish(): bulk-completion callback (bd_cb) for both the read and
 * write BRW paths.  Translates timeout/interrupt flags into an error,
 * invokes the caller's CB_PHASE_FINISH callback, frees the per-BRW
 * context, and defers page unmapping to a bottom half (we may be in
 * interrupt context here, where kunmap is not allowed).
 * NOTE(review): declaration of 'err' and the non-timeout branch are
 * elided in this listing.
 */
326 static void brw_finish(struct ptlrpc_bulk_desc *desc, void *data)
328 struct osc_brw_cb_data *cb_data = data;
332 if (desc->bd_flags & PTL_RPC_FL_TIMEOUT) {
333 err = (desc->bd_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
/* Hand the result to the upper layer's completion callback, if any. */
337 if (cb_data->callback)
338 cb_data->callback(cb_data->cb_data, err, CB_PHASE_FINISH);
/* Free the write path's niobuf_local array (obd_data) and the context. */
340 if (cb_data->obd_data)
341 OBD_FREE(cb_data->obd_data, cb_data->obd_size);
342 OBD_FREE(cb_data, sizeof(*cb_data));
344 /* We can't kunmap the desc from interrupt context, so we do it from
345 * the bottom half above. */
346 INIT_TQUEUE(&desc->bd_queue, 0, 0);
347 PREPARE_TQUEUE(&desc->bd_queue, unmap_and_decref_bulk_desc, desc);
348 schedule_task(&desc->bd_queue);
/*
 * osc_brw_read(): bulk-read 'page_count' pages described by pga[] from
 * the OST.  Builds a 3-buffer OST_READ request (body, obd_ioobj,
 * niobuf_remote array), registers a bulk descriptor to sink the data,
 * queues the RPC, then kicks the caller's CB_PHASE_START callback;
 * completion is asynchronous via brw_finish().
 * NOTE(review): many lines are elided in this listing -- declarations of
 * xid/mapped, ENTRY/RETURN, NULL checks after the prep calls, the error
 * labels (out_req/out_desc/out_unmap) and their loop structure.
 */
353 static int osc_brw_read(struct lustre_handle *conn, struct lov_stripe_md *md,
354 obd_count page_count, struct brw_page *pga,
355 brw_callback_t callback, struct io_cb_data *data)
357 struct ptlrpc_connection *connection =
358 client_conn2cli(conn)->cl_import.imp_connection;
359 struct ptlrpc_request *request = NULL;
360 struct ptlrpc_bulk_desc *desc = NULL;
361 struct ost_body *body;
362 struct osc_brw_cb_data *cb_data = NULL;
363 int rc, size[3] = {sizeof(*body)};
364 void *iooptr, *nioptr;
/* Request layout: [0]=ost_body, [1]=one ioobj, [2]=per-page niobufs. */
369 size[1] = sizeof(struct obd_ioobj);
370 size[2] = page_count * sizeof(struct niobuf_remote);
372 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_READ, 3, size,
377 body = lustre_msg_buf(request->rq_reqmsg, 0);
/* Bulk sink descriptor; brw_finish runs when the transfer completes. */
379 desc = ptlrpc_prep_bulk(connection);
381 GOTO(out_req, rc = -ENOMEM);
382 desc->bd_portal = OST_BULK_PORTAL;
383 desc->bd_cb = brw_finish;
384 OBD_ALLOC(cb_data, sizeof(*cb_data));
386 GOTO(out_desc, rc = -ENOMEM);
388 cb_data->callback = callback;
389 cb_data->cb_data = data;
391 desc->bd_cb_data = cb_data;
/* Pack the I/O object and the remote niobuf array into buffers 1 and 2. */
393 iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
394 nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
395 ost_pack_ioo(&iooptr, md, page_count);
396 /* end almost identical to brw_write case */
/* Allocate one transfer id under the connection lock. */
398 spin_lock(&connection->c_lock);
399 xid = ++connection->c_xid_out; /* single xid for all pages */
400 spin_unlock(&connection->c_lock);
/* Map each destination page and attach it to the bulk descriptor. */
402 for (mapped = 0; mapped < page_count; mapped++) {
403 struct ptlrpc_bulk_page *bulk = ptlrpc_prep_bulk_page(desc);
405 GOTO(out_unmap, rc = -ENOMEM);
407 bulk->bp_xid = xid; /* single xid for all pages */
409 bulk->bp_buf = kmap(pga[mapped].pg);
410 bulk->bp_page = pga[mapped].pg;
411 bulk->bp_buflen = PAGE_SIZE;
412 ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
413 pga[mapped].flag, bulk->bp_xid);
417 * Register the bulk first, because the reply could arrive out of order,
418 * and we want to be ready for the bulk data.
420 * The reference is released when brw_finish is complete.
422 * On error, we never do the brw_finish, so we handle all decrefs.
424 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_READ_BULK)) {
425 CERROR("obd_fail_loc=%x, skipping register_bulk\n",
426 OBD_FAIL_OSC_BRW_READ_BULK);
428 rc = ptlrpc_register_bulk(desc);
/* Reply carries only the ost_body; send and fold status into rc. */
433 request->rq_replen = lustre_msg_size(1, size);
434 rc = ptlrpc_queue_wait(request);
435 rc = ptlrpc_check_status(request, rc);
438 * XXX: If there is an error during the processing of the callback,
439 * such as a timeout in a sleep that it performs, brw_finish
440 * will never get called, and we'll leak the desc, fail to kunmap
441 * things, cats will live with dogs. One solution would be to
442 * export brw_finish as osc_brw_finish, so that the timeout case
443 * and its kin could call it for proper cleanup. An alternative
444 * would be for an error return from the callback to cause us to
445 * clean up, but that doesn't help the truly async cases (like
446 * LOV), which will immediately return from their PHASE_START
447 * callback, before any such cleanup-requiring error condition can
453 /* Callbacks cause asynchronous handling. */
454 rc = callback(data, 0, CB_PHASE_START);
457 ptlrpc_req_finished(request);
460 /* Clean up on error. */
/* Error path: unmap whatever was mapped, free context, drop the bulk. */
463 kunmap(pga[mapped].pg);
464 OBD_FREE(cb_data, sizeof(*cb_data));
466 ptlrpc_bulk_decref(desc);
/*
 * osc_brw_write(): bulk-write 'page_count' pages described by pga[] to
 * the OST.  Builds the same 3-buffer request as the read path (opcode
 * OST_WRITE), but waits for the server's reply FIRST: the reply contains
 * the niobuf_remote array with server-assigned xids, which are then used
 * to ptlrpc_send_bulk() the page data.  The niobuf_local array is kept
 * alive (via cb_data->obd_data) until brw_finish() frees it.
 * NOTE(review): many lines are elided in this listing -- ENTRY/RETURN,
 * the 'mapped' declaration, NULL checks, and the error-label structure
 * (out_req/out_desc/out_cb/out_unmap).
 */
470 static int osc_brw_write(struct lustre_handle *conn, struct lov_stripe_md *md,
471 obd_count page_count, struct brw_page *pga,
472 brw_callback_t callback, struct io_cb_data *data)
474 struct ptlrpc_connection *connection =
475 client_conn2cli(conn)->cl_import.imp_connection;
476 struct ptlrpc_request *request = NULL;
477 struct ptlrpc_bulk_desc *desc = NULL;
478 struct ost_body *body;
479 struct niobuf_local *local = NULL;
480 struct niobuf_remote *remote;
481 struct osc_brw_cb_data *cb_data = NULL;
482 int rc, j, size[3] = {sizeof(*body)};
483 void *iooptr, *nioptr;
/* Request layout: [0]=ost_body, [1]=one ioobj, [2]=per-page niobufs. */
487 size[1] = sizeof(struct obd_ioobj);
488 size[2] = page_count * sizeof(*remote);
490 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_WRITE, 3, size,
495 body = lustre_msg_buf(request->rq_reqmsg, 0);
/* Bulk source descriptor; note OSC_BULK_PORTAL here vs. OST_BULK_PORTAL
   on the read path. */
497 desc = ptlrpc_prep_bulk(connection);
499 GOTO(out_req, rc = -ENOMEM);
500 desc->bd_portal = OSC_BULK_PORTAL;
501 desc->bd_cb = brw_finish;
502 OBD_ALLOC(cb_data, sizeof(*cb_data));
504 GOTO(out_desc, rc = -ENOMEM);
506 cb_data->callback = callback;
507 cb_data->cb_data = data;
509 desc->bd_cb_data = cb_data;
511 iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
512 nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
513 ost_pack_ioo(&iooptr, md, page_count);
514 /* end almost identical to brw_read case */
/* Local scatter list; ownership passes to cb_data, freed in brw_finish. */
516 OBD_ALLOC(local, page_count * sizeof(*local));
518 GOTO(out_cb, rc = -ENOMEM);
520 cb_data->obd_data = local;
521 cb_data->obd_size = page_count * sizeof(*local);
/* Map each source page and pack its niobuf (xid 0: server assigns xids). */
523 for (mapped = 0; mapped < page_count; mapped++) {
524 local[mapped].addr = kmap(pga[mapped].pg);
526 CDEBUG(D_INFO, "kmap(pg) = %p ; pg->flags = %lx ; pg->count = "
527 "%d ; page %d of %d\n",
528 local[mapped].addr, pga[mapped].pg->flags,
529 page_count(pga[mapped].pg),
530 mapped, page_count - 1);
532 local[mapped].offset = pga[mapped].off;
533 local[mapped].len = pga[mapped].count;
534 ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
535 pga[mapped].flag, 0);
/* Reply: ost_body plus the server-filled niobuf_remote array. */
538 size[1] = page_count * sizeof(*remote);
539 request->rq_replen = lustre_msg_size(2, size);
540 rc = ptlrpc_queue_wait(request);
541 rc = ptlrpc_check_status(request, rc);
/* Validate the reply's niobuf buffer before trusting its contents. */
545 nioptr = lustre_msg_buf(request->rq_repmsg, 1);
547 GOTO(out_unmap, rc = -EINVAL);
549 if (request->rq_repmsg->buflens[1] != size[1]) {
550 CERROR("buffer length wrong (%d vs. %d)\n",
551 request->rq_repmsg->buflens[1], size[1]);
552 GOTO(out_unmap, rc = -EINVAL);
/* Attach each mapped page to the bulk using the server-assigned xid. */
555 for (j = 0; j < page_count; j++) {
556 struct ptlrpc_bulk_page *bulk;
558 ost_unpack_niobuf(&nioptr, &remote);
560 bulk = ptlrpc_prep_bulk_page(desc);
562 GOTO(out_unmap, rc = -ENOMEM);
564 bulk->bp_buf = (void *)(unsigned long)local[j].addr;
565 bulk->bp_buflen = local[j].len;
566 bulk->bp_xid = remote->xid;
567 bulk->bp_page = pga[j].pg;
/* Sanity: every page must have made it onto the descriptor. */
570 if (desc->bd_page_count != page_count)
573 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_WRITE_BULK))
574 GOTO(out_unmap, rc = 0);
576 /* Our reference is released when brw_finish is complete. */
577 rc = ptlrpc_send_bulk(desc);
579 /* XXX: Mike, same question as in osc_brw_read. */
583 /* Callbacks cause asynchronous handling. */
584 rc = callback(data, 0, CB_PHASE_START);
587 ptlrpc_req_finished(request);
590 /* Clean up on error. */
/* Error path: unmap pages, free the local array and context, drop bulk. */
593 kunmap(pga[mapped].pg);
595 OBD_FREE(local, page_count * sizeof(*local));
597 OBD_FREE(cb_data, sizeof(*cb_data));
599 ptlrpc_bulk_decref(desc);
603 static int osc_brw(int cmd, struct lustre_handle *conn,
604 struct lov_stripe_md *md, obd_count page_count,
605 struct brw_page *pga, brw_callback_t callback,
606 struct io_cb_data *data)
608 if (cmd & OBD_BRW_WRITE)
609 return osc_brw_write(conn, md, page_count, pga, callback, data);
611 return osc_brw_read(conn, md, page_count, pga, callback, data);
/*
 * osc_enqueue(): take an LDLM extent lock covering the caller's range.
 * First page-aligns the extent, then tries to match an existing lock of
 * the requested mode; for PR requests it will also piggyback on an
 * existing PW lock (addref PR, decref PW) to avoid conversion deadlocks.
 * Falls through to a full ldlm_cli_enqueue() when no match is found.
 *
 * FIX(review): the three ldlm calls previously passed sizeof(extent) --
 * the size of a *pointer* -- as the extent-data length.  The data being
 * passed is *extent (a struct ldlm_extent), so the length must be
 * sizeof(*extent); corrected at all three sites below.
 *
 * NOTE(review): this listing has lines elided by extraction ('rc'
 * declaration, ENTRY, the rc==1 match checks, RETURN paths, closing
 * braces) -- compare against the original source before editing.
 */
614 static int osc_enqueue(struct lustre_handle *connh, struct lov_stripe_md *lsm,
615 struct lustre_handle *parent_lock,
616 __u32 type, void *extentp, int extent_len, __u32 mode,
617 int *flags, void *callback, void *data, int datalen,
618 struct lustre_handle *lockh)
620 __u64 res_id[RES_NAME_SIZE] = { lsm->lsm_object_id };
621 struct obd_device *obddev = class_conn2obd(connh);
622 struct ldlm_extent *extent = extentp;
626 /* Filesystem locks are given a bit of special treatment: first we
627 * fixup the lock to start and end on page boundaries. */
628 extent->start &= PAGE_MASK;
629 extent->end = (extent->end + PAGE_SIZE - 1) & PAGE_MASK;
631 /* Next, search for already existing extent locks that will cover us */
632 rc = ldlm_lock_match(obddev->obd_namespace, res_id, type, extent,
633 sizeof(*extent), mode, lockh);
635 RETURN(0); /* We already have a lock, and it's referenced */
637 /* If we're trying to read, we also search for an existing PW lock. The
638 * VFS and page cache already protect us locally, so lots of readers/
639 * writers can share a single PW lock.
641 * There are problems with conversion deadlocks, so instead of
642 * converting a read lock to a write lock, we'll just enqueue a new
645 * At some point we should cancel the read lock instead of making them
646 * send us a blocking callback, but there are problems with canceling
647 * locks out from other users right now, too. */
649 if (mode == LCK_PR) {
650 rc = ldlm_lock_match(obddev->obd_namespace, res_id, type,
651 extent, sizeof(*extent), LCK_PW, lockh);
653 /* FIXME: This is not incredibly elegant, but it might
654 * be more elegant than adding another parameter to
655 * lock_match. I want a second opinion. */
656 ldlm_lock_addref(lockh, LCK_PR);
657 ldlm_lock_decref(lockh, LCK_PW);
663 rc = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, parent_lock,
664 res_id, type, extent, sizeof(*extent), mode, flags,
665 ldlm_completion_ast, callback, data, datalen,
/*
 * osc_cancel(): release one reference on a held lock.  Despite the name
 * this only decrefs the lock (mode must match the enqueue mode); actual
 * cancellation happens elsewhere (e.g. via blocking callbacks or
 * osc_cancel_unused).  'oconn' and 'md' are unused here.
 * NOTE(review): braces/ENTRY/RETURN lines are elided in this listing.
 */
670 static int osc_cancel(struct lustre_handle *oconn, struct lov_stripe_md *md,
671 __u32 mode, struct lustre_handle *lockh)
675 ldlm_lock_decref(lockh, mode);
/*
 * osc_cancel_unused(): cancel all unused locks on the resource named by
 * lsm->lsm_object_id in this device's namespace.  'local' is forwarded
 * to ldlm_cli_cancel_unused (presumably local-only vs. server-visible
 * cancellation -- TODO confirm against the LDLM API).
 */
680 static int osc_cancel_unused(struct lustre_handle *connh,
681 struct lov_stripe_md *lsm, int local)
683 struct obd_device *obddev = class_conn2obd(connh);
684 __u64 res_id[RES_NAME_SIZE] = { lsm->lsm_object_id };
686 return ldlm_cli_cancel_unused(obddev->obd_namespace, res_id, local);
/*
 * osc_statfs(): query filesystem statistics from the OST (OST_STATFS).
 * The request carries no buffers; the reply holds a packed obd_statfs
 * which is unpacked into *osfs on success.
 * NOTE(review): NULL check on request and RETURN paths are elided in this
 * listing.
 */
689 static int osc_statfs(struct lustre_handle *conn, struct obd_statfs *osfs)
691 struct ptlrpc_request *request;
692 int rc, size = sizeof(*osfs);
695 request = ptlrpc_prep_req(class_conn2cliimp(conn), OST_STATFS, 0, NULL,
700 request->rq_replen = lustre_msg_size(1, &size);
/* Send and wait; fold transport rc and server reply status together. */
702 rc = ptlrpc_queue_wait(request);
703 rc = ptlrpc_check_status(request, rc);
705 CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
/* Success: unpack the wire-format statfs into the caller's struct. */
709 obd_statfs_unpack(osfs, lustre_msg_buf(request->rq_repmsg, 0));
713 ptlrpc_free_req(request);
/*
 * osc_iocontrol(): ioctl handler for the LDLM test/regression commands.
 * Validates that the command is in the IOC_LDLM range, then dispatches:
 *   IOC_LDLM_TEST          - run ldlm_test()
 *   IOC_LDLM_REGRESS_START - parse up to four whitespace-separated
 *                            unsigned ints (threads, held, res, ext) from
 *                            inlbuf1 and start the regression test
 *   IOC_LDLM_REGRESS_STOP  - stop the regression test
 * NOTE(review): the CDEBUG below passes _IOC_TYPE/_IOC_NR/_IOC_SIZE with
 * "%ld"; these macros presumably expand to unsigned int on Linux, so the
 * format specifiers look mismatched -- confirm and fix with the full
 * source in hand.
 * NOTE(review): 'err' and 'parse' declarations, GOTO/out label, switch
 * header, and closing braces are elided in this listing.
 */
717 static int osc_iocontrol(long cmd, struct lustre_handle *conn, int len,
718 void *karg, void *uarg)
720 struct obd_device *obddev = class_conn2obd(conn);
721 struct obd_ioctl_data *data = karg;
/* Reject anything outside the LDLM ioctl number space. */
725 if (_IOC_TYPE(cmd) != IOC_LDLM_TYPE ||
726 _IOC_NR(cmd) < IOC_LDLM_MIN_NR || _IOC_NR(cmd) > IOC_LDLM_MAX_NR) {
727 CDEBUG(D_IOCTL, "invalid ioctl (type %ld, nr %ld, size %ld)\n",
728 _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
733 case IOC_LDLM_TEST: {
734 err = ldlm_test(obddev, conn);
735 CERROR("-- done err %d\n", err);
738 case IOC_LDLM_REGRESS_START: {
/* Defaults used when the corresponding field is absent from inlbuf1. */
739 unsigned int numthreads = 1;
740 unsigned int numheld = 10;
741 unsigned int numres = 10;
742 unsigned int numext = 10;
/* Parse "threads held res ext" -- each token optional, left to right. */
745 if (data->ioc_inllen1) {
746 parse = data->ioc_inlbuf1;
747 if (*parse != '\0') {
748 while(isspace(*parse)) parse++;
749 numthreads = simple_strtoul(parse, &parse, 0);
750 while(isspace(*parse)) parse++;
752 if (*parse != '\0') {
753 while(isspace(*parse)) parse++;
754 numheld = simple_strtoul(parse, &parse, 0);
755 while(isspace(*parse)) parse++;
757 if (*parse != '\0') {
758 while(isspace(*parse)) parse++;
759 numres = simple_strtoul(parse, &parse, 0);
760 while(isspace(*parse)) parse++;
762 if (*parse != '\0') {
763 while(isspace(*parse)) parse++;
764 numext = simple_strtoul(parse, &parse, 0);
765 while(isspace(*parse)) parse++;
769 err = ldlm_regression_start(obddev, conn, numthreads,
770 numheld, numres, numext);
772 CERROR("-- done err %d\n", err);
775 case IOC_LDLM_REGRESS_STOP: {
776 err = ldlm_regression_stop();
777 CERROR("-- done err %d\n", err);
/* Unknown command within the LDLM range. */
781 GOTO(out, err = -EINVAL);
/*
 * OBD method table for the OSC device type, registered in osc_init().
 * Connection setup/teardown is delegated to the generic client_obd_*
 * helpers; object and lock operations use the osc_* functions above.
 * NOTE(review): gaps in the listing's numbering (795-796, 799-800, ...)
 * suggest additional entries (presumably o_open/o_close/o_brw/o_punch)
 * were elided by extraction -- confirm against the original source.
 */
787 struct obd_ops osc_obd_ops = {
788 o_setup: client_obd_setup,
789 o_cleanup: client_obd_cleanup,
790 o_statfs: osc_statfs,
791 o_create: osc_create,
792 o_destroy: osc_destroy,
793 o_getattr: osc_getattr,
794 o_setattr: osc_setattr,
797 o_connect: client_obd_connect,
798 o_disconnect: client_obd_disconnect,
801 o_enqueue: osc_enqueue,
802 o_cancel: osc_cancel,
803 o_cancel_unused: osc_cancel_unused,
804 o_iocontrol: osc_iocontrol
807 static int __init osc_init(void)
809 return class_register_type(&osc_obd_ops, LUSTRE_OSC_NAME);
812 static void __exit osc_exit(void)
814 class_unregister_type(LUSTRE_OSC_NAME);
/* Kernel module metadata and init/exit hook registration. */
817 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
818 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC) v1.0");
819 MODULE_LICENSE("GPL");
821 module_init(osc_init);
822 module_exit(osc_exit);