/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see [sun.com URL with a
 * copy of GPLv2].
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/darwin/darwin-prim.c
 *
 * Darwin porting library
 * Make things easy to port
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <mach/mach_types.h>
#include <string.h>
#include <sys/file.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <miscfs/devfs/devfs.h>
#include <kern/thread.h>

#include <libcfs/libcfs.h>

/*
 * cfs pseudo device, actually a pseudo char device on Darwin
 */
#define KLNET_MAJOR  -1

kern_return_t
cfs_psdev_register(cfs_psdev_t *dev)
{
        dev->index = cdevsw_add(KLNET_MAJOR, dev->devsw);
        if (dev->index < 0) {
                printf("libcfs_init: failed to allocate a major number!\n");
                return KERN_FAILURE;
        }
        dev->handle = devfs_make_node(makedev(dev->index, 0),
                                      DEVFS_CHAR, UID_ROOT,
                                      GID_WHEEL, 0666, (char *)dev->name, 0);
        return KERN_SUCCESS;
}

kern_return_t
cfs_psdev_deregister(cfs_psdev_t *dev)
{
        devfs_remove(dev->handle);
        cdevsw_remove(dev->index, dev->devsw);
        return KERN_SUCCESS;
}
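
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller might register the pseudo char device.  "my_devsw" and "my_psdev"
 * are hypothetical names; cfs_psdev_t is assumed to expose the .name and
 * .devsw members used by the functions above.
 *
 *      static struct cdevsw    my_devsw;       // open/close/ioctl entry points
 *      static cfs_psdev_t      my_psdev = {
 *              .name  = "lnet",
 *              .devsw = &my_devsw,
 *      };
 *
 *      if (cfs_psdev_register(&my_psdev) != KERN_SUCCESS)
 *              return KERN_FAILURE;            // no major number available
 *      ...
 *      cfs_psdev_deregister(&my_psdev);        // drops devfs node and cdevsw slot
 */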

/*
 * KPortal symbol register / unregister support
 */
struct rw_semaphore             cfs_symbol_lock;
struct list_head                cfs_symbol_list;

void *
cfs_symbol_get(const char *name)
{
        struct list_head    *walker;
        struct cfs_symbol   *sym;

        down_read(&cfs_symbol_lock);
        list_for_each(walker, &cfs_symbol_list) {
                sym = list_entry(walker, struct cfs_symbol, sym_list);
                if (!strcmp(sym->name, name)) {
                        sym->ref++;
                        up_read(&cfs_symbol_lock);
                        return sym->value;
                }
        }
        up_read(&cfs_symbol_lock);

        return NULL;
}

kern_return_t
cfs_symbol_put(const char *name)
{
        struct list_head    *walker;
        struct cfs_symbol   *sym = NULL;

        down_read(&cfs_symbol_lock);
        list_for_each(walker, &cfs_symbol_list) {
                sym = list_entry(walker, struct cfs_symbol, sym_list);
                if (!strcmp(sym->name, name)) {
                        sym->ref--;
                        LASSERT(sym->ref >= 0);
                        break;
                }
        }
        up_read(&cfs_symbol_lock);
        LASSERT(sym != NULL);

        return 0;
}

kern_return_t
cfs_symbol_register(const char *name, const void *value)
{
        struct list_head    *walker;
        struct cfs_symbol   *sym = NULL;
        struct cfs_symbol   *new = NULL;

        MALLOC(new, struct cfs_symbol *, sizeof(struct cfs_symbol),
               M_TEMP, M_WAITOK|M_ZERO);
        strncpy(new->name, name, CFS_SYMBOL_LEN);
        new->value = (void *)value;
        new->ref = 0;
        CFS_INIT_LIST_HEAD(&new->sym_list);

        down_write(&cfs_symbol_lock);
        list_for_each(walker, &cfs_symbol_list) {
                sym = list_entry(walker, struct cfs_symbol, sym_list);
                if (!strcmp(sym->name, name)) {
                        up_write(&cfs_symbol_lock);
                        FREE(new, M_TEMP);
                        return KERN_NAME_EXISTS;
                }
        }
        list_add_tail(&new->sym_list, &cfs_symbol_list);
        up_write(&cfs_symbol_lock);

        return KERN_SUCCESS;
}

kern_return_t
cfs_symbol_unregister(const char *name)
{
        struct list_head    *walker;
        struct list_head    *nxt;
        struct cfs_symbol   *sym = NULL;

        down_write(&cfs_symbol_lock);
        list_for_each_safe(walker, nxt, &cfs_symbol_list) {
                sym = list_entry(walker, struct cfs_symbol, sym_list);
                if (!strcmp(sym->name, name)) {
                        LASSERT(sym->ref == 0);
                        list_del(&sym->sym_list);
                        FREE(sym, M_TEMP);
                        break;
                }
        }
        up_write(&cfs_symbol_lock);

        return KERN_SUCCESS;
}
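
/*
 * Usage sketch (illustrative only, not part of the original file): exporting
 * a function through the symbol table above and looking it up from another
 * module.  "my_hello" is a hypothetical function name.
 *
 *      extern int my_hello(void);
 *
 *      // provider side
 *      cfs_symbol_register("my_hello", my_hello);
 *
 *      // consumer side
 *      int (*hello)(void) = (int (*)(void))cfs_symbol_get("my_hello");
 *      if (hello != NULL) {
 *              hello();
 *              cfs_symbol_put("my_hello");     // drop the ref taken by _get
 *      }
 *
 *      // provider side, at unload time (after all refs are dropped)
 *      cfs_symbol_unregister("my_hello");
 */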

void
cfs_symbol_init()
{
        CFS_INIT_LIST_HEAD(&cfs_symbol_list);
        init_rwsem(&cfs_symbol_lock);
}

void
cfs_symbol_fini()
{
        struct list_head    *walker;
        struct list_head    *nxt;
        struct cfs_symbol   *sym = NULL;

        down_write(&cfs_symbol_lock);
        /* use the _safe iterator: each entry is freed while walking the list */
        list_for_each_safe(walker, nxt, &cfs_symbol_list) {
                sym = list_entry(walker, struct cfs_symbol, sym_list);
                LASSERT(sym->ref == 0);
                list_del(&sym->sym_list);
                FREE(sym, M_TEMP);
        }
        up_write(&cfs_symbol_lock);

        fini_rwsem(&cfs_symbol_lock);
        return;
}

struct kernel_thread_arg
{
        spinlock_t      lock;
        atomic_t        inuse;
        cfs_thread_t    func;
        void            *arg;
};

struct kernel_thread_arg cfs_thread_arg;

#define THREAD_ARG_FREE                 0
#define THREAD_ARG_HOLD                 1
#define THREAD_ARG_RECV                 2

#define set_targ_stat(a, v)             atomic_set(&(a)->inuse, v)
#define get_targ_stat(a)                atomic_read(&(a)->inuse)

/*
 * Hold the thread argument and set its status to THREAD_ARG_HOLD.
 * If the argument is already held by another thread (its status is
 * THREAD_ARG_HOLD already), the current thread has to wait.
 */
#define thread_arg_hold(pta, _func, _arg)                       \
        do {                                                    \
                spin_lock(&(pta)->lock);                        \
                if (get_targ_stat(pta) == THREAD_ARG_FREE) {    \
                        set_targ_stat((pta), THREAD_ARG_HOLD);  \
                        (pta)->arg = (void *)_arg;              \
                        (pta)->func = _func;                    \
                        spin_unlock(&(pta)->lock);              \
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
                cfs_schedule();                                 \
        } while (1)

/*
 * Release the thread argument if it has been received by the child
 * thread (status of thread_arg is THREAD_ARG_RECV); otherwise the
 * current thread has to wait.  After release, the status is set back
 * to THREAD_ARG_FREE and others can reuse the thread_arg to create a
 * new kernel thread.
 */
#define thread_arg_release(pta)                                 \
        do {                                                    \
                spin_lock(&(pta)->lock);                        \
                if (get_targ_stat(pta) == THREAD_ARG_RECV) {    \
                        (pta)->arg = NULL;                      \
                        (pta)->func = NULL;                     \
                        set_targ_stat(pta, THREAD_ARG_FREE);    \
                        spin_unlock(&(pta)->lock);              \
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
                cfs_schedule();                                 \
        } while (1)

/*
 * Receive the thread argument (used in the child thread) and set the
 * status of thread_arg to THREAD_ARG_RECV.
 */
#define __thread_arg_recv_fin(pta, _func, _arg, fin)            \
        do {                                                    \
                spin_lock(&(pta)->lock);                        \
                if (get_targ_stat(pta) == THREAD_ARG_HOLD) {    \
                        if (fin)                                \
                            set_targ_stat(pta, THREAD_ARG_RECV);\
                        _arg = (pta)->arg;                      \
                        _func = (pta)->func;                    \
                        spin_unlock(&(pta)->lock);              \
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
                cfs_schedule();                                 \
        } while (1)

/*
 * Just set the thread_arg's status to THREAD_ARG_RECV
 */
#define thread_arg_fin(pta)                                     \
        do {                                                    \
                spin_lock(&(pta)->lock);                        \
                assert( get_targ_stat(pta) == THREAD_ARG_HOLD); \
                set_targ_stat(pta, THREAD_ARG_RECV);            \
                spin_unlock(&(pta)->lock);                      \
        } while(0)

#define thread_arg_recv(pta, f, a)      __thread_arg_recv_fin(pta, f, a, 1)
#define thread_arg_keep(pta, f, a)      __thread_arg_recv_fin(pta, f, a, 0)

void
cfs_thread_agent_init(void)
{
        set_targ_stat(&cfs_thread_arg, THREAD_ARG_FREE);
        spin_lock_init(&cfs_thread_arg.lock);
        cfs_thread_arg.arg = NULL;
        cfs_thread_arg.func = NULL;
}

void
cfs_thread_agent_fini(void)
{
        assert(get_targ_stat(&cfs_thread_arg) == THREAD_ARG_FREE);

        spin_lock_done(&cfs_thread_arg.lock);
}

/*
 * Every request to create a kernel thread spawns a new instance of
 * cfs_thread_agent; requests are handed over one at a time through
 * cfs_thread_arg.  cfs_thread_agent then calls the caller's thread
 * function with the argument supplied by the caller.
 */
void
cfs_thread_agent(void)
{
        cfs_thread_t           func = NULL;
        void                   *arg = NULL;

        thread_arg_recv(&cfs_thread_arg, func, arg);
        /* printf("entry of thread agent (func: %08lx).\n", (void *)func); */
        assert(func != NULL);
        func(arg);
        /* printf("thread agent exit. (func: %08lx)\n", (void *)func); */
        (void) thread_terminate(current_thread());
}

extern thread_t kernel_thread(task_t task, void (*start)(void));

int
cfs_kernel_thread(cfs_thread_t func, void *arg, int flag)
{
        int ret = 0;
        thread_t th = NULL;

        thread_arg_hold(&cfs_thread_arg, func, arg);
        th = kernel_thread(kernel_task, cfs_thread_agent);
        thread_arg_release(&cfs_thread_arg);
        if (th == THREAD_NULL)
                ret = -1;
        return ret;
}
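
/*
 * Usage sketch (illustrative only, not part of the original file): starting a
 * kernel thread through the agent above.  "my_thread_main" is a hypothetical
 * function whose prototype is assumed to match cfs_thread_t as defined in the
 * libcfs Darwin headers; the flag argument is ignored on this port.
 *
 *      // my_thread_main(void *arg): runs in the new thread, then returns
 *      if (cfs_kernel_thread(my_thread_main, my_data, 0) != 0)
 *              printf("failed to create kernel thread\n");
 *
 * Note that cfs_kernel_thread() blocks in thread_arg_release() until the
 * child has copied func/arg out of the shared cfs_thread_arg slot.
 */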

void cfs_daemonize(char *str)
{
        snprintf(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX, "%s", str);
        return;
}

/*
 * XXX Liang: kexts cannot access the sigmask in Darwin 8; it is almost
 * impossible for us to get/set the signal mask without patching the
 * kernel.  Should we provide these functions in xnu?
 *
 * These signal functions do almost nothing for now; signal handling on
 * Darwin needs more investigation.
 */
cfs_sigset_t cfs_get_blockedsigs()
{
        return (cfs_sigset_t)0;
}

extern int block_procsigmask(struct proc *p, int bit);

cfs_sigset_t cfs_block_allsigs()
{
        cfs_sigset_t    old = 0;
#ifdef __DARWIN8__
#else
        block_procsigmask(current_proc(), -1);
#endif
        return old;
}

cfs_sigset_t cfs_block_sigs(sigset_t bit)
{
        cfs_sigset_t    old = 0;
#ifdef __DARWIN8__
#else
        block_procsigmask(current_proc(), bit);
#endif
        return old;
}

void cfs_restore_sigs(cfs_sigset_t old)
{
}

int cfs_signal_pending(void)
{
#ifdef __DARWIN8__
        extern int thread_issignal(proc_t, thread_t, sigset_t);
        return thread_issignal(current_proc(), current_thread(), (sigset_t)-1);
#else
        return SHOULDissignal(current_proc(), current_uthread());
#endif
}

void cfs_clear_sigpending(void)
{
#ifdef __DARWIN8__
#else
        clear_procsiglist(current_proc(), -1);
#endif
}

#ifdef __DARWIN8__

#else /* !__DARWIN8__ */

void lustre_cone_in(boolean_t *state, funnel_t **cone)
{
        *cone = thread_funnel_get();
        if (*cone == network_flock)
                thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        else if (*cone == NULL)
                *state = thread_funnel_set(kernel_flock, TRUE);
}

void lustre_cone_ex(boolean_t state, funnel_t *cone)
{
        if (cone == network_flock)
                thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
        else if (cone == NULL)
                (void) thread_funnel_set(kernel_flock, state);
}

void lustre_net_in(boolean_t *state, funnel_t **cone)
{
        *cone = thread_funnel_get();
        if (*cone == kernel_flock)
                thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
        else if (*cone == NULL)
                *state = thread_funnel_set(network_flock, TRUE);
}

void lustre_net_ex(boolean_t state, funnel_t *cone)
{
        if (cone == kernel_flock)
                thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        else if (cone == NULL)
                (void) thread_funnel_set(network_flock, state);
}
#endif /* !__DARWIN8__ */

void cfs_waitq_init(struct cfs_waitq *waitq)
{
        ksleep_chan_init(&waitq->wq_ksleep_chan);
}

void cfs_waitlink_init(struct cfs_waitlink *link)
{
        ksleep_link_init(&link->wl_ksleep_link);
}

void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
        link->wl_waitq = waitq;
        ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}

void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
                             struct cfs_waitlink *link)
{
        link->wl_waitq = waitq;
        link->wl_ksleep_link.flags |= KSLEEP_EXCLUSIVE;
        ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}

void cfs_waitq_forward(struct cfs_waitlink *link,
                       struct cfs_waitq *waitq)
{
        link->wl_ksleep_link.forward = &waitq->wq_ksleep_chan;
}

void cfs_waitq_del(struct cfs_waitq *waitq,
                   struct cfs_waitlink *link)
{
        ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}

int cfs_waitq_active(struct cfs_waitq *waitq)
{
        return (1);
}

void cfs_waitq_signal(struct cfs_waitq *waitq)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        ksleep_wake(&waitq->wq_ksleep_chan);
}

void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
{
        ksleep_wake_nr(&waitq->wq_ksleep_chan, nr);
}

void cfs_waitq_broadcast(struct cfs_waitq *waitq)
{
        ksleep_wake_all(&waitq->wq_ksleep_chan);
}

void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
{
        ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
}

cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
                                   cfs_task_state_t state,
                                   cfs_duration_t timeout)
{
        return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
                                state, timeout);
}
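
/*
 * Usage sketch (illustrative only, not part of the original file): the usual
 * wait/wake pattern built on the primitives above.  "my_waitq" and
 * "my_condition" are hypothetical, and CFS_TASK_INTERRUPTIBLE is assumed to
 * be one of the cfs_task_state_t values from the libcfs headers.
 *
 *      struct cfs_waitq        my_waitq;
 *      struct cfs_waitlink     link;
 *
 *      cfs_waitq_init(&my_waitq);
 *
 *      // waiter
 *      cfs_waitlink_init(&link);
 *      cfs_waitq_add(&my_waitq, &link);
 *      while (!my_condition)
 *              cfs_waitq_wait(&link, CFS_TASK_INTERRUPTIBLE);
 *      cfs_waitq_del(&my_waitq, &link);
 *
 *      // waker
 *      my_condition = 1;
 *      cfs_waitq_signal(&my_waitq);
 */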

typedef void (*ktimer_func_t)(void *);
void cfs_timer_init(cfs_timer_t *t, void (*func)(unsigned long), void *arg)
{
        ktimer_init(&t->t, (ktimer_func_t)func, arg);
}

void cfs_timer_done(struct cfs_timer *t)
{
        ktimer_done(&t->t);
}

void cfs_timer_arm(struct cfs_timer *t, cfs_time_t deadline)
{
        ktimer_arm(&t->t, deadline);
}

void cfs_timer_disarm(struct cfs_timer *t)
{
        ktimer_disarm(&t->t);
}

int cfs_timer_is_armed(struct cfs_timer *t)
{
        return ktimer_is_armed(&t->t);
}

cfs_time_t cfs_timer_deadline(struct cfs_timer *t)
{
        return ktimer_deadline(&t->t);
}
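
/*
 * Usage sketch (illustrative only, not part of the original file): arming a
 * one-shot timer with the wrappers above.  "my_timeout" is a hypothetical
 * callback, and cfs_time_shift() is assumed to be the libcfs helper that
 * returns "now + N seconds" as a cfs_time_t.
 *
 *      static void my_timeout(unsigned long data)
 *      {
 *              printf("timer fired\n");
 *      }
 *
 *      cfs_timer_t timer;
 *
 *      cfs_timer_init(&timer, my_timeout, NULL);
 *      cfs_timer_arm(&timer, cfs_time_shift(5));
 *      ...
 *      if (cfs_timer_is_armed(&timer))
 *              cfs_timer_disarm(&timer);
 *      cfs_timer_done(&timer);
 */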

void cfs_enter_debugger(void)
{
#ifdef __DARWIN8__
        extern void Debugger(const char *reason);
        Debugger("CFS");
#else
        extern void PE_enter_debugger(char *cause);
        PE_enter_debugger("CFS");
#endif
}

int cfs_online_cpus(void)
{
        int     activecpu;
        size_t  size;

#ifdef __DARWIN8__
        size = sizeof(int);
        sysctlbyname("hw.activecpu", &activecpu, &size, NULL, 0);
        return activecpu;
#else
        host_basic_info_data_t hinfo;
        kern_return_t kret;
        int count = HOST_BASIC_INFO_COUNT;
#define BSD_HOST 1
        kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count);
        if (kret == KERN_SUCCESS)
                return (hinfo.avail_cpus);
        return -EINVAL;
#endif
}

int cfs_ncpus(void)
{
        int     ncpu;
        size_t  size;

        size = sizeof(int);

        sysctlbyname("hw.ncpu", &ncpu, &size, NULL, 0);
        return ncpu;
}