1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
5 * Author: Phil Schwan <phil@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * Darwin porting library
 * Make things easy to port
 */
25 #define DEBUG_SUBSYSTEM S_LNET
27 #include <mach/mach_types.h>
32 #include <sys/filedesc.h>
33 #include <sys/namei.h>
34 #include <miscfs/devfs/devfs.h>
35 #include <kern/thread.h>
37 #include <libcfs/libcfs.h>
38 #include <libcfs/kp30.h>
/*
 * cfs pseudo device, actually pseudo char device in darwin
 */
43 #define KLNET_MAJOR -1
45 kern_return_t cfs_psdev_register(cfs_psdev_t *dev) {
46 dev->index = cdevsw_add(KLNET_MAJOR, dev->devsw);
48 printf("libcfs_init: failed to allocate a major number!\n");
51 dev->handle = devfs_make_node(makedev (dev->index, 0),
53 GID_WHEEL, 0666, (char *)dev->name, 0);
57 kern_return_t cfs_psdev_deregister(cfs_psdev_t *dev) {
58 devfs_remove(dev->handle);
59 cdevsw_remove(dev->index, dev->devsw);
/*
 * KPortal symbol register / unregister support
 */
66 struct rw_semaphore cfs_symbol_lock;
67 struct list_head cfs_symbol_list;
70 cfs_symbol_get(const char *name)
72 struct list_head *walker;
73 struct cfs_symbol *sym = NULL;
75 down_read(&cfs_symbol_lock);
76 list_for_each(walker, &cfs_symbol_list) {
77 sym = list_entry (walker, struct cfs_symbol, sym_list);
78 if (!strcmp(sym->name, name)) {
83 up_read(&cfs_symbol_lock);
90 cfs_symbol_put(const char *name)
92 struct list_head *walker;
93 struct cfs_symbol *sym = NULL;
95 down_read(&cfs_symbol_lock);
96 list_for_each(walker, &cfs_symbol_list) {
97 sym = list_entry (walker, struct cfs_symbol, sym_list);
98 if (!strcmp(sym->name, name)) {
100 LASSERT(sym->ref >= 0);
104 up_read(&cfs_symbol_lock);
105 LASSERT(sym != NULL);
111 cfs_symbol_register(const char *name, const void *value)
113 struct list_head *walker;
114 struct cfs_symbol *sym = NULL;
115 struct cfs_symbol *new = NULL;
117 MALLOC(new, struct cfs_symbol *, sizeof(struct cfs_symbol), M_TEMP, M_WAITOK|M_ZERO);
118 strncpy(new->name, name, CFS_SYMBOL_LEN);
119 new->value = (void *)value;
121 CFS_INIT_LIST_HEAD(&new->sym_list);
123 down_write(&cfs_symbol_lock);
124 list_for_each(walker, &cfs_symbol_list) {
125 sym = list_entry (walker, struct cfs_symbol, sym_list);
126 if (!strcmp(sym->name, name)) {
127 up_write(&cfs_symbol_lock);
129 return KERN_NAME_EXISTS;
133 list_add_tail(&new->sym_list, &cfs_symbol_list);
134 up_write(&cfs_symbol_lock);
140 cfs_symbol_unregister(const char *name)
142 struct list_head *walker;
143 struct list_head *nxt;
144 struct cfs_symbol *sym = NULL;
146 down_write(&cfs_symbol_lock);
147 list_for_each_safe(walker, nxt, &cfs_symbol_list) {
148 sym = list_entry (walker, struct cfs_symbol, sym_list);
149 if (!strcmp(sym->name, name)) {
150 LASSERT(sym->ref == 0);
151 list_del (&sym->sym_list);
156 up_write(&cfs_symbol_lock);
164 CFS_INIT_LIST_HEAD(&cfs_symbol_list);
165 init_rwsem(&cfs_symbol_lock);
171 struct list_head *walker;
172 struct cfs_symbol *sym = NULL;
174 down_write(&cfs_symbol_lock);
175 list_for_each(walker, &cfs_symbol_list) {
176 sym = list_entry (walker, struct cfs_symbol, sym_list);
177 LASSERT(sym->ref == 0);
178 list_del (&sym->sym_list);
181 up_write(&cfs_symbol_lock);
183 fini_rwsem(&cfs_symbol_lock);
187 struct kernel_thread_arg
195 struct kernel_thread_arg cfs_thread_arg;
197 #define THREAD_ARG_FREE 0
198 #define THREAD_ARG_HOLD 1
199 #define THREAD_ARG_RECV 2
201 #define set_targ_stat(a, v) atomic_set(&(a)->inuse, v)
202 #define get_targ_stat(a) atomic_read(&(a)->inuse)
/*
 * Hold the thread argument and set the status of thread_status
 * to THREAD_ARG_HOLD, if the thread argument is held by other
 * threads (It's THREAD_ARG_HOLD already), current-thread has to wait.
 */
209 #define thread_arg_hold(pta, _func, _arg) \
211 spin_lock(&(pta)->lock); \
212 if (get_targ_stat(pta) == THREAD_ARG_FREE) { \
213 set_targ_stat((pta), THREAD_ARG_HOLD); \
214 (pta)->arg = (void *)_arg; \
215 (pta)->func = _func; \
216 spin_unlock(&(pta)->lock); \
219 spin_unlock(&(pta)->lock); \
/*
 * Release the thread argument if the thread argument has been
 * received by the child-thread (Status of thread_args is
 * THREAD_ARG_RECV), otherwise current-thread has to wait.
 * After release, the thread_args' status will be set to
 * THREAD_ARG_FREE, and others can re-use the thread_args to
 * create new kernel_thread.
 */
231 #define thread_arg_release(pta) \
233 spin_lock(&(pta)->lock); \
234 if (get_targ_stat(pta) == THREAD_ARG_RECV) { \
236 (pta)->func = NULL; \
237 set_targ_stat(pta, THREAD_ARG_FREE); \
238 spin_unlock(&(pta)->lock); \
241 spin_unlock(&(pta)->lock); \
/*
 * Receive thread argument (Used in child thread), set the status
 * of thread_args to THREAD_ARG_RECV.
 */
249 #define __thread_arg_recv_fin(pta, _func, _arg, fin) \
251 spin_lock(&(pta)->lock); \
252 if (get_targ_stat(pta) == THREAD_ARG_HOLD) { \
254 set_targ_stat(pta, THREAD_ARG_RECV);\
256 _func = (pta)->func; \
257 spin_unlock(&(pta)->lock); \
260 spin_unlock(&(pta)->lock); \
/*
 * Just set the thread_args' status to THREAD_ARG_RECV
 */
267 #define thread_arg_fin(pta) \
269 spin_lock(&(pta)->lock); \
270 assert( get_targ_stat(pta) == THREAD_ARG_HOLD); \
271 set_targ_stat(pta, THREAD_ARG_RECV); \
272 spin_unlock(&(pta)->lock); \
275 #define thread_arg_recv(pta, f, a) __thread_arg_recv_fin(pta, f, a, 1)
276 #define thread_arg_keep(pta, f, a) __thread_arg_recv_fin(pta, f, a, 0)
279 cfs_thread_agent_init(void)
281 set_targ_stat(&cfs_thread_arg, THREAD_ARG_FREE);
282 spin_lock_init(&cfs_thread_arg.lock);
283 cfs_thread_arg.arg = NULL;
284 cfs_thread_arg.func = NULL;
288 cfs_thread_agent_fini(void)
290 assert(get_targ_stat(&cfs_thread_arg) == THREAD_ARG_FREE);
292 spin_lock_done(&cfs_thread_arg.lock);
/*
 * All requests to create kernel thread will create a new
 * thread instance of cfs_thread_agent, one by one.
 * cfs_thread_agent will call the caller's thread function
 * with argument supplied by caller.
 */
303 cfs_thread_agent (void)
305 cfs_thread_t func = NULL;
308 thread_arg_recv(&cfs_thread_arg, func, arg);
309 /* printf("entry of thread agent (func: %08lx).\n", (void *)func); */
310 assert(func != NULL);
312 /* printf("thread agent exit. (func: %08lx)\n", (void *)func); */
313 (void) thread_terminate(current_thread());
316 extern thread_t kernel_thread(task_t task, void (*start)(void));
319 cfs_kernel_thread(cfs_thread_t func, void *arg, int flag)
324 thread_arg_hold(&cfs_thread_arg, func, arg);
325 th = kernel_thread(kernel_task, cfs_thread_agent);
326 thread_arg_release(&cfs_thread_arg);
327 if (th == THREAD_NULL)
332 void cfs_daemonize(char *str)
334 snprintf(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX, "%s", str);
/*
 * XXX Liang: kexts cannot access sigmask in Darwin8.
 * it's almost impossible for us to get/set signal mask
 * without patching kernel.
 * Should we provide these functions in xnu?
 *
 * These signal functions almost do nothing now, we
 * need to investigate more about signal in Darwin.
 */
347 cfs_sigset_t cfs_get_blockedsigs()
349 return (cfs_sigset_t)0;
352 extern int block_procsigmask(struct proc *p, int bit);
354 cfs_sigset_t cfs_block_allsigs()
356 cfs_sigset_t old = 0;
359 block_procsigmask(current_proc(), -1);
364 cfs_sigset_t cfs_block_sigs(sigset_t bit)
366 cfs_sigset_t old = 0;
369 block_procsigmask(current_proc(), bit);
374 void cfs_restore_sigs(cfs_sigset_t old)
378 int cfs_signal_pending(void)
382 extern int thread_issignal(proc_t, thread_t, sigset_t);
383 return thread_issignal(current_proc(), current_thread(), (sigset_t)-1);
385 return SHOULDissignal(current_proc(), current_uthread())
389 void cfs_clear_sigpending(void)
393 clear_procsiglist(current_proc(), -1);
399 #else /* !__DARWIN8__ */
401 void lustre_cone_in(boolean_t *state, funnel_t **cone)
403 *cone = thread_funnel_get();
404 if (*cone == network_flock)
405 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
406 else if (*cone == NULL)
407 *state = thread_funnel_set(kernel_flock, TRUE);
410 void lustre_cone_ex(boolean_t state, funnel_t *cone)
412 if (cone == network_flock)
413 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
414 else if (cone == NULL)
415 (void) thread_funnel_set(kernel_flock, state);
418 void lustre_net_in(boolean_t *state, funnel_t **cone)
420 *cone = thread_funnel_get();
421 if (*cone == kernel_flock)
422 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
423 else if (*cone == NULL)
424 *state = thread_funnel_set(network_flock, TRUE);
427 void lustre_net_ex(boolean_t state, funnel_t *cone)
429 if (cone == kernel_flock)
430 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
431 else if (cone == NULL)
432 (void) thread_funnel_set(network_flock, state);
434 #endif /* !__DARWIN8__ */
436 void cfs_waitq_init(struct cfs_waitq *waitq)
438 ksleep_chan_init(&waitq->wq_ksleep_chan);
441 void cfs_waitlink_init(struct cfs_waitlink *link)
443 ksleep_link_init(&link->wl_ksleep_link);
446 void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
448 link->wl_waitq = waitq;
449 ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
452 void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
453 struct cfs_waitlink *link)
455 link->wl_waitq = waitq;
456 link->wl_ksleep_link.flags |= KSLEEP_EXCLUSIVE;
457 ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
460 void cfs_waitq_forward(struct cfs_waitlink *link,
461 struct cfs_waitq *waitq)
463 link->wl_ksleep_link.forward = &waitq->wq_ksleep_chan;
466 void cfs_waitq_del(struct cfs_waitq *waitq,
467 struct cfs_waitlink *link)
469 ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
472 int cfs_waitq_active(struct cfs_waitq *waitq)
477 void cfs_waitq_signal(struct cfs_waitq *waitq)
480 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
481 * from here: this will lead to infinite recursion.
483 ksleep_wake(&waitq->wq_ksleep_chan);
486 void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
488 ksleep_wake_nr(&waitq->wq_ksleep_chan, nr);
491 void cfs_waitq_broadcast(struct cfs_waitq *waitq)
493 ksleep_wake_all(&waitq->wq_ksleep_chan);
496 void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
498 ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
501 cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
502 cfs_task_state_t state,
503 cfs_duration_t timeout)
505 return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
509 typedef void (*ktimer_func_t)(void *);
510 void cfs_timer_init(cfs_timer_t *t, void (* func)(unsigned long), void *arg)
512 ktimer_init(&t->t, (ktimer_func_t)func, arg);
515 void cfs_timer_done(struct cfs_timer *t)
520 void cfs_timer_arm(struct cfs_timer *t, cfs_time_t deadline)
522 ktimer_arm(&t->t, deadline);
525 void cfs_timer_disarm(struct cfs_timer *t)
527 ktimer_disarm(&t->t);
530 int cfs_timer_is_armed(struct cfs_timer *t)
532 return ktimer_is_armed(&t->t);
535 cfs_time_t cfs_timer_deadline(struct cfs_timer *t)
537 return ktimer_deadline(&t->t);
540 void cfs_enter_debugger(void)
543 extern void Debugger(const char * reason);
546 extern void PE_enter_debugger(char *cause);
547 PE_enter_debugger("CFS");
551 int cfs_online_cpus(void)
558 sysctlbyname("hw.activecpu", &activecpu, &size, NULL, 0);
561 host_basic_info_data_t hinfo;
563 int count = HOST_BASIC_INFO_COUNT;
565 kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count);
566 if (kret == KERN_SUCCESS)
567 return (hinfo.avail_cpus);
579 sysctlbyname("hw.ncpu", &ncpu, &size, NULL, 0);