X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=libcfs%2Finclude%2Flibcfs%2Flibcfs_private.h;h=7557c62fd827f730f6409b9a2bf266cd46335646;hp=d0fd499fb2f9b585377ed8a7cdc63effce88f986;hb=874f67c06da8304a194df5fc0dd5a2c61937076c;hpb=3b4c006b28c9d6a7c3b00535cd3a6292178fa4c6

diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h
index d0fd499..7557c62 100644
--- a/libcfs/include/libcfs/libcfs_private.h
+++ b/libcfs/include/libcfs/libcfs_private.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,8 +24,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -71,48 +71,35 @@
  *
  * requires -Wall. Unfortunately this rules out use of likely/unlikely.
  */
-#define LASSERT(cond) \
-do { \
-        if (cond) \
-                ; \
-        else \
-                libcfs_assertion_failed( #cond , __FILE__, \
-                                         __FUNCTION__, __LINE__); \
-} while(0)
+#define LASSERTF(cond, fmt, ...) \
+do { \
+        if (cond) \
+                ; \
+        else { \
+                LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
+                libcfs_debug_msg(&__msg_data, \
+                                 "ASSERTION( %s ) failed: " fmt, #cond, \
+                                 ## __VA_ARGS__); \
+                lbug_with_loc(&__msg_data); \
+        } \
+} while (0)
 
-#define LASSERTF(cond, fmt, ...) \
-do { \
-        if (cond) \
-                ; \
-        else { \
-                libcfs_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
-                                 __FILE__, __FUNCTION__,__LINE__, \
-                                 "ASSERTION(" #cond ") failed: " fmt, \
-                                 ## __VA_ARGS__); \
-                LBUG(); \
-        } \
-} while(0)
+#define LASSERT(cond) LASSERTF(cond, "\n")
 
 #else /* !LASSERT_CHECKED */
 
-#define LASSERT(cond) \
-do { \
-        if (unlikely(!(cond))) \
-                libcfs_assertion_failed(#cond , __FILE__, \
-                                        __FUNCTION__, __LINE__); \
-} while(0)
-
-#define LASSERTF(cond, fmt, ...) \
-do { \
-        if (unlikely(!(cond))) { \
-                libcfs_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
-                                 __FILE__, __FUNCTION__,__LINE__, \
-                                 "ASSERTION(" #cond ") failed: " fmt, \
-                                 ## __VA_ARGS__ ); \
-                LBUG(); \
-        } \
-} while(0)
+#define LASSERTF(cond, fmt, ...) \
+do { \
+        if (unlikely(!(cond))) { \
+                LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
+                libcfs_debug_msg(&__msg_data, \
+                                 "ASSERTION( %s ) failed: " fmt, #cond, \
+                                 ## __VA_ARGS__); \
+                lbug_with_loc(&__msg_data); \
+        } \
+} while (0)
+#define LASSERT(cond) LASSERTF(cond, "\n")
 
 #endif /* !LASSERT_CHECKED */
 
 #else /* !LIBCFS_DEBUG */
 /* sizeof is to use expression without evaluating it. */
@@ -120,7 +107,7 @@ do { \
 # define LASSERTF(cond, ...) ((void)sizeof!!(cond))
 #endif /* !LIBCFS_DEBUG */
 
-#ifdef INVARIANT_CHECK
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 /**
  * This is for more expensive checks that one doesn't want to be enabled all
  * the time. LINVRNT() has to be explicitly enabled by --enable-invariants
@@ -133,10 +120,13 @@ do { \
 
 #define KLASSERT(e) LASSERT(e)
 
-void lbug_with_loc(const char *file, const char *func, const int line)
-        __attribute__((noreturn));
+void lbug_with_loc(struct libcfs_debug_msg_data *) __attribute__((noreturn));
 
-#define LBUG() lbug_with_loc(__FILE__, __FUNCTION__, __LINE__)
+#define LBUG() \
+do { \
+        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
+        lbug_with_loc(&msgdata); \
+} while(0)
 
 extern atomic_t libcfs_kmemory;
 /*
@@ -144,65 +134,107 @@ extern atomic_t libcfs_kmemory;
  */
 
 #ifdef LIBCFS_DEBUG
 
-# define libcfs_kmem_inc(ptr, size) \
-do { \
-        atomic_add(size, &libcfs_kmemory); \
+# define libcfs_kmem_inc(ptr, size) \
+do { \
+        atomic_add(size, &libcfs_kmemory); \
 } while (0)
 
-# define libcfs_kmem_dec(ptr, size) do { \
-        atomic_sub(size, &libcfs_kmemory); \
+# define libcfs_kmem_dec(ptr, size) \
+do { \
+        atomic_sub(size, &libcfs_kmemory); \
 } while (0)
 
+# define libcfs_kmem_read() \
+        atomic_read(&libcfs_kmemory)
+
 #else
 # define libcfs_kmem_inc(ptr, size) do {} while (0)
 # define libcfs_kmem_dec(ptr, size) do {} while (0)
+# define libcfs_kmem_read() (0)
 #endif /* LIBCFS_DEBUG */
 
-#define LIBCFS_VMALLOC_SIZE 16384
-
-#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
-do { \
-        LASSERT(!in_interrupt() || \
-                (size <= LIBCFS_VMALLOC_SIZE && mask == CFS_ALLOC_ATOMIC));\
-        if (unlikely((size) > LIBCFS_VMALLOC_SIZE)) \
-                (ptr) = cfs_alloc_large(size); \
-        else \
-                (ptr) = cfs_alloc((size), (mask)); \
-        if (unlikely((ptr) == NULL)) { \
-                CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
-                       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size));\
-                CERROR("LNET: %d total bytes allocated by lnet\n", \
-                       atomic_read(&libcfs_kmemory)); \
-                break; \
-        } \
-        libcfs_kmem_inc((ptr), (size)); \
-        if (!((mask) & CFS_ALLOC_ZERO)) \
-                memset((ptr), 0, (size)); \
-        CDEBUG(D_MALLOC, "kmalloced '" #ptr "': %d at %p (tot %d).\n", \
-               (int)(size), (ptr), atomic_read (&libcfs_kmemory)); \
+#ifndef LIBCFS_VMALLOC_SIZE
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#endif
+
+#define LIBCFS_ALLOC_PRE(size, mask) \
+do { \
+        LASSERT(!in_interrupt() || \
+                ((size) <= LIBCFS_VMALLOC_SIZE && \
+                 ((mask) & GFP_ATOMIC)) != 0); \
+} while (0)
+
+#define LIBCFS_ALLOC_POST(ptr, size) \
+do { \
+        if (unlikely((ptr) == NULL)) { \
+                CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
+                       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
+                CERROR("LNET: %d total bytes allocated by lnet\n", \
+                       libcfs_kmem_read()); \
+        } else { \
+                memset((ptr), 0, (size)); \
+                libcfs_kmem_inc((ptr), (size)); \
+                CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %d).\n", \
+                       (int)(size), (ptr), libcfs_kmem_read()); \
+        } \
+} while (0)
+
+/**
+ * allocate memory with GFP flags @mask
+ */
+#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
+do { \
+        LIBCFS_ALLOC_PRE((size), (mask)); \
+        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
+                kmalloc((size), (mask)) : vmalloc(size); \
+        LIBCFS_ALLOC_POST((ptr), (size)); \
 } while (0)
 
+/**
+ * default allocator
+ */
 #define LIBCFS_ALLOC(ptr, size) \
-        LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_IO)
+        LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)
 
+/**
+ * non-sleeping allocator
+ */
 #define LIBCFS_ALLOC_ATOMIC(ptr, size) \
-        LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_ATOMIC)
+        LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
 
-#define LIBCFS_FREE(ptr, size) \
-do { \
-        int s = (size); \
-        if (unlikely((ptr) == NULL)) { \
-                CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
-                       "%s:%d\n", s, __FILE__, __LINE__); \
-                break; \
-        } \
-        libcfs_kmem_dec((ptr), s); \
-        CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
-               s, (ptr), atomic_read(&libcfs_kmemory)); \
-        if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
-                cfs_free_large(ptr); \
-        else \
-                cfs_free(ptr); \
+/**
+ * allocate memory for specified CPU partition
+ *   \a cptab != NULL, \a cpt is CPU partition id of \a cptab
+ *   \a cptab == NULL, \a cpt is HW NUMA node id
+ */
+#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask) \
+do { \
+        LIBCFS_ALLOC_PRE((size), (mask)); \
+        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
+                cfs_cpt_malloc((cptab), (cpt), (size), (mask)) : \
+                cfs_cpt_vmalloc((cptab), (cpt), (size)); \
+        LIBCFS_ALLOC_POST((ptr), (size)); \
+} while (0)
+
+/** default numa allocator */
+#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
+        LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
+
+#define LIBCFS_FREE(ptr, size) \
+do { \
+        int s = (size); \
+        if (unlikely((ptr) == NULL)) { \
+                CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
+                       "%s:%d\n", s, __FILE__, __LINE__); \
+                break; \
+        } \
+        libcfs_kmem_dec((ptr), s); \
+        CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
+               s, (ptr), libcfs_kmem_read()); \
+        if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
+                vfree(ptr); \
+        else \
+                kfree(ptr); \
 } while (0)
 
 /******************************************************************************/
 
@@ -219,9 +251,9 @@ do { \
 #define ntohs(x) ___ntohs(x)
 #endif
 
-void libcfs_debug_dumpstack(cfs_task_t *tsk);
+void libcfs_debug_dumpstack(struct task_struct *tsk);
 void libcfs_run_upcall(char **argv);
-void libcfs_run_lbug_upcall(const char * file, const char *fn, const int line);
+void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
 void libcfs_debug_dumplog(void);
 int libcfs_debug_init(unsigned long bufsize);
 int libcfs_debug_cleanup(void);
@@ -242,7 +274,7 @@ do { \
         assert(cond); \
 } while (0)
 # define LBUG() assert(0)
-# ifdef INVARIANT_CHECK
+# ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 # define LINVRNT(exp) LASSERT(exp)
 # else
 # define LINVRNT(exp) ((void)sizeof!!(exp))
@@ -255,16 +287,17 @@ do { \
 # endif /* LIBCFS_DEBUG */
 # define KLASSERT(e) ((void)0)
 # define printk printf
-# ifdef CRAY_XT3 /* buggy calloc! */
-# define LIBCFS_ALLOC(ptr, size) \
-   do { \
-        (ptr) = malloc(size); \
-        memset(ptr, 0, size); \
-   } while (0)
-# else
-# define LIBCFS_ALLOC(ptr, size) do { (ptr) = calloc(1,size); } while (0)
-# endif
-# define LIBCFS_FREE(a, b) do { free(a); } while (0)
+#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
+do { \
+        (ptr) = calloc(1, size); \
+} while (0)
+# define LIBCFS_FREE(ptr, size) do { free(ptr); } while((size) - (size))
+# define LIBCFS_ALLOC(ptr, size) \
+        LIBCFS_ALLOC_GFP(ptr, size, 0)
+# define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask) \
+        LIBCFS_ALLOC(ptr, size)
+# define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
+        LIBCFS_ALLOC(ptr, size)
 
 void libcfs_debug_dumplog(void);
 int libcfs_debug_init(unsigned long bufsize);
@@ -287,10 +320,207 @@ int libcfs_debug_cleanup(void);
 
 /* !__KERNEL__ */
 #endif
 
+/*
+ * allocate per-cpu-partition data, returned value is an array of pointers,
+ * variable can be indexed by CPU ID.
+ *      cptable != NULL: size of array is number of CPU partitions
+ *      cptable == NULL: size of array is number of HW cores
+ */
+void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
+/*
+ * destory per-cpu-partition variable
+ */
+void  cfs_percpt_free(void *vars);
+int   cfs_percpt_number(void *vars);
+void *cfs_percpt_current(void *vars);
+void *cfs_percpt_index(void *vars, int idx);
+
+#define cfs_percpt_for_each(var, i, vars) \
+        for (i = 0; i < cfs_percpt_number(vars) && \
+                    ((var) = (vars)[i]) != NULL; i++)
+
+/*
+ * allocate a variable array, returned value is an array of pointers.
+ * Caller can specify length of array by count.
+ */
+void *cfs_array_alloc(int count, unsigned int size);
+void  cfs_array_free(void *vars);
+
+#define LASSERT_ATOMIC_ENABLED (1)
+
+#if LASSERT_ATOMIC_ENABLED
+
+/** assert value of @a is equal to @v */
+#define LASSERT_ATOMIC_EQ(a, v) \
+do { \
+        LASSERTF(atomic_read(a) == v, \
+                 "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is unequal to @v */
+#define LASSERT_ATOMIC_NE(a, v) \
+do { \
+        LASSERTF(atomic_read(a) != v, \
+                 "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is little than @v */
+#define LASSERT_ATOMIC_LT(a, v) \
+do { \
+        LASSERTF(atomic_read(a) < v, \
+                 "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is little/equal to @v */
+#define LASSERT_ATOMIC_LE(a, v) \
+do { \
+        LASSERTF(atomic_read(a) <= v, \
+                 "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is great than @v */
+#define LASSERT_ATOMIC_GT(a, v) \
+do { \
+        LASSERTF(atomic_read(a) > v, \
+                 "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is great/equal to @v */
+#define LASSERT_ATOMIC_GE(a, v) \
+do { \
+        LASSERTF(atomic_read(a) >= v, \
+                 "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is great than @v1 and little than @v2 */
+#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
+do { \
+        int __v = atomic_read(a); \
+        LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is great than @v1 and little/equal to @v2 */
+#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
+do { \
+        int __v = atomic_read(a); \
+        LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is great/equal to @v1 and little than @v2 */
+#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
+do { \
+        int __v = atomic_read(a); \
+        LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is great/equal to @v1 and little/equal to @v2 */
+#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
+do { \
+        int __v = atomic_read(a); \
+        LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
+} while (0)
+
+#else /* !LASSERT_ATOMIC_ENABLED */
+
+#define LASSERT_ATOMIC_EQ(a, v)                 do {} while (0)
+#define LASSERT_ATOMIC_NE(a, v)                 do {} while (0)
+#define LASSERT_ATOMIC_LT(a, v)                 do {} while (0)
+#define LASSERT_ATOMIC_LE(a, v)                 do {} while (0)
+#define LASSERT_ATOMIC_GT(a, v)                 do {} while (0)
+#define LASSERT_ATOMIC_GE(a, v)                 do {} while (0)
+#define LASSERT_ATOMIC_GT_LT(a, v1, v2)         do {} while (0)
+#define LASSERT_ATOMIC_GT_LE(a, v1, v2)         do {} while (0)
+#define LASSERT_ATOMIC_GE_LT(a, v1, v2)         do {} while (0)
+#define LASSERT_ATOMIC_GE_LE(a, v1, v2)         do {} while (0)
+
+#endif /* LASSERT_ATOMIC_ENABLED */
+
+#define LASSERT_ATOMIC_ZERO(a)                  LASSERT_ATOMIC_EQ(a, 0)
+#define LASSERT_ATOMIC_POS(a)                   LASSERT_ATOMIC_GT(a, 0)
+
 #define CFS_ALLOC_PTR(ptr)      LIBCFS_ALLOC(ptr, sizeof (*(ptr)));
 #define CFS_FREE_PTR(ptr)       LIBCFS_FREE(ptr, sizeof (*(ptr)));
 
-/** Compile-time assertion.
+/*
+ * percpu partition lock
+ *
+ * There are some use-cases like this in Lustre:
+ * . each CPU partition has it's own private data which is frequently changed,
+ *   and mostly by the local CPU partition.
+ * . all CPU partitions share some global data, these data are rarely changed.
+ *
+ * LNet is typical example.
+ * CPU partition lock is designed for this kind of use-cases:
+ * . each CPU partition has it's own private lock
+ * . change on private data just needs to take the private lock
+ * . read on shared data just needs to take _any_ of private locks
+ * . change on shared data needs to take _all_ private locks,
+ *   which is slow and should be really rare.
+ */
+
+enum {
+        CFS_PERCPT_LOCK_EX      = -1, /* negative */
+};
+
+#ifdef __KERNEL__
+
+struct cfs_percpt_lock {
+        /* cpu-partition-table for this lock */
+        struct cfs_cpt_table    *pcl_cptab;
+        /* exclusively locked */
+        unsigned int            pcl_locked;
+        /* private lock table */
+        spinlock_t              **pcl_locks;
+};
+
+/* return number of private locks */
+static inline int
+cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
+{
+        return cfs_cpt_number(pcl->pcl_cptab);
+}
+
+#else /* !__KERNEL__ */
+
+# ifdef HAVE_LIBPTHREAD
+
+struct cfs_percpt_lock {
+        pthread_mutex_t         pcl_mutex;
+};
+
+# else /* !HAVE_LIBPTHREAD */
+
+struct cfs_percpt_lock {
+        int                     pcl_lock;
+};
+
+static const struct cfs_percpt_lock CFS_PERCPT_LOCK_MAGIC;
+
+# endif /* HAVE_LIBPTHREAD */
+# define cfs_percpt_lock_num(pcl) 1
+#endif /* __KERNEL__ */
+
+/*
+ * create a cpu-partition lock based on CPU partition table \a cptab,
+ * each private lock has extra \a psize bytes padding data
+ */
+struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
+/* destroy a cpu-partition lock */
+void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
+
+/* lock private lock \a index of \a pcl */
+void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
+/* unlock private lock \a index of \a pcl */
+void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
+/* create percpt (atomic) refcount based on @cptab */
+atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
+/* destroy percpt refcount */
+void cfs_percpt_atomic_free(atomic_t **refs);
+/* return sum of all percpu refs */
+int cfs_percpt_atomic_summary(atomic_t **refs);
+
+
+/** Compile-time assertion.
  * Check an invariant described by a constant expression at compile time by
  * forcing a compiler error if it does not hold. \a cond must be a constant
@@ -307,29 +537,36 @@ int libcfs_debug_cleanup(void);
 #define CLASSERT(cond) do {switch(42) {case (cond): case 0: break;}} while (0)
 
 /* support decl needed both by kernel and liblustre */
-int         libcfs_isknown_lnd(int type);
-char       *libcfs_lnd2modname(int type);
-char       *libcfs_lnd2str(int type);
-int         libcfs_str2lnd(const char *str);
-char       *libcfs_net2str(__u32 net);
-char       *libcfs_nid2str(lnet_nid_t nid);
-__u32       libcfs_str2net(const char *str);
-lnet_nid_t  libcfs_str2nid(const char *str);
-int         libcfs_str2anynid(lnet_nid_t *nid, const char *str);
-char       *libcfs_id2str(lnet_process_id_t id);
-int         cfs_iswhite(char c);
-void        cfs_free_nidlist(struct list_head *list);
-int         cfs_parse_nidlist(char *str, int len, struct list_head *list);
-int         cfs_match_nid(lnet_nid_t nid, struct list_head *list);
-
+int  libcfs_isknown_lnd(int type);
+char *libcfs_lnd2modname(int type);
+char *libcfs_lnd2str(int type);
+int  libcfs_str2lnd(const char *str);
+char *libcfs_net2str(__u32 net);
+char *libcfs_nid2str(lnet_nid_t nid);
+__u32 libcfs_str2net(const char *str);
+lnet_nid_t libcfs_str2nid(const char *str);
+int  libcfs_str2anynid(lnet_nid_t *nid, const char *str);
+char *libcfs_id2str(lnet_process_id_t id);
+void cfs_free_nidlist(struct list_head *list);
+int  cfs_parse_nidlist(char *str, int len, struct list_head *list);
+int  cfs_print_nidlist(char *buffer, int count,
+                       struct list_head *list);
+int  cfs_match_nid(lnet_nid_t nid, struct list_head *list);
+
+/** \addtogroup lnet_addr
+ * @{ */
 /* how an LNET NID encodes net:address */
+/** extract the address part of an lnet_nid_t */
 #define LNET_NIDADDR(nid)      ((__u32)((nid) & 0xffffffff))
+/** extract the network part of an lnet_nid_t */
 #define LNET_NIDNET(nid)       ((__u32)(((nid) >> 32)) & 0xffffffff)
+/** make an lnet_nid_t from a network part and an address part */
 #define LNET_MKNID(net,addr)   ((((__u64)(net))<<32)|((__u64)(addr)))
 /* how net encodes type:number */
 #define LNET_NETNUM(net)       ((net) & 0xffff)
 #define LNET_NETTYP(net)       (((net) >> 16) & 0xffff)
 #define LNET_MKNET(typ,num)    ((((__u32)(typ))<<16)|((__u32)(num)))
+/** @} lnet_addr */
 
 /* max value for numeric network address */
 #define MAX_NUMERIC_VALUE 0xffffffff
@@ -339,20 +576,14 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
 /* logical equivalence */
 #define equi(a, b) (!!(a) == !!(b))
 
-#ifndef CURRENT_TIME
-# define CURRENT_TIME time(0)
+#ifndef CFS_CURRENT_TIME
+# define CFS_CURRENT_TIME time(0)
 #endif
 
-/* --------------------------------------------------------------------
- * Light-weight trace
- * Support for temporary event tracing with minimal Heisenberg effect.
- * All stuff about lwt are put in arch/kp30.h
- * -------------------------------------------------------------------- */
-
 struct libcfs_device_userstate
 {
-        int           ldu_memhog_pages;
-        cfs_page_t   *ldu_memhog_root_page;
+        int             ldu_memhog_pages;
+        struct page     *ldu_memhog_root_page;
 };
 
 /* what used to be in portals_lib.h */
@@ -365,50 +596,66 @@ struct libcfs_device_userstate
 #define MKSTR(ptr) ((ptr))? (ptr) : ""
 
-static inline int size_round4 (int val)
+static inline int cfs_size_round4 (int val)
 {
         return (val + 3) & (~0x3);
 }
 
-static inline int size_round (int val)
+#ifndef HAVE_CFS_SIZE_ROUND
+static inline int cfs_size_round (int val)
 {
         return (val + 7) & (~0x7);
 }
+#define HAVE_CFS_SIZE_ROUND
+#endif
 
-static inline int size_round16(int val)
+static inline int cfs_size_round16(int val)
 {
         return (val + 0xf) & (~0xf);
 }
 
-static inline int size_round32(int val)
+static inline int cfs_size_round32(int val)
 {
         return (val + 0x1f) & (~0x1f);
 }
 
-static inline int size_round0(int val)
+static inline int cfs_size_round0(int val)
 {
         if (!val)
                 return 0;
         return (val + 1 + 7) & (~0x7);
 }
 
-static inline size_t round_strlen(char *fset)
+static inline size_t cfs_round_strlen(char *fset)
+{
+        return (size_t)cfs_size_round((int)strlen(fset) + 1);
+}
+
+/* roundup \a val to power2 */
+static inline unsigned int cfs_power2_roundup(unsigned int val)
 {
-        return (size_t)size_round((int)strlen(fset) + 1);
+        if (val != LOWEST_BIT_SET(val)) { /* not a power of 2 already */
+                do {
+                        val &= ~LOWEST_BIT_SET(val);
+                } while (val != LOWEST_BIT_SET(val));
+                /* ...and round up */
+                val <<= 1;
+        }
+        return val;
 }
 
 #define LOGL(var,len,ptr) \
 do { \
         if (var) \
                 memcpy((char *)ptr, (const char *)var, len); \
-        ptr += size_round(len); \
+        ptr += cfs_size_round(len); \
 } while (0)
 
 #define LOGU(var,len,ptr) \
 do { \
         if (var) \
                 memcpy((char *)var, (const char *)ptr, len); \
-        ptr += size_round(len); \
+        ptr += cfs_size_round(len); \
 } while (0)
 
 #define LOGL0(var,len,ptr) \
@@ -417,7 +664,7 @@ do { \
                 break; \
         memcpy((char *)ptr, (const char *)var, len); \
         *((char *)(ptr) + len) = 0; \
-        ptr += size_round(len + 1); \
+        ptr += cfs_size_round(len + 1); \
 } while (0)
 
 /**
@@ -428,7 +675,7 @@ enum {
          * network addresses depend on them... */
         QSWLND    = 1,
         SOCKLND   = 2,
-        GMLND     = 3,
+        GMLND     = 3, /* obsolete, keep it so that libcfs_nid2str works */
        PTLLND    = 4,
         O2IBLND   = 5,
         CIBLND    = 6,
@@ -438,6 +685,8 @@ enum {
         RALND     = 10,
         VIBLND    = 11,
         MXLND     = 12,
+        GNILND    = 13,
+        GNIIPLND  = 14,
 };
 
 #endif
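
A note on the allocation helpers changed above: LIBCFS_ALLOC() and LIBCFS_FREE() are used as a matched pair, and LIBCFS_ALLOC_POST() zeroes the memory and feeds the libcfs_kmemory accounting. The following sketch (not part of the patch) shows that pairing; it assumes a kernel-side libcfs build with this header already pulled in, and the my_desc structure and functions are invented for illustration:

struct my_desc {
        int     md_count;
        char    md_name[64];
};

static int my_desc_create(struct my_desc **descp)
{
        struct my_desc *desc;

        /* LIBCFS_ALLOC() hands back zeroed memory or NULL; the failure
         * path is already reported through CERROR() by LIBCFS_ALLOC_POST() */
        LIBCFS_ALLOC(desc, sizeof(*desc));
        if (desc == NULL)
                return -ENOMEM;

        desc->md_count = 1;
        *descp = desc;
        return 0;
}

static void my_desc_destroy(struct my_desc *desc)
{
        /* the size must match the allocation so the libcfs_kmemory
         * accounting stays balanced */
        LIBCFS_FREE(desc, sizeof(*desc));
}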
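
Similarly, the per-CPU-partition array helpers introduced above (cfs_percpt_alloc(), cfs_percpt_for_each(), cfs_percpt_free()) are typically wrapped as in the sketch below; the counter structure and the source of the cptab pointer are hypothetical:

struct my_counter {
        long    mc_events;
};

static struct my_counter **my_counters;

static int my_counters_init(struct cfs_cpt_table *cptab)
{
        /* one struct my_counter per CPU partition (or per HW core when
         * cptab == NULL, as documented above) */
        my_counters = cfs_percpt_alloc(cptab, sizeof(**my_counters));
        return my_counters == NULL ? -ENOMEM : 0;
}

static long my_counters_sum(void)
{
        struct my_counter *c;
        long total = 0;
        int i;

        /* walk the pointer array, one element per partition */
        cfs_percpt_for_each(c, i, my_counters)
                total += c->mc_events;
        return total;
}

static void my_counters_fini(void)
{
        cfs_percpt_free(my_counters);
}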
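
The comment block introducing the percpu partition lock describes the intended discipline: frequent updates touch only the local partition under its private lock, while rare updates to shared state lock everything. A sketch of that pattern follows; the data layout, and passing CFS_PERCPT_LOCK_EX as the index for the exclusive case, are illustrative assumptions rather than a documented contract:

struct my_state {
        struct cfs_percpt_lock  *ms_lock;       /* from cfs_percpt_lock_alloc() */
        long                    *ms_private;    /* per-partition counters */
        long                     ms_shared;     /* rarely changed */
};

static void my_private_bump(struct my_state *ms, int cpt)
{
        /* frequent path: only this partition's private lock is taken */
        cfs_percpt_lock(ms->ms_lock, cpt);
        ms->ms_private[cpt]++;
        cfs_percpt_unlock(ms->ms_lock, cpt);
}

static void my_shared_set(struct my_state *ms, long value)
{
        /* rare path: exclusive access across all partitions */
        cfs_percpt_lock(ms->ms_lock, CFS_PERCPT_LOCK_EX);
        ms->ms_shared = value;
        cfs_percpt_unlock(ms->ms_lock, CFS_PERCPT_LOCK_EX);
}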
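
Finally, the LNET_MKNID()/LNET_NIDNET()/LNET_NIDADDR() macros documented in the lnet_addr group above are plain bit packing: the net (type:number) occupies the high 32 bits of an lnet_nid_t and the address the low 32 bits. A small worked example, with an invented address value:

static void lnet_nid_packing_example(void)
{
        __u32      net  = LNET_MKNET(SOCKLND, 0); /* network type SOCKLND, number 0 */
        __u32      addr = 0xc0a8010a;             /* illustrative 32-bit address */
        lnet_nid_t nid  = LNET_MKNID(net, addr);

        /* round trip: both halves come back out unchanged */
        LASSERT(LNET_NIDNET(nid) == net);
        LASSERT(LNET_NIDADDR(nid) == addr);
        LASSERT(LNET_NETTYP(net) == SOCKLND);
        LASSERT(LNET_NETNUM(net) == 0);
}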