s/\bcfs_timer_t\b/struct timer_list/g
s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
/#[ \t]*define[ \t]*\bMAX_SCHEDULE_TIMEOUT\b[ \t]*\bMAX_SCHEDULE_TIMEOUT\b/d
+
+# membar
+s/\bcfs_mb\b/smp_mb/g
+/#[ \t]*define[ \t]*\bsmp_mb\b *( *)[ \t]*\bsmp_mb\b *( *)/d
+# interrupt
+s/\bcfs_in_interrupt\b/in_interrupt/g
+/#[ \t]*define[ \t]*\bin_interrupt\b *( *)[ \t]*\bin_interrupt\b *( *)/d
+# might_sleep
+s/\bcfs_might_sleep\b/might_sleep/g
+/#[ \t]*define[ \t]*\bmight_sleep\b *( *)[ \t]*\bmight_sleep\b *( *)/d
+# group_info
+s/\bcfs_group_info_t\b/struct group_info/g
+s/\bcfs_get_group_info\b/get_group_info/g
+/#[ \t]*define[ \t]*\bget_group_info\b *( *\w* *)[ \t]*\bget_group_info\b *( *\w* *)/d
+s/\bcfs_put_group_info\b/put_group_info/g
+/#[ \t]*define[ \t]*\bput_group_info\b *( *\w* *)[ \t]*\bput_group_info\b *( *\w* *)/d
+s/\bcfs_set_current_groups\b/set_current_groups/g
+/#[ \t]*define[ \t]*\bset_current_groups\b *( *\w* *)[ \t]*\bset_current_groups\b *( *\w* *)/d
+s/\bcfs_groups_free\b/groups_free/g
+/#[ \t]*define[ \t]*\bgroups_free\b *( *\w* *)[ \t]*\bgroups_free\b *( *\w* *)/d
+s/\bcfs_groups_alloc\b/groups_alloc/g
+/#[ \t]*define[ \t]*\bgroups_alloc\b *( *\w* *)[ \t]*\bgroups_alloc\b *( *\w* *)/d
+# Random bytes
+s/\bcfs_get_random_bytes_prim\b/get_random_bytes/g
+/#[ \t]*define[ \t]*\bget_random_bytes\b *( *\w* *, *\w* *)[ \t]*\bget_random_bytes\b *( *\w* *, *\w* *)/d
#define set_bit(i, a) setbit(a, i)
#define clear_bit(i, a) clrbit(a, i)
-#define cfs_get_random_bytes_prim(buf, len) read_random(buf, len)
+#define get_random_bytes(buf, len) read_random(buf, len)
#endif /* __KERNEL__ */
#define LIBCFS_ALLOC_PRE(size, mask) \
do { \
- LASSERT(!cfs_in_interrupt() || \
+ LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
((mask) & GFP_ATOMIC)) != 0); \
} while (0)
#define cfs_atomic_cmpxchg(atom, old, nv) atomic_cmpxchg(atom, old, nv)
#define CFS_ATOMIC_INIT(i) ATOMIC_INIT(i)
-/*
- * membar
- */
-
-#define cfs_mb() mb()
-
-/*
- * interrupt
- */
-
-#define cfs_in_interrupt() in_interrupt()
-
-/*
- * might_sleep
- */
-#define cfs_might_sleep() might_sleep()
-
-/*
- * group_info
- */
-typedef struct group_info cfs_group_info_t;
-
-#define cfs_get_group_info(group_info) get_group_info(group_info)
-#define cfs_put_group_info(group_info) put_group_info(group_info)
-#define cfs_set_current_groups(group_info) set_current_groups(group_info)
-#define cfs_groups_free(group_info) groups_free(group_info)
-#define cfs_groups_alloc(gidsetsize) groups_alloc(gidsetsize)
-
-/*
- * Random bytes
- */
-#define cfs_get_random_bytes_prim(buf, nbytes) get_random_bytes(buf, nbytes)
#endif
};
struct md_identity {
- struct upcall_cache_entry *mi_uc_entry;
- uid_t mi_uid;
- gid_t mi_gid;
- cfs_group_info_t *mi_ginfo;
- int mi_nperms;
- struct md_perm *mi_perms;
+ struct upcall_cache_entry *mi_uc_entry;
+ uid_t mi_uid;
+ gid_t mi_gid;
+ struct group_info *mi_ginfo;
+ int mi_nperms;
+ struct md_perm *mi_perms;
};
struct upcall_cache_entry {
};
-#define cfs_in_interrupt() (0)
+#define in_interrupt() (0)
struct miscdevice{
};
/*
* Groups
*/
-typedef struct cfs_group_info {
-
-} cfs_group_info_t;
+struct group_info{ };
#ifndef min
# define min(x,y) ((x)<(y) ? (x) : (y))
# define max(x,y) ((x)>(y) ? (x) : (y))
#endif
-#define cfs_get_random_bytes_prim(val, size) (*val) = 0
+#define get_random_bytes(val, size) (*val) = 0
/* utility libcfs init/fini entries */
#ifdef __WINNT__
}
}
-#define cfs_get_random_bytes_prim(buf, len) read_random(buf, len)
+#define get_random_bytes(buf, len) read_random(buf, len)
/* do NOT use function or expression as parameters ... */
#define MMSPACE_CLOSE do {} while (0)
-#define cfs_mb() do {} while(0)
-#define rmb() cfs_mb()
-#define wmb() cfs_mb()
+#define smp_mb() do {} while(0)
+#define rmb() smp_mb()
+#define wmb() smp_mb()
/*
* MM defintions from (linux/mm.h)
*/
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))
-typedef struct cfs_group_info {
- int ngroups;
- cfs_atomic_t usage;
- gid_t small_block[NGROUPS_SMALL];
- int nblocks;
- gid_t *blocks[0];
-} cfs_group_info_t;
-
-#define cfs_get_group_info(group_info) do { \
+struct group_info {
+ int ngroups;
+ cfs_atomic_t usage;
+ gid_t small_block[NGROUPS_SMALL];
+ int nblocks;
+ gid_t *blocks[0];
+};
+
+#define get_group_info(group_info) do { \
cfs_atomic_inc(&(group_info)->usage); \
} while (0)
-#define cfs_put_group_info(group_info) do { \
+#define put_group_info(group_info) do { \
if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
- cfs_groups_free(group_info); \
+ groups_free(group_info); \
} while (0)
-static __inline cfs_group_info_t *cfs_groups_alloc(int gidsetsize)
+static __inline struct group_info *groups_alloc(int gidsetsize)
{
- cfs_group_info_t * groupinfo;
+ struct group_info * groupinfo;
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
- groupinfo = kmalloc(sizeof(cfs_group_info_t), 0);
+ groupinfo = kmalloc(sizeof(struct group_info), 0);
if (groupinfo) {
- memset(groupinfo, 0, sizeof(cfs_group_info_t));
+ memset(groupinfo, 0, sizeof(struct group_info));
}
return groupinfo;
}
-static __inline void cfs_groups_free(cfs_group_info_t *group_info)
+static __inline void groups_free(struct group_info *group_info)
{
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
__FUNCTION__));
}
static __inline int
-cfs_set_current_groups(cfs_group_info_t *group_info)
+set_current_groups(struct group_info *group_info)
{
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
__FUNCTION__));
return 0;
}
-static __inline int groups_search(cfs_group_info_t *group_info,
+static __inline int groups_search(struct group_info *group_info,
gid_t grp) {
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
__FUNCTION__));
int ngroups;
int cgroups;
gid_t groups[NGROUPS];
- cfs_group_info_t *group_info;
+ struct group_info *group_info;
kernel_cap_t cap_effective,
cap_inheritable,
cap_permitted;
struct task_struct * current;
int wake_up_process(struct task_struct * task);
void sleep_on(wait_queue_head_t *waitq);
-#define cfs_might_sleep() do {} while(0)
+#define might_sleep() do {} while(0)
#define DECL_JOURNAL_DATA
#define PUSH_JOURNAL do {;} while(0)
#define POP_JOURNAL do {;} while(0)
*/
#define NR_IRQS 512
-#define cfs_in_interrupt() (0)
+#define in_interrupt() (0)
/*
* printk flags
if (!enable) {
LWT_EVENT(0,0,0,0);
lwt_enabled = 0;
- cfs_mb();
+ smp_mb();
/* give people some time to stop adding traces */
schedule_timeout(10);
}
}
}
- if (enable) {
- lwt_enabled = 1;
- cfs_mb();
- LWT_EVENT(0,0,0,0);
- }
+ if (enable) {
+ lwt_enabled = 1;
+ smp_mb();
+ LWT_EVENT(0,0,0,0);
+ }
- return (0);
+ return (0);
}
int
}
}
- lwt_enabled = 1;
- cfs_mb();
+ lwt_enabled = 1;
+ smp_mb();
- LWT_EVENT(0,0,0,0);
+ LWT_EVENT(0,0,0,0);
- return (0);
+ return (0);
}
void lwt_fini ()
*/
void cfs_get_random_bytes(void *buf, int size)
{
- int *p = buf;
- int rem, tmp;
+ int *p = buf;
+ int rem, tmp;
- LASSERT(size >= 0);
+ LASSERT(size >= 0);
- rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
- if (rem) {
- cfs_get_random_bytes_prim(&tmp, sizeof(tmp));
- tmp ^= cfs_rand();
- memcpy(buf, &tmp, rem);
- p = buf + rem;
- size -= rem;
- }
+ rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
+ if (rem) {
+ get_random_bytes(&tmp, sizeof(tmp));
+ tmp ^= cfs_rand();
+ memcpy(buf, &tmp, rem);
+ p = buf + rem;
+ size -= rem;
+ }
- while (size >= sizeof(int)) {
- cfs_get_random_bytes_prim(&tmp, sizeof(tmp));
- *p = cfs_rand() ^ tmp;
- size -= sizeof(int);
- p++;
- }
- buf = p;
- if (size) {
- cfs_get_random_bytes_prim(&tmp, sizeof(tmp));
- tmp ^= cfs_rand();
- memcpy(buf, &tmp, size);
- }
+ while (size >= sizeof(int)) {
+ get_random_bytes(&tmp, sizeof(tmp));
+ *p = cfs_rand() ^ tmp;
+ size -= sizeof(int);
+ p++;
+ }
+ buf = p;
+ if (size) {
+ get_random_bytes(&tmp, sizeof(tmp));
+ tmp ^= cfs_rand();
+ memcpy(buf, &tmp, size);
+ }
}
EXPORT_SYMBOL(cfs_get_random_bytes);
struct cfs_trace_page *tage;
/* My caller is trying to free memory */
- if (!cfs_in_interrupt() && memory_pressure_get())
+ if (!in_interrupt() && memory_pressure_get())
return NULL;
/*
tage = cfs_tage_alloc(GFP_ATOMIC);
if (unlikely(tage == NULL)) {
if ((!memory_pressure_get() ||
- cfs_in_interrupt()) && printk_ratelimit())
+ in_interrupt()) && printk_ratelimit())
printk(KERN_WARNING
"cannot allocate a tage (%ld)\n",
tcd->tcd_cur_pages);
libcfs_panic_in_progress = 1;
libcfs_catastrophe = 1;
- cfs_mb();
+ smp_mb();
cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
void
cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
- LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
cfs_wi_sched_lock(sched);
{
int rc;
- LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
/*
void
cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
- LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
cfs_wi_sched_lock(sched);
static inline void
mxlnd_set_conn_status(kmx_conn_t *conn, int status)
{
- conn->mxk_status = status;
- cfs_mb();
+ conn->mxk_status = status;
+ smp_mb();
}
/**
int i;
LASSERT(net != NULL);
- LASSERT(!cfs_in_interrupt());
+ LASSERT(!in_interrupt());
dev = net->ibn_dev;
void
kiblnd_destroy_conn (kib_conn_t *conn)
{
- struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
- int rc;
-
- LASSERT (!cfs_in_interrupt());
- LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
- LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
- LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT (cfs_list_empty(&conn->ibc_active_txs));
- LASSERT (conn->ibc_noops_posted == 0);
- LASSERT (conn->ibc_nsends_posted == 0);
-
- switch (conn->ibc_state) {
- default:
- /* conn must be completely disengaged from the network */
- LBUG();
-
- case IBLND_CONN_DISCONNECTED:
- /* connvars should have been freed already */
- LASSERT (conn->ibc_connvars == NULL);
- break;
+ struct rdma_cm_id *cmid = conn->ibc_cmid;
+ kib_peer_t *peer = conn->ibc_peer;
+ int rc;
+
+ LASSERT (!in_interrupt());
+ LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT (cfs_list_empty(&conn->ibc_active_txs));
+ LASSERT (conn->ibc_noops_posted == 0);
+ LASSERT (conn->ibc_nsends_posted == 0);
+
+ switch (conn->ibc_state) {
+ default:
+ /* conn must be completely disengaged from the network */
+ LBUG();
+
+ case IBLND_CONN_DISCONNECTED:
+ /* connvars should have been freed already */
+ LASSERT (conn->ibc_connvars == NULL);
+ break;
- case IBLND_CONN_INIT:
- break;
- }
+ case IBLND_CONN_INIT:
+ break;
+ }
- /* conn->ibc_cmid might be destroyed by CM already */
- if (cmid != NULL && cmid->qp != NULL)
- rdma_destroy_qp(cmid);
+ /* conn->ibc_cmid might be destroyed by CM already */
+ if (cmid != NULL && cmid->qp != NULL)
+ rdma_destroy_qp(cmid);
- if (conn->ibc_cq != NULL) {
- rc = ib_destroy_cq(conn->ibc_cq);
- if (rc != 0)
- CWARN("Error destroying CQ: %d\n", rc);
- }
+ if (conn->ibc_cq != NULL) {
+ rc = ib_destroy_cq(conn->ibc_cq);
+ if (rc != 0)
+ CWARN("Error destroying CQ: %d\n", rc);
+ }
- if (conn->ibc_rx_pages != NULL)
- kiblnd_unmap_rx_descs(conn);
+ if (conn->ibc_rx_pages != NULL)
+ kiblnd_unmap_rx_descs(conn);
- if (conn->ibc_rxs != NULL) {
- LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
- }
+ if (conn->ibc_rxs != NULL) {
+ LIBCFS_FREE(conn->ibc_rxs,
+ IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
+ }
- if (conn->ibc_connvars != NULL)
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ if (conn->ibc_connvars != NULL)
+ LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
- if (conn->ibc_hdev != NULL)
- kiblnd_hdev_decref(conn->ibc_hdev);
+ if (conn->ibc_hdev != NULL)
+ kiblnd_hdev_decref(conn->ibc_hdev);
- /* See CAVEAT EMPTOR above in kiblnd_create_conn */
- if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ /* See CAVEAT EMPTOR above in kiblnd_create_conn */
+ if (conn->ibc_state != IBLND_CONN_INIT) {
+ kib_net_t *net = peer->ibp_ni->ni_data;
- kiblnd_peer_decref(peer);
- rdma_destroy_id(cmid);
- cfs_atomic_dec(&net->ibn_nconns);
- }
+ kiblnd_peer_decref(peer);
+ rdma_destroy_id(cmid);
+ cfs_atomic_dec(&net->ibn_nconns);
+ }
- LIBCFS_FREE(conn, sizeof(*conn));
+ LIBCFS_FREE(conn, sizeof(*conn));
}
int
static inline void
kiblnd_set_conn_state (kib_conn_t *conn, int state)
{
- conn->ibc_state = state;
- cfs_mb();
+ conn->ibc_state = state;
+ smp_mb();
}
static inline void
void
kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
{
- lnet_msg_t *lntmsg[2];
- kib_net_t *net = ni->ni_data;
- int rc;
- int i;
+ lnet_msg_t *lntmsg[2];
+ kib_net_t *net = ni->ni_data;
+ int rc;
+ int i;
- LASSERT (net != NULL);
- LASSERT (!cfs_in_interrupt());
- LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
- LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
- LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
- LASSERT (tx->tx_pool != NULL);
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
+ LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+ LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
+ LASSERT (tx->tx_pool != NULL);
- kiblnd_unmap_tx(ni, tx);
+ kiblnd_unmap_tx(ni, tx);
- /* tx may have up to 2 lnet msgs to finalise */
- lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
- lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
- rc = tx->tx_status;
+ /* tx may have up to 2 lnet msgs to finalise */
+ lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
+ lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
+ rc = tx->tx_status;
- if (tx->tx_conn != NULL) {
- LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
+ if (tx->tx_conn != NULL) {
+ LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
- kiblnd_conn_decref(tx->tx_conn);
- tx->tx_conn = NULL;
- }
+ kiblnd_conn_decref(tx->tx_conn);
+ tx->tx_conn = NULL;
+ }
- tx->tx_nwrq = 0;
- tx->tx_status = 0;
+ tx->tx_nwrq = 0;
+ tx->tx_status = 0;
- kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+ kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
- /* delay finalize until my descs have been freed */
- for (i = 0; i < 2; i++) {
- if (lntmsg[i] == NULL)
- continue;
+ /* delay finalize until my descs have been freed */
+ for (i = 0; i < 2; i++) {
+ if (lntmsg[i] == NULL)
+ continue;
- lnet_finalize(ni, lntmsg[i], rc);
- }
+ lnet_finalize(ni, lntmsg[i], rc);
+ }
}
void
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
- int rc;
+ kib_conn_t *conn = rx->rx_conn;
+ kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
+ struct ib_mr *mr;
+ int rc;
- LASSERT (net != NULL);
- LASSERT (!cfs_in_interrupt());
- LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
- credit == IBLND_POSTRX_PEER_CREDIT ||
- credit == IBLND_POSTRX_RSRVD_CREDIT);
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
+ credit == IBLND_POSTRX_PEER_CREDIT ||
+ credit == IBLND_POSTRX_RSRVD_CREDIT);
- mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
- LASSERT (mr != NULL);
+ mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
+ LASSERT (mr != NULL);
rx->rx_sge.lkey = mr->lkey;
rx->rx_sge.addr = rx->rx_msgaddr;
int
kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_send_wr *wrq = &tx->tx_wrq[0];
- int rc = resid;
- int srcidx;
- int dstidx;
- int wrknob;
-
- LASSERT (!cfs_in_interrupt());
- LASSERT (tx->tx_nwrq == 0);
- LASSERT (type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- srcidx = dstidx = 0;
+ kib_msg_t *ibmsg = tx->tx_msg;
+ kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct ib_sge *sge = &tx->tx_sge[0];
+ struct ib_send_wr *wrq = &tx->tx_wrq[0];
+ int rc = resid;
+ int srcidx;
+ int dstidx;
+ int wrknob;
+
+ LASSERT (!in_interrupt());
+ LASSERT (tx->tx_nwrq == 0);
+ LASSERT (type == IBLND_MSG_GET_DONE ||
+ type == IBLND_MSG_PUT_DONE);
+
+ srcidx = dstidx = 0;
while (resid > 0) {
if (srcidx >= srcrd->rd_nfrags) {
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
- /* Thread context */
- LASSERT (!cfs_in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ /* Thread context */
+ LASSERT (!in_interrupt());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- switch (type) {
- default:
- LBUG();
- return (-EIO);
+ switch (type) {
+ default:
+ LBUG();
+ return (-EIO);
case LNET_MSG_ACK:
LASSERT (payload_nob == 0);
kib_conn_t *conn = rx->rx_conn;
kib_tx_t *tx;
kib_msg_t *txmsg;
- int nob;
- int post_credit = IBLND_POSTRX_PEER_CREDIT;
- int rc = 0;
+ int nob;
+ int post_credit = IBLND_POSTRX_PEER_CREDIT;
+ int rc = 0;
- LASSERT (mlen <= rlen);
- LASSERT (!cfs_in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (mlen <= rlen);
+ LASSERT (!in_interrupt());
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
- switch (rxmsg->ibm_type) {
- default:
- LBUG();
+ switch (rxmsg->ibm_type) {
+ default:
+ LBUG();
case IBLND_MSG_IMMEDIATE:
nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
void
kiblnd_peer_alive (kib_peer_t *peer)
{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer->ibp_last_alive = cfs_time_current();
- cfs_mb();
+ /* This is racy, but everyone's only writing cfs_time_current() */
+ peer->ibp_last_alive = cfs_time_current();
+ smp_mb();
}
void
unsigned long flags;
kib_rx_t *rx;
- LASSERT(!cfs_in_interrupt());
+ LASSERT(!in_interrupt());
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
kiblnd_handle_rx(rx);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
void
kiblnd_finalise_conn (kib_conn_t *conn)
{
- LASSERT (!cfs_in_interrupt());
- LASSERT (conn->ibc_state > IBLND_CONN_INIT);
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state > IBLND_CONN_INIT);
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+ kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
- /* abort_receives moves QP state to IB_QPS_ERR. This is only required
- * for connections that didn't get as far as being connected, because
- * rdma_disconnect() does this for free. */
- kiblnd_abort_receives(conn);
+ /* abort_receives moves QP state to IB_QPS_ERR. This is only required
+ * for connections that didn't get as far as being connected, because
+ * rdma_disconnect() does this for free. */
+ kiblnd_abort_receives(conn);
- /* Complete all tx descs not waiting for sends to complete.
- * NB we should be safe from RDMA now that the QP has changed state */
+ /* Complete all tx descs not waiting for sends to complete.
+ * NB we should be safe from RDMA now that the QP has changed state */
- kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
- kiblnd_abort_txs(conn, &conn->ibc_active_txs);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
+ kiblnd_abort_txs(conn, &conn->ibc_active_txs);
- kiblnd_handle_early_rxs(conn);
+ kiblnd_handle_early_rxs(conn);
}
void
kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
{
- CFS_LIST_HEAD (zombies);
- unsigned long flags;
+ CFS_LIST_HEAD (zombies);
+ unsigned long flags;
- LASSERT (error != 0);
- LASSERT (!cfs_in_interrupt());
+ LASSERT (error != 0);
+ LASSERT (!in_interrupt());
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (active) {
- LASSERT (peer->ibp_connecting > 0);
- peer->ibp_connecting--;
- } else {
- LASSERT (peer->ibp_accepting > 0);
- peer->ibp_accepting--;
- }
+ if (active) {
+ LASSERT (peer->ibp_connecting > 0);
+ peer->ibp_connecting--;
+ } else {
+ LASSERT (peer->ibp_accepting > 0);
+ peer->ibp_accepting--;
+ }
- if (peer->ibp_connecting != 0 ||
+ if (peer->ibp_connecting != 0 ||
peer->ibp_accepting != 0) {
/* another connection attempt under way... */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
- libcfs_nid2str(peer->ibp_nid), active,
- conn->ibc_version, status);
+ CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
+ libcfs_nid2str(peer->ibp_nid), active,
+ conn->ibc_version, status);
- LASSERT (!cfs_in_interrupt());
- LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
- (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
+ LASSERT (!in_interrupt());
+ LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
+ peer->ibp_connecting > 0) ||
+ (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
+ peer->ibp_accepting > 0));
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
conn->ibc_connvars = NULL;
lnet_nid_t nid;
struct rdma_conn_param cp;
kib_rej_t rej;
- int version = IBLND_MSG_VERSION;
- unsigned long flags;
- int rc;
- struct sockaddr_in *peer_addr;
- LASSERT (!cfs_in_interrupt());
+ int version = IBLND_MSG_VERSION;
+ unsigned long flags;
+ int rc;
+ struct sockaddr_in *peer_addr;
+ LASSERT (!in_interrupt());
- /* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
- LASSERT (ibdev != NULL);
+ /* cmid inherits 'context' from the corresponding listener id */
+ ibdev = (kib_dev_t *)cmid->context;
+ LASSERT (ibdev != NULL);
memset(&rej, 0, sizeof(rej));
rej.ibr_magic = IBLND_MSG_MAGIC;
void
kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ kib_peer_t *peer = conn->ibc_peer;
- LASSERT (!cfs_in_interrupt());
- LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- switch (reason) {
- case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
- break;
+ switch (reason) {
+ case IB_CM_REJ_STALE_CONN:
+ kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
+ break;
case IB_CM_REJ_INVALID_SERVICE_ID:
CNETERR("%s rejected: no listener at %d\n",
void
kiblnd_disconnect_conn (kib_conn_t *conn)
{
- LASSERT (!cfs_in_interrupt());
- LASSERT (current == kiblnd_data.kib_connd);
- LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
+ LASSERT (!in_interrupt());
+ LASSERT (current == kiblnd_data.kib_connd);
+ LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
- rdma_disconnect(conn->ibc_cmid);
- kiblnd_finalise_conn(conn);
+ rdma_disconnect(conn->ibc_cmid);
+ kiblnd_finalise_conn(conn);
- kiblnd_peer_notify(conn->ibc_peer);
+ kiblnd_peer_notify(conn->ibc_peer);
}
int
flags);
CDEBUG(D_NET, "All peers deleted\n");
- /* Shutdown phase 2: kill the daemons... */
- kptllnd_data.kptl_shutdown = 2;
- cfs_mb();
+ /* Shutdown phase 2: kill the daemons... */
+ kptllnd_data.kptl_shutdown = 2;
+ smp_mb();
i = 2;
while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
static inline void
kptllnd_tx_decref(kptl_tx_t *tx)
{
- LASSERT (!cfs_in_interrupt()); /* Thread context only */
+ LASSERT (!in_interrupt()); /* Thread context only */
- if (cfs_atomic_dec_and_test(&tx->tx_refcount))
- kptllnd_tx_fini(tx);
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount))
+ kptllnd_tx_fini(tx);
}
/*
int nfrag;
int rc;
- LASSERT (net->net_ni == ni);
- LASSERT (!net->net_shutdown);
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!cfs_in_interrupt());
+ LASSERT (net->net_ni == ni);
+ LASSERT (!net->net_shutdown);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT (!in_interrupt());
- if (lntmsg->msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
+ if (lntmsg->msg_vmflush)
+ mpflag = cfs_memory_pressure_get_and_set();
- rc = kptllnd_find_target(net, target, &peer);
- if (rc != 0)
- goto out;
+ rc = kptllnd_find_target(net, target, &peer);
+ if (rc != 0)
+ goto out;
/* NB peer->peer_id does NOT always equal target, be careful with
* which one to use */
int nob;
int rc;
- CDEBUG(D_NET, "%s niov=%d offset=%d mlen=%d rlen=%d\n",
- kptllnd_msgtype2str(rxmsg->ptlm_type),
- niov, offset, mlen, rlen);
+ CDEBUG(D_NET, "%s niov=%d offset=%d mlen=%d rlen=%d\n",
+ kptllnd_msgtype2str(rxmsg->ptlm_type),
+ niov, offset, mlen, rlen);
- LASSERT (mlen <= rlen);
- LASSERT (mlen >= 0);
- LASSERT (!cfs_in_interrupt());
- LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
- LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
+ LASSERT (mlen <= rlen);
+ LASSERT (mlen >= 0);
+ LASSERT (!in_interrupt());
+ LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
+ LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
- switch(rxmsg->ptlm_type)
- {
- default:
+ switch(rxmsg->ptlm_type)
+ {
+ default:
LBUG();
rc = -EINVAL;
break;
void
kptllnd_peer_destroy (kptl_peer_t *peer)
{
- unsigned long flags;
+ unsigned long flags;
- CDEBUG(D_NET, "Peer=%p\n", peer);
+ CDEBUG(D_NET, "Peer=%p\n", peer);
- LASSERT (!cfs_in_interrupt());
- LASSERT (cfs_atomic_read(&peer->peer_refcount) == 0);
- LASSERT (peer->peer_state == PEER_STATE_ALLOCATED ||
- peer->peer_state == PEER_STATE_ZOMBIE);
- LASSERT (cfs_list_empty(&peer->peer_noops));
- LASSERT (cfs_list_empty(&peer->peer_sendq));
- LASSERT (cfs_list_empty(&peer->peer_activeq));
+ LASSERT (!in_interrupt());
+ LASSERT (cfs_atomic_read(&peer->peer_refcount) == 0);
+ LASSERT (peer->peer_state == PEER_STATE_ALLOCATED ||
+ peer->peer_state == PEER_STATE_ZOMBIE);
+ LASSERT (cfs_list_empty(&peer->peer_noops));
+ LASSERT (cfs_list_empty(&peer->peer_sendq));
+ LASSERT (cfs_list_empty(&peer->peer_activeq));
write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
- if (peer->peer_state == PEER_STATE_ZOMBIE)
- cfs_list_del(&peer->peer_list);
+ if (peer->peer_state == PEER_STATE_ZOMBIE)
+ cfs_list_del(&peer->peer_list);
- kptllnd_data.kptl_npeers--;
+ kptllnd_data.kptl_npeers--;
write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
- LIBCFS_FREE (peer, sizeof (*peer));
+ LIBCFS_FREE (peer, sizeof (*peer));
}
void
void
kptllnd_peer_alive (kptl_peer_t *peer)
{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer->peer_last_alive = cfs_time_current();
- cfs_mb();
+ /* This is racy, but everyone's only writing cfs_time_current() */
+ peer->peer_last_alive = cfs_time_current();
+ smp_mb();
}
void
void
kptllnd_peer_check_sends (kptl_peer_t *peer)
{
- ptl_handle_me_t meh;
- kptl_tx_t *tx;
- int rc;
- int msg_type;
- unsigned long flags;
+ ptl_handle_me_t meh;
+ kptl_tx_t *tx;
+ int rc;
+ int msg_type;
+ unsigned long flags;
- LASSERT(!cfs_in_interrupt());
+ LASSERT(!in_interrupt());
spin_lock_irqsave(&peer->peer_lock, flags);
- peer->peer_retry_noop = 0;
+ peer->peer_retry_noop = 0;
if (kptllnd_peer_send_noop(peer)) {
/* post a NOOP to return credits */
void
kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
{
- int rc;
- ptl_md_t md;
- ptl_handle_me_t meh;
- ptl_handle_md_t mdh;
- ptl_process_id_t any;
- kptl_rx_buffer_pool_t *rxbp = rxb->rxb_pool;
- unsigned long flags;
-
- LASSERT (!cfs_in_interrupt());
- LASSERT (rxb->rxb_refcount == 0);
- LASSERT (!rxb->rxb_idle);
- LASSERT (!rxb->rxb_posted);
- LASSERT (PtlHandleIsEqual(rxb->rxb_mdh, PTL_INVALID_HANDLE));
+ int rc;
+ ptl_md_t md;
+ ptl_handle_me_t meh;
+ ptl_handle_md_t mdh;
+ ptl_process_id_t any;
+ kptl_rx_buffer_pool_t *rxbp = rxb->rxb_pool;
+ unsigned long flags;
+
+ LASSERT (!in_interrupt());
+ LASSERT (rxb->rxb_refcount == 0);
+ LASSERT (!rxb->rxb_idle);
+ LASSERT (!rxb->rxb_posted);
+ LASSERT (PtlHandleIsEqual(rxb->rxb_mdh, PTL_INVALID_HANDLE));
any.nid = PTL_NID_ANY;
any.pid = PTL_PID_ANY;
int post_credit = PTLLND_POSTRX_PEER_CREDIT;
kptl_net_t *net = NULL;
kptl_peer_t *peer;
- cfs_list_t txs;
- unsigned long flags;
- lnet_process_id_t srcid;
+ cfs_list_t txs;
+ unsigned long flags;
+ lnet_process_id_t srcid;
- LASSERT (!cfs_in_interrupt());
- LASSERT (rx->rx_peer == NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (rx->rx_peer == NULL);
- CFS_INIT_LIST_HEAD(&txs);
+ CFS_INIT_LIST_HEAD(&txs);
- if ((rx->rx_nob >= 4 &&
+ if ((rx->rx_nob >= 4 &&
(msg->ptlm_magic == LNET_PROTO_MAGIC ||
msg->ptlm_magic == __swab32(LNET_PROTO_MAGIC))) ||
(rx->rx_nob >= 6 &&
void
kptllnd_tx_fini (kptl_tx_t *tx)
{
- lnet_msg_t *replymsg = tx->tx_lnet_replymsg;
- lnet_msg_t *msg = tx->tx_lnet_msg;
- kptl_peer_t *peer = tx->tx_peer;
- int status = tx->tx_status;
- int rc;
-
- LASSERT (!cfs_in_interrupt());
- LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
- LASSERT (!tx->tx_idle);
- LASSERT (!tx->tx_active);
-
- /* TX has completed or failed */
-
- if (peer != NULL) {
- rc = kptllnd_tx_abort_netio(tx);
- if (rc != 0)
- return;
- }
+ lnet_msg_t *replymsg = tx->tx_lnet_replymsg;
+ lnet_msg_t *msg = tx->tx_lnet_msg;
+ kptl_peer_t *peer = tx->tx_peer;
+ int status = tx->tx_status;
+ int rc;
+
+ LASSERT (!in_interrupt());
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (!tx->tx_idle);
+ LASSERT (!tx->tx_active);
+
+ /* TX has completed or failed */
+
+ if (peer != NULL) {
+ rc = kptllnd_tx_abort_netio(tx);
+ if (rc != 0)
+ return;
+ }
LASSERT (PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE));
LASSERT (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE));
void
kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
{
- lnet_msg_t *lnetmsg0 = NULL;
- lnet_msg_t *lnetmsg1 = NULL;
- int status0 = 0;
- int status1 = 0;
- kqswnal_rx_t *krx;
+ lnet_msg_t *lnetmsg0 = NULL;
+ lnet_msg_t *lnetmsg1 = NULL;
+ int status0 = 0;
+ int status1 = 0;
+ kqswnal_rx_t *krx;
- LASSERT (!cfs_in_interrupt());
+ LASSERT (!in_interrupt());
- if (ktx->ktx_status == -EHOSTDOWN)
- kqswnal_notify_peer_down(ktx);
+ if (ktx->ktx_status == -EHOSTDOWN)
+ kqswnal_notify_peer_down(ktx);
switch (ktx->ktx_state) {
case KTX_RDMA_FETCH: /* optimized PUT/REPLY handled */
void
kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
{
- unsigned long flags;
+ unsigned long flags;
- ktx->ktx_status = status;
+ ktx->ktx_status = status;
- if (!cfs_in_interrupt()) {
- kqswnal_tx_done_in_thread_context(ktx);
- return;
- }
+ if (!in_interrupt()) {
+ kqswnal_tx_done_in_thread_context(ktx);
+ return;
+ }
- /* Complete the send in thread context */
+ /* Complete the send in thread context */
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
cfs_list_add_tail(&ktx->ktx_schedlist,
int
kqswnal_launch (kqswnal_tx_t *ktx)
{
- /* Don't block for transmit descriptor if we're in interrupt context */
- int attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
- int dest = kqswnal_nid2elanid (ktx->ktx_nid);
- unsigned long flags;
- int rc;
+ /* Don't block for transmit descriptor if we're in interrupt context */
+ int attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
+ int dest = kqswnal_nid2elanid (ktx->ktx_nid);
+ unsigned long flags;
+ int rc;
- ktx->ktx_launchtime = cfs_time_current();
+ ktx->ktx_launchtime = cfs_time_current();
if (kqswnal_data.kqn_shuttingdown)
return (-ESHUTDOWN);
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
- /* It must be OK to kmap() if required */
- LASSERT (payload_kiov == NULL || !cfs_in_interrupt ());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ /* It must be OK to kmap() if required */
+ LASSERT (payload_kiov == NULL || !in_interrupt ());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- if (kqswnal_nid2elanid (target.nid) < 0) {
- CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
- return -EIO;
- }
+ if (kqswnal_nid2elanid (target.nid) < 0) {
+ CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
+ return -EIO;
+ }
/* I may not block for a transmit descriptor if I might block the
* router, receiver, or an interrupt handler. */
void
kqswnal_rx_done (kqswnal_rx_t *krx)
{
- int rc;
+ int rc;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
- if (krx->krx_rpc_reply_needed) {
- /* We've not completed the peer's RPC yet... */
- krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
- krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
+ if (krx->krx_rpc_reply_needed) {
+ /* We've not completed the peer's RPC yet... */
+ krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
+ krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
- LASSERT (!cfs_in_interrupt());
+ LASSERT (!in_interrupt());
- rc = ep_complete_rpc(krx->krx_rxd,
- kqswnal_rpc_complete, krx,
- &krx->krx_rpc_reply.ep_statusblk,
- NULL, NULL, 0);
- if (rc == EP_SUCCESS)
- return;
+ rc = ep_complete_rpc(krx->krx_rxd,
+ kqswnal_rpc_complete, krx,
+ &krx->krx_rpc_reply.ep_statusblk,
+ NULL, NULL, 0);
+ if (rc == EP_SUCCESS)
+ return;
- CERROR("can't complete RPC: %d\n", rc);
- krx->krx_rpc_reply_needed = 0;
- }
+ CERROR("can't complete RPC: %d\n", rc);
+ krx->krx_rpc_reply_needed = 0;
+ }
- kqswnal_requeue_rx(krx);
+ kqswnal_requeue_rx(krx);
}
void
else
CERROR("receive status failed with status %d nob %d\n",
ep_rxd_status(rxd), nob);
- kqswnal_rx_decref(krx);
- return;
- }
+ kqswnal_rx_decref(krx);
+ return;
+ }
- if (!cfs_in_interrupt()) {
- kqswnal_parse(krx);
- return;
- }
+ if (!in_interrupt()) {
+ kqswnal_parse(krx);
+ return;
+ }
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
unsigned int mlen,
unsigned int rlen)
{
- kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
- lnet_nid_t fromnid;
- kqswnal_msg_t *msg;
- lnet_hdr_t *hdr;
- kqswnal_remotemd_t *rmd;
- int msg_offset;
- int rc;
-
- LASSERT (!cfs_in_interrupt ()); /* OK to map */
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
+ kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
+ lnet_nid_t fromnid;
+ kqswnal_msg_t *msg;
+ lnet_hdr_t *hdr;
+ kqswnal_remotemd_t *rmd;
+ int msg_offset;
+ int rc;
+
+ LASSERT (!in_interrupt ()); /* OK to map */
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
fromnid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ep_rxd_node(krx->krx_rxd));
msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);
int
kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
{
- kra_conn_t *conn;
- RAP_RETURN rrc;
+ kra_conn_t *conn;
+ RAP_RETURN rrc;
- LASSERT (!cfs_in_interrupt());
- LIBCFS_ALLOC(conn, sizeof(*conn));
+ LASSERT (!in_interrupt());
+ LIBCFS_ALLOC(conn, sizeof(*conn));
- if (conn == NULL)
- return -ENOMEM;
+ if (conn == NULL)
+ return -ENOMEM;
memset(conn, 0, sizeof(*conn));
cfs_atomic_set(&conn->rac_refcount, 1);
void
kranal_destroy_conn(kra_conn_t *conn)
{
- RAP_RETURN rrc;
-
- LASSERT (!cfs_in_interrupt());
- LASSERT (!conn->rac_scheduled);
- LASSERT (cfs_list_empty(&conn->rac_list));
- LASSERT (cfs_list_empty(&conn->rac_hashlist));
- LASSERT (cfs_list_empty(&conn->rac_schedlist));
- LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
- LASSERT (cfs_list_empty(&conn->rac_fmaq));
- LASSERT (cfs_list_empty(&conn->rac_rdmaq));
- LASSERT (cfs_list_empty(&conn->rac_replyq));
-
- rrc = RapkDestroyRi(conn->rac_device->rad_handle,
- conn->rac_rihandle);
- LASSERT (rrc == RAP_SUCCESS);
-
- if (conn->rac_peer != NULL)
- kranal_peer_decref(conn->rac_peer);
-
- LIBCFS_FREE(conn, sizeof(*conn));
- cfs_atomic_dec(&kranal_data.kra_nconns);
+ RAP_RETURN rrc;
+
+ LASSERT (!in_interrupt());
+ LASSERT (!conn->rac_scheduled);
+ LASSERT (cfs_list_empty(&conn->rac_list));
+ LASSERT (cfs_list_empty(&conn->rac_hashlist));
+ LASSERT (cfs_list_empty(&conn->rac_schedlist));
+ LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
+ LASSERT (cfs_list_empty(&conn->rac_fmaq));
+ LASSERT (cfs_list_empty(&conn->rac_rdmaq));
+ LASSERT (cfs_list_empty(&conn->rac_replyq));
+
+ rrc = RapkDestroyRi(conn->rac_device->rad_handle,
+ conn->rac_rihandle);
+ LASSERT (rrc == RAP_SUCCESS);
+
+ if (conn->rac_peer != NULL)
+ kranal_peer_decref(conn->rac_peer);
+
+ LIBCFS_FREE(conn, sizeof(*conn));
+ cfs_atomic_dec(&kranal_data.kra_nconns);
}
void
kranal_terminate_conn_locked (kra_conn_t *conn)
{
- LASSERT (!cfs_in_interrupt());
- LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
- LASSERT (!cfs_list_empty(&conn->rac_hashlist));
- LASSERT (cfs_list_empty(&conn->rac_list));
+ LASSERT (!in_interrupt());
+ LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
+ LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+ LASSERT (cfs_list_empty(&conn->rac_list));
- /* Remove from conn hash table: no new callbacks */
- cfs_list_del_init(&conn->rac_hashlist);
- kranal_conn_decref(conn);
+ /* Remove from conn hash table: no new callbacks */
+ cfs_list_del_init(&conn->rac_hashlist);
+ kranal_conn_decref(conn);
- conn->rac_state = RANAL_CONN_CLOSED;
+ conn->rac_state = RANAL_CONN_CLOSED;
- /* schedule to clear out all uncompleted comms in context of dev's
- * scheduler */
- kranal_schedule_conn(conn);
+ /* schedule to clear out all uncompleted comms in context of dev's
+ * scheduler */
+ kranal_schedule_conn(conn);
}
void
kranal_close_conn_locked (kra_conn_t *conn, int error)
{
- kra_peer_t *peer = conn->rac_peer;
+ kra_peer_t *peer = conn->rac_peer;
- CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
- "closing conn to %s: error %d\n",
- libcfs_nid2str(peer->rap_nid), error);
+ CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
+ "closing conn to %s: error %d\n",
+ libcfs_nid2str(peer->rap_nid), error);
- LASSERT (!cfs_in_interrupt());
- LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
- LASSERT (!cfs_list_empty(&conn->rac_hashlist));
- LASSERT (!cfs_list_empty(&conn->rac_list));
+ LASSERT (!in_interrupt());
+ LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
+ LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+ LASSERT (!cfs_list_empty(&conn->rac_list));
- cfs_list_del_init(&conn->rac_list);
+ cfs_list_del_init(&conn->rac_list);
- if (cfs_list_empty(&peer->rap_conns) &&
- peer->rap_persistence == 0) {
- /* Non-persistent peer with no more conns... */
- kranal_unlink_peer_locked(peer);
- }
+ if (cfs_list_empty(&peer->rap_conns) &&
+ peer->rap_persistence == 0) {
+ /* Non-persistent peer with no more conns... */
+ kranal_unlink_peer_locked(peer);
+ }
- /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
- * full timeout. If we get a CLOSE we know the peer has stopped all
- * RDMA. Otherwise if we wait for the full timeout we can also be sure
- * all RDMA has stopped. */
- conn->rac_last_rx = jiffies;
- cfs_mb();
+ /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
+ * full timeout. If we get a CLOSE we know the peer has stopped all
+ * RDMA. Otherwise if we wait for the full timeout we can also be sure
+ * all RDMA has stopped. */
+ conn->rac_last_rx = jiffies;
+ smp_mb();
- conn->rac_state = RANAL_CONN_CLOSING;
- kranal_schedule_conn(conn); /* schedule sending CLOSE */
+ conn->rac_state = RANAL_CONN_CLOSING;
+ kranal_schedule_conn(conn); /* schedule sending CLOSE */
- kranal_conn_decref(conn); /* lose peer's ref */
+ kranal_conn_decref(conn); /* lose peer's ref */
}
void
void
kranal_tx_done (kra_tx_t *tx, int completion)
{
- lnet_msg_t *lnetmsg[2];
- unsigned long flags;
- int i;
+ lnet_msg_t *lnetmsg[2];
+ unsigned long flags;
+ int i;
- LASSERT (!cfs_in_interrupt());
+ LASSERT (!in_interrupt());
- kranal_unmap_buffer(tx);
+ kranal_unmap_buffer(tx);
- lnetmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
- lnetmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
+ lnetmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
+ lnetmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
- tx->tx_buftype = RANAL_BUF_NONE;
- tx->tx_msg.ram_type = RANAL_MSG_NONE;
- tx->tx_conn = NULL;
+ tx->tx_buftype = RANAL_BUF_NONE;
+ tx->tx_msg.ram_type = RANAL_MSG_NONE;
+ tx->tx_conn = NULL;
spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
- cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
+ cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
- /* finalize AFTER freeing lnet msgs */
- for (i = 0; i < 2; i++) {
- if (lnetmsg[i] == NULL)
- continue;
+ /* finalize AFTER freeing lnet msgs */
+ for (i = 0; i < 2; i++) {
+ if (lnetmsg[i] == NULL)
+ continue;
- lnet_finalize(kranal_data.kra_ni, lnetmsg[i], completion);
- }
+ lnet_finalize(kranal_data.kra_ni, lnetmsg[i], completion);
+ }
}
kra_conn_t *
/* NB 'private' is different depending on what we're sending.... */
- CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
- nob, niov, libcfs_id2str(target));
+ CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
+ nob, niov, libcfs_id2str(target));
- LASSERT (nob == 0 || niov > 0);
- LASSERT (niov <= LNET_MAX_IOV);
+ LASSERT (nob == 0 || niov > 0);
+ LASSERT (niov <= LNET_MAX_IOV);
- LASSERT (!cfs_in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (!in_interrupt());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(kiov != NULL && iov != NULL));
- if (routing) {
- CERROR ("Can't route\n");
- return -EIO;
- }
+ if (routing) {
+ CERROR ("Can't route\n");
+ return -EIO;
+ }
switch(type) {
default:
struct iovec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- kra_conn_t *conn = private;
- kra_msg_t *rxmsg = conn->rac_rxmsg;
- kra_tx_t *tx;
- void *buffer;
- int rc;
-
- LASSERT (mlen <= rlen);
- LASSERT (!cfs_in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
-
- CDEBUG(D_NET, "conn %p, rxmsg %p, lntmsg %p\n", conn, rxmsg, lntmsg);
+ kra_conn_t *conn = private;
+ kra_msg_t *rxmsg = conn->rac_rxmsg;
+ kra_tx_t *tx;
+ void *buffer;
+ int rc;
+
+ LASSERT (mlen <= rlen);
+ LASSERT (!in_interrupt());
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
+
+ CDEBUG(D_NET, "conn %p, rxmsg %p, lntmsg %p\n", conn, rxmsg, lntmsg);
switch(rxmsg->ram_type) {
default:
int
ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
- ksock_net_t *net = ni->ni_data;
- ksock_peer_t *peer;
+ ksock_net_t *net = ni->ni_data;
+ ksock_peer_t *peer;
- LASSERT (id.nid != LNET_NID_ANY);
- LASSERT (id.pid != LNET_PID_ANY);
- LASSERT (!cfs_in_interrupt());
+ LASSERT (id.nid != LNET_NID_ANY);
+ LASSERT (id.pid != LNET_PID_ANY);
+ LASSERT (!in_interrupt());
- LIBCFS_ALLOC (peer, sizeof (*peer));
- if (peer == NULL)
- return -ENOMEM;
+ LIBCFS_ALLOC (peer, sizeof (*peer));
+ if (peer == NULL)
+ return -ENOMEM;
- memset (peer, 0, sizeof (*peer)); /* NULL pointers/clear flags etc */
+ memset (peer, 0, sizeof (*peer)); /* NULL pointers/clear flags etc */
peer->ksnp_ni = ni;
peer->ksnp_id = id;
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
- conn->ksnc_tx_last_post = cfs_time_current();
- /* Set the deadline for the outgoing HELLO to drain */
- conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
- conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- cfs_mb(); /* order with adding to peer's conn list */
+ conn->ksnc_tx_last_post = cfs_time_current();
+ /* Set the deadline for the outgoing HELLO to drain */
+ conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
+ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ smp_mb(); /* order with adding to peer's conn list */
- cfs_list_add (&conn->ksnc_list, &peer->ksnp_conns);
- ksocknal_conn_addref(conn);
+ cfs_list_add (&conn->ksnc_list, &peer->ksnp_conns);
+ ksocknal_conn_addref(conn);
- ksocknal_new_packet(conn, 0);
+ ksocknal_new_packet(conn, 0);
conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
if (rc > 0) /* sent something? */
conn->ksnc_tx_bufnob += rc; /* account it */
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* allocated send buffer bytes < computed; infer
- * something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = bufnob;
- cfs_mb();
- }
+ if (bufnob < conn->ksnc_tx_bufnob) {
+ /* allocated send buffer bytes < computed; infer
+ * something got ACKed */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = bufnob;
+ smp_mb();
+ }
- if (rc <= 0) { /* Didn't write anything? */
+ if (rc <= 0) { /* Didn't write anything? */
if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
rc = -EAGAIN;
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- cfs_mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ smp_mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
do {
LASSERT (conn->ksnc_rx_niov > 0);
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- cfs_mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ smp_mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
do {
LASSERT (conn->ksnc_rx_nkiov > 0);
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
spin_lock_bh(&sched->kss_lock);
- if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
- /* First packet starts the timeout */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = 0;
- cfs_mb(); /* order with adding to tx_queue */
- }
+ if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ /* First packet starts the timeout */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = 0;
+ smp_mb(); /* order with adding to tx_queue */
+ }
- if (msg->ksm_type == KSOCK_MSG_NOOP) {
- /* The packet is noop ZC ACK, try to piggyback the ack_cookie
- * on a normal packet so I don't need to send it */
+ if (msg->ksm_type == KSOCK_MSG_NOOP) {
+ /* The packet is noop ZC ACK, try to piggyback the ack_cookie
+ * on a normal packet so I don't need to send it */
LASSERT (msg->ksm_zc_cookies[1] != 0);
LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT (!in_interrupt ());
- if (payload_iov != NULL)
- desc_size = offsetof(ksock_tx_t,
- tx_frags.virt.iov[1 + payload_niov]);
- else
- desc_size = offsetof(ksock_tx_t,
- tx_frags.paged.kiov[payload_niov]);
+ if (payload_iov != NULL)
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.virt.iov[1 + payload_niov]);
+ else
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
mpflag = cfs_memory_pressure_get_and_set();
ksocknal_lib_eager_ack(conn);
}
- if (nob_to_skip == 0) { /* right at next packet boundary now */
- conn->ksnc_rx_started = 0;
- cfs_mb(); /* racing with timeout thread */
+ if (nob_to_skip == 0) { /* right at next packet boundary now */
+ conn->ksnc_rx_started = 0;
+ smp_mb(); /* racing with timeout thread */
- switch (conn->ksnc_proto->pro_version) {
- case KSOCK_PROTO_V2:
- case KSOCK_PROTO_V3:
+ switch (conn->ksnc_proto->pro_version) {
+ case KSOCK_PROTO_V2:
+ case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
ni->ni_lnd->lnd_refcount--;
lnet_net_unlock(LNET_LOCK_EX);
- islo = ni->ni_lnd->lnd_type == LOLND;
+ islo = ni->ni_lnd->lnd_type == LOLND;
- LASSERT (!cfs_in_interrupt ());
- (ni->ni_lnd->lnd_shutdown)(ni);
+ LASSERT (!in_interrupt ());
+ (ni->ni_lnd->lnd_shutdown)(ni);
- /* can't deref lnd anymore now; it might have unregistered
- * itself... */
+ /* can't deref lnd anymore now; it might have unregistered
+ * itself... */
if (!islo)
CDEBUG(D_LNI, "Removed LNI %s\n",
unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
unsigned int nob)
{
- /* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
- char *daddr = NULL;
- char *saddr = NULL;
+ /* NB diov, siov are READ-ONLY */
+ unsigned int this_nob;
+ char *daddr = NULL;
+ char *saddr = NULL;
- if (nob == 0)
- return;
+ if (nob == 0)
+ return;
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (!in_interrupt ());
- LASSERT (ndiov > 0);
+ LASSERT (ndiov > 0);
while (doffset >= diov->kiov_len) {
doffset -= diov->kiov_len;
diov++;
unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
unsigned int nob)
{
- /* NB iov, kiov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
+ /* NB iov, kiov are READ-ONLY */
+ unsigned int this_nob;
+ char *addr = NULL;
- if (nob == 0)
- return;
+ if (nob == 0)
+ return;
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (!in_interrupt ());
- LASSERT (niov > 0);
+ LASSERT (niov > 0);
while (iovoffset >= iov->iov_len) {
iovoffset -= iov->iov_len;
iov++;
unsigned int niov, struct iovec *iov, unsigned int iovoffset,
unsigned int nob)
{
- /* NB kiov, iov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
+ /* NB kiov, iov are READ-ONLY */
+ unsigned int this_nob;
+ char *addr = NULL;
- if (nob == 0)
- return;
+ if (nob == 0)
+ return;
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (!in_interrupt ());
- LASSERT (nkiov > 0);
+ LASSERT (nkiov > 0);
while (kiovoffset >= kiov->kiov_len) {
kiovoffset -= kiov->kiov_len;
kiov++;
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- unsigned int niov = 0;
- struct iovec *iov = NULL;
- lnet_kiov_t *kiov = NULL;
- int rc;
+ unsigned int niov = 0;
+ struct iovec *iov = NULL;
+ lnet_kiov_t *kiov = NULL;
+ int rc;
- LASSERT (!cfs_in_interrupt ());
- LASSERT (mlen == 0 || msg != NULL);
+ LASSERT (!in_interrupt ());
+ LASSERT (mlen == 0 || msg != NULL);
if (msg != NULL) {
LASSERT(msg->msg_receiving);
void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
- void *priv = msg->msg_private;
- int rc;
+ void *priv = msg->msg_private;
+ int rc;
- LASSERT (!cfs_in_interrupt ());
- LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
- (msg->msg_txcredit && msg->msg_peertxcredit));
+ LASSERT (!in_interrupt ());
+ LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
+ (msg->msg_txcredit && msg->msg_peertxcredit));
- rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
- if (rc < 0)
- lnet_finalize(ni, msg, rc);
+ rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
+ if (rc < 0)
+ lnet_finalize(ni, msg, rc);
}
int
struct lnet_msg *msg;
lnet_pid_t dest_pid;
lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
- __u32 payload_length;
- __u32 type;
+ lnet_nid_t src_nid;
+ __u32 payload_length;
+ __u32 type;
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (!in_interrupt ());
- type = le32_to_cpu(hdr->type);
- src_nid = le64_to_cpu(hdr->src_nid);
- dest_nid = le64_to_cpu(hdr->dest_nid);
- dest_pid = le32_to_cpu(hdr->dest_pid);
+ type = le32_to_cpu(hdr->type);
+ src_nid = le64_to_cpu(hdr->src_nid);
+ dest_nid = le64_to_cpu(hdr->dest_nid);
+ dest_pid = le32_to_cpu(hdr->dest_pid);
payload_length = le32_to_cpu(hdr->payload_length);
for_me = (ni->ni_nid == dest_nid);
int rc;
int i;
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (!in_interrupt ());
- if (msg == NULL)
- return;
+ if (msg == NULL)
+ return;
#if 0
CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
cfs_time_t now = cfs_time_current();
int cpt = lnet_cpt_of_nid(nid);
- LASSERT (!cfs_in_interrupt ());
+ LASSERT (!in_interrupt ());
- CDEBUG (D_NET, "%s notifying %s: %s\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid),
- alive ? "up" : "down");
+ CDEBUG (D_NET, "%s notifying %s: %s\n",
+ (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid),
+ alive ? "up" : "down");
if (ni != NULL &&
LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
- srpc_event_t *rpcev = ev->md.user_ptr;
- srpc_client_rpc_t *crpc;
- srpc_server_rpc_t *srpc;
- srpc_buffer_t *buffer;
- srpc_service_t *sv;
- srpc_msg_t *msg;
- srpc_msg_type_t type;
+ srpc_event_t *rpcev = ev->md.user_ptr;
+ srpc_client_rpc_t *crpc;
+ srpc_server_rpc_t *srpc;
+ srpc_buffer_t *buffer;
+ srpc_service_t *sv;
+ srpc_msg_t *msg;
+ srpc_msg_type_t type;
- LASSERT (!cfs_in_interrupt());
+ LASSERT (!in_interrupt());
- if (ev->status != 0) {
+ if (ev->status != 0) {
spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.errors++;
spin_unlock(&srpc_data.rpc_glock);
- }
+ }
rpcev->ev_lnet = ev->type;
#define unlikely(exp) (exp)
#endif
-#define cfs_might_sleep()
+#define might_sleep()
#define might_sleep_if(c)
#define smp_mb()
struct lu_ucred;
-extern void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist);
-extern void lustre_groups_sort(cfs_group_info_t *group_info);
+extern void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist);
+extern void lustre_groups_sort(struct group_info *group_info);
extern int lustre_in_group_p(struct lu_ucred *mu, gid_t grp);
extern int lustre_idmap_add(struct lustre_idmap_table *t,
__u32 uc_suppgids[2];
cfs_cap_t uc_cap;
__u32 uc_umask;
- cfs_group_info_t *uc_ginfo;
+ struct group_info *uc_ginfo;
struct md_identity *uc_identity;
};
#define __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, type) \
do { \
- LASSERT(ergo((type) != GFP_ATOMIC, !cfs_in_interrupt())); \
+ LASSERT(ergo((type) != GFP_ATOMIC, !in_interrupt())); \
(ptr) = (cptab) == NULL ? \
kmem_cache_alloc(slab, type | __GFP_ZERO) : \
cfs_mem_cache_cpt_alloc(slab, cptab, cpt, type | __GFP_ZERO); \
int i = 0, rc;
ENTRY;
- do {
- save = lli->lli_rmtperm_time;
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i))
- break;
+ do {
+ save = lli->lli_rmtperm_time;
+ rc = do_check_remote_perm(lli, mask);
+ if (!rc || (rc != -ENOENT && i))
+ break;
- cfs_might_sleep();
+ might_sleep();
mutex_lock(&lli->lli_rmtperm_mutex);
- /* check again */
- if (save != lli->lli_rmtperm_time) {
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i)) {
+ /* check again */
+ if (save != lli->lli_rmtperm_time) {
+ rc = do_check_remote_perm(lli, mask);
+ if (!rc || (rc != -ENOENT && i)) {
mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
- }
+ break;
+ }
+ }
if (i++ > 5) {
CERROR("check remote perm falls in dead loop!\n");
* flag, because it needs accurate counting lest memory leak
* check reports error.
*/
- if (cfs_in_interrupt() &&
+ if (in_interrupt() &&
(stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
percpu_cntr->lc_sum_irq += amount;
else
* flag, because it needs accurate counting lest memory leak
* check reports error.
*/
- if (cfs_in_interrupt() &&
+ if (in_interrupt() &&
(stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
percpu_cntr->lc_sum_irq -= amount;
else
}
static void mdt_identity_entry_free(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry)
{
- struct md_identity *identity = &entry->u.identity;
-
- if (identity->mi_ginfo) {
- cfs_put_group_info(identity->mi_ginfo);
- identity->mi_ginfo = NULL;
- }
-
- if (identity->mi_nperms) {
- LASSERT(identity->mi_perms);
- OBD_FREE(identity->mi_perms,
- identity->mi_nperms * sizeof(struct md_perm));
- identity->mi_nperms = 0;
- }
+ struct md_identity *identity = &entry->u.identity;
+
+ if (identity->mi_ginfo) {
+ put_group_info(identity->mi_ginfo);
+ identity->mi_ginfo = NULL;
+ }
+
+ if (identity->mi_nperms) {
+ LASSERT(identity->mi_perms);
+ OBD_FREE(identity->mi_perms,
+ identity->mi_nperms * sizeof(struct md_perm));
+ identity->mi_nperms = 0;
+ }
}
static int mdt_identity_do_upcall(struct upcall_cache *cache,
}
static int mdt_identity_parse_downcall(struct upcall_cache *cache,
- struct upcall_cache_entry *entry,
- void *args)
+ struct upcall_cache_entry *entry,
+ void *args)
{
- struct md_identity *identity = &entry->u.identity;
- struct identity_downcall_data *data = args;
- cfs_group_info_t *ginfo = NULL;
- struct md_perm *perms = NULL;
- int size, i;
- ENTRY;
-
- LASSERT(data);
- if (data->idd_ngroups > NGROUPS_MAX)
- RETURN(-E2BIG);
-
- if (data->idd_ngroups > 0) {
- ginfo = cfs_groups_alloc(data->idd_ngroups);
- if (!ginfo) {
- CERROR("failed to alloc %d groups\n", data->idd_ngroups);
- RETURN(-ENOMEM);
- }
-
- lustre_groups_from_list(ginfo, data->idd_groups);
- lustre_groups_sort(ginfo);
- }
-
- if (data->idd_nperms) {
- size = data->idd_nperms * sizeof(*perms);
- OBD_ALLOC(perms, size);
- if (!perms) {
- CERROR("failed to alloc %d permissions\n",
- data->idd_nperms);
- if (ginfo != NULL)
- cfs_put_group_info(ginfo);
- RETURN(-ENOMEM);
- }
-
- for (i = 0; i < data->idd_nperms; i++) {
- perms[i].mp_nid = data->idd_perms[i].pdd_nid;
- perms[i].mp_perm = data->idd_perms[i].pdd_perm;
- }
- }
-
- identity->mi_uid = data->idd_uid;
- identity->mi_gid = data->idd_gid;
- identity->mi_ginfo = ginfo;
- identity->mi_nperms = data->idd_nperms;
- identity->mi_perms = perms;
-
- CDEBUG(D_OTHER, "parse mdt identity@%p: %d:%d, ngroups %u, nperms %u\n",
- identity, identity->mi_uid, identity->mi_gid,
- data->idd_ngroups, data->idd_nperms);
-
- RETURN(0);
+ struct md_identity *identity = &entry->u.identity;
+ struct identity_downcall_data *data = args;
+ struct group_info *ginfo = NULL;
+ struct md_perm *perms = NULL;
+ int size, i;
+ ENTRY;
+
+ LASSERT(data);
+ if (data->idd_ngroups > NGROUPS_MAX)
+ RETURN(-E2BIG);
+
+ if (data->idd_ngroups > 0) {
+ ginfo = groups_alloc(data->idd_ngroups);
+ if (!ginfo) {
+ CERROR("failed to alloc %d groups\n", data->idd_ngroups);
+ RETURN(-ENOMEM);
+ }
+
+ lustre_groups_from_list(ginfo, data->idd_groups);
+ lustre_groups_sort(ginfo);
+ }
+
+ if (data->idd_nperms) {
+ size = data->idd_nperms * sizeof(*perms);
+ OBD_ALLOC(perms, size);
+ if (!perms) {
+ CERROR("failed to alloc %d permissions\n",
+ data->idd_nperms);
+ if (ginfo != NULL)
+ put_group_info(ginfo);
+ RETURN(-ENOMEM);
+ }
+
+ for (i = 0; i < data->idd_nperms; i++) {
+ perms[i].mp_nid = data->idd_perms[i].pdd_nid;
+ perms[i].mp_perm = data->idd_perms[i].pdd_perm;
+ }
+ }
+
+ identity->mi_uid = data->idd_uid;
+ identity->mi_gid = data->idd_gid;
+ identity->mi_ginfo = ginfo;
+ identity->mi_nperms = data->idd_nperms;
+ identity->mi_perms = perms;
+
+ CDEBUG(D_OTHER, "parse mdt identity@%p: %d:%d, ngroups %u, nperms %u\n",
+ identity, identity->mi_uid, identity->mi_gid,
+ data->idd_ngroups, data->idd_nperms);
+
+ RETURN(0);
}
struct md_identity *mdt_identity_get(struct upcall_cache *cache, __u32 uid)
if (uc->uc_valid != UCRED_INIT) {
uc->uc_suppgids[0] = uc->uc_suppgids[1] = -1;
if (uc->uc_ginfo) {
- cfs_put_group_info(uc->uc_ginfo);
+ put_group_info(uc->uc_ginfo);
uc->uc_ginfo = NULL;
}
if (uc->uc_identity) {
if (!remote && perm & CFS_SETGRP_PERM) {
if (pud->pud_ngroups) {
/* setgroups for local client */
- ucred->uc_ginfo = cfs_groups_alloc(pud->pud_ngroups);
+ ucred->uc_ginfo = groups_alloc(pud->pud_ngroups);
if (!ucred->uc_ginfo) {
CERROR("failed to alloc %d groups\n",
pud->pud_ngroups);
out:
if (rc) {
if (ucred->uc_ginfo) {
- cfs_put_group_info(ucred->uc_ginfo);
+ put_group_info(ucred->uc_ginfo);
ucred->uc_ginfo = NULL;
}
if (ucred->uc_identity) {
LINVRNT(!cl_lock_is_mutexed(lock));
- ENTRY;
- cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- cfs_might_sleep();
- while (!cfs_list_empty(&lock->cll_layers)) {
- struct cl_lock_slice *slice;
-
- slice = cfs_list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
- cfs_list_del_init(lock->cll_layers.next);
- slice->cls_ops->clo_fini(env, slice);
- }
+ ENTRY;
+ cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
+ might_sleep();
+ while (!cfs_list_empty(&lock->cll_layers)) {
+ struct cl_lock_slice *slice;
+
+ slice = cfs_list_entry(lock->cll_layers.next,
+ struct cl_lock_slice, cls_linkage);
+ cfs_list_del_init(lock->cll_layers.next);
+ slice->cls_ops->clo_fini(env, slice);
+ }
CS_LOCK_DEC(obj, total);
CS_LOCKSTATE_DEC(obj, lock->cll_state);
lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
struct cl_device *cd, const struct lu_fid *fid,
const struct cl_object_conf *c)
{
- cfs_might_sleep();
+ might_sleep();
return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
PASSERT(env, page, page->cp_parent == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);
- ENTRY;
- cfs_might_sleep();
- while (!cfs_list_empty(&page->cp_layers)) {
- struct cl_page_slice *slice;
-
- slice = cfs_list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
- cfs_list_del_init(page->cp_layers.next);
- slice->cpl_ops->cpo_fini(env, slice);
- }
+ ENTRY;
+ might_sleep();
+ while (!cfs_list_empty(&page->cp_layers)) {
+ struct cl_page_slice *slice;
+
+ slice = cfs_list_entry(page->cp_layers.next,
+ struct cl_page_slice, cpl_linkage);
+ cfs_list_del_init(page->cp_layers.next);
+ slice->cpl_ops->cpo_fini(env, slice);
+ }
CS_PAGE_DEC(obj, total);
CS_PAGESTATE_DEC(obj, page->cp_state);
lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
enum cl_page_type type,
struct cl_page *parent)
{
- struct cl_page *page = NULL;
- struct cl_page *ghost = NULL;
- struct cl_object_header *hdr;
- int err;
+ struct cl_page *page = NULL;
+ struct cl_page *ghost = NULL;
+ struct cl_object_header *hdr;
+ int err;
- LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
- cfs_might_sleep();
+ LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
+ might_sleep();
- ENTRY;
+ ENTRY;
- hdr = cl_object_header(o);
+ hdr = cl_object_header(o);
CS_PAGE_INC(o, lookup);
CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
} while (0)
#define lustre_put_group_info(group_info) do { \
- if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
- cfs_groups_free(group_info); \
+ if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+ groups_free(group_info); \
} while (0)
/*
* groups_search() is copied from linux kernel!
* A simple bsearch.
*/
-static int lustre_groups_search(cfs_group_info_t *group_info,
- gid_t grp)
+static int lustre_groups_search(struct group_info *group_info,
+ gid_t grp)
{
- int left, right;
-
- if (!group_info)
- return 0;
-
- left = 0;
- right = group_info->ngroups;
- while (left < right) {
- int mid = (left + right) / 2;
- int cmp = grp - CFS_GROUP_AT(group_info, mid);
-
- if (cmp > 0)
- left = mid + 1;
- else if (cmp < 0)
- right = mid;
- else
- return 1;
- }
- return 0;
+ int left, right;
+
+ if (!group_info)
+ return 0;
+
+ left = 0;
+ right = group_info->ngroups;
+ while (left < right) {
+ int mid = (left + right) / 2;
+ int cmp = grp - CFS_GROUP_AT(group_info, mid);
+
+ if (cmp > 0)
+ left = mid + 1;
+ else if (cmp < 0)
+ right = mid;
+ else
+ return 1;
+ }
+ return 0;
}
-void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist)
+void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
{
- int i;
- int count = ginfo->ngroups;
+ int i;
+ int count = ginfo->ngroups;
- /* fill group_info from gid array */
- for (i = 0; i < ginfo->nblocks && count > 0; i++) {
- int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
- int off = i * CFS_NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*glist);
+ /* fill group_info from gid array */
+ for (i = 0; i < ginfo->nblocks && count > 0; i++) {
+ int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
+ int off = i * CFS_NGROUPS_PER_BLOCK;
+ int len = cp_count * sizeof(*glist);
- memcpy(ginfo->blocks[i], glist + off, len);
- count -= cp_count;
- }
+ memcpy(ginfo->blocks[i], glist + off, len);
+ count -= cp_count;
+ }
}
EXPORT_SYMBOL(lustre_groups_from_list);
/* groups_sort() is copied from linux kernel! */
/* a simple shell-metzner sort */
-void lustre_groups_sort(cfs_group_info_t *group_info)
+void lustre_groups_sort(struct group_info *group_info)
{
int base, max, stride;
int gidsetsize = group_info->ngroups;
int rc = 1;
if (grp != mu->uc_fsgid) {
- cfs_group_info_t *group_info = NULL;
+ struct group_info *group_info = NULL;
if (mu->uc_ginfo || !mu->uc_identity ||
mu->uc_valid == UCRED_OLD)
void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
{
- cfs_might_sleep();
+ might_sleep();
lu_ref_add_context(ref, GFP_IOFS, scope, source);
}
EXPORT_SYMBOL(lu_ref_add);
const struct osc_page *opg,
enum cl_lock_mode mode, int pending, int unref)
{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- ldlm_policy_data_t *policy;
- ldlm_mode_t dlmmode;
- int flags;
-
- cfs_might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
+ struct cl_page *page;
+ struct osc_object *obj;
+ struct osc_thread_info *info;
+ struct ldlm_res_id *resname;
+ struct lustre_handle *lockh;
+ ldlm_policy_data_t *policy;
+ ldlm_mode_t dlmmode;
+ int flags;
+
+ might_sleep();
+
+ info = osc_env_info(env);
+ resname = &info->oti_resname;
+ policy = &info->oti_policy;
+ lockh = &info->oti_handle;
+ page = opg->ops_cl.cpl_page;
+ obj = cl2osc(opg->ops_cl.cpl_obj);
flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
if (pending)
*/
obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
- cfs_mb();
+ smp_mb();
obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
RETURN(0);
*/
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
- int rc;
- struct l_wait_info lwi;
+ int rc;
+ struct l_wait_info lwi;
- /*
- * Might sleep.
- */
- LASSERT(!cfs_in_interrupt());
+ /*
+ * Might sleep.
+ */
+ LASSERT(!in_interrupt());
- /*
- * Let's setup deadline for reply unlink.
- */
+ /*
+ * Let's setup deadline for reply unlink.
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
* Create a work for ptlrpc.
*/
void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *cbdata)
+ int (*cb)(const struct lu_env *, void *), void *cbdata)
{
- struct ptlrpc_request *req = NULL;
- struct ptlrpc_work_async_args *args;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_work_async_args *args;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- if (cb == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ if (cb == NULL)
+ RETURN(ERR_PTR(-EINVAL));
/* copy some code from deprecated fakereq. */
req = ptlrpc_request_cache_alloc(__GFP_IO);
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
- CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
- "don't send destroy rpc\n", ctx,
- ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- RETURN(0);
- }
+ if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
+ CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
+ "don't send destroy rpc\n", ctx,
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ RETURN(0);
+ }
- cfs_might_sleep();
+ might_sleep();
- CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
- sec_is_reverse(ctx->cc_sec) ?
- "server finishing reverse" : "client finishing forward",
- ctx, gss_handle_to_u64(&gctx->gc_handle),
- ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
+ sec_is_reverse(ctx->cc_sec) ?
+ "server finishing reverse" : "client finishing forward",
+ ctx, gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;
struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
cfs_hlist_head_t *hash_head;
cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
- unsigned int hash, gc = 0, found = 0;
- ENTRY;
+ CFS_HLIST_HEAD(freelist);
+ unsigned int hash, gc = 0, found = 0;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- gsec = container_of(sec, struct gss_sec, gs_base);
- gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
hash = ctx_hash_index(gsec_pf->gsp_chash_size,
(__u64) vcred->vc_uid);
struct gss_sec *gsec;
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
- cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
- int i, busy = 0;
- ENTRY;
+ cfs_hlist_node_t *pos, *next;
+ CFS_HLIST_HEAD(freelist);
+ int i, busy = 0;
+ ENTRY;
- might_sleep_if(grace);
+ might_sleep_if(grace);
- gsec = container_of(sec, struct gss_sec, gs_base);
- gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
spin_lock(&sec->ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
static
int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
- struct obd_import *imp;
- struct gss_sec *gsec;
- struct gss_upcall_msg *gmsg;
- int rc = 0;
- ENTRY;
+ struct obd_import *imp;
+ struct gss_sec *gsec;
+ struct gss_upcall_msg *gmsg;
+ int rc = 0;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_import);
- LASSERT(ctx->cc_sec->ps_import->imp_obd);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_import);
+ LASSERT(ctx->cc_sec->ps_import->imp_obd);
imp = ctx->cc_sec->ps_import;
if (!imp->imp_connection) {
*/
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
- struct l_wait_info lwi;
- int rc;
+ struct l_wait_info lwi;
+ int rc;
- LASSERT(!cfs_in_interrupt()); /* might sleep */
+ LASSERT(!in_interrupt()); /* might sleep */
- if (!ptlrpc_server_bulk_active(desc)) /* completed or */
- return; /* never started */
+ if (!ptlrpc_server_bulk_active(desc)) /* completed or */
+ return; /* never started */
- /* We used to poison the pages with 0xab here because we did not want to
- * send any meaningful data over the wire for evicted clients (bug 9297)
- * However, this is no longer safe now that we use the page cache on the
- * OSS (bug 20560) */
+ /* We used to poison the pages with 0xab here because we did not want to
+ * send any meaningful data over the wire for evicted clients (bug 9297)
+ * However, this is no longer safe now that we use the page cache on the
+ * OSS (bug 20560) */
- /* The unlink ensures the callback happens ASAP and is the last
- * one. If it fails, it must be because completion just happened,
- * but we must still l_wait_event() in this case, to give liblustre
- * a chance to run server_bulk_callback()*/
+ /* The unlink ensures the callback happens ASAP and is the last
+ * one. If it fails, it must be because completion just happened,
+ * but we must still l_wait_event() in this case, to give liblustre
+ * a chance to run server_bulk_callback()*/
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_count);
- for (;;) {
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(desc->bd_waitq,
- !ptlrpc_server_bulk_active(desc), &lwi);
- if (rc == 0)
- return;
-
- LASSERT(rc == -ETIMEDOUT);
- CWARN("Unexpectedly long timeout: desc %p\n", desc);
- }
+ for (;;) {
+ /* Network access will complete in finite time but the HUGE
+ * timeout lets us CWARN for visibility of sluggish NALs */
+ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(desc->bd_waitq,
+ !ptlrpc_server_bulk_active(desc), &lwi);
+ if (rc == 0)
+ return;
+
+ LASSERT(rc == -ETIMEDOUT);
+ CWARN("Unexpectedly long timeout: desc %p\n", desc);
+ }
}
EXPORT_SYMBOL(ptlrpc_abort_bulk);
#endif /* HAVE_SERVER_SUPPORT */
*/
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- struct l_wait_info lwi;
- int rc;
- ENTRY;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct l_wait_info lwi;
+ int rc;
+ ENTRY;
- LASSERT(!cfs_in_interrupt()); /* might sleep */
+ LASSERT(!in_interrupt()); /* might sleep */
- /* Let's setup deadline for reply unlink. */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0)
- req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+ /* Let's setup deadline for reply unlink. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
+ async && req->rq_bulk_deadline == 0)
+ req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
RETURN(1); /* never registered */
*/
int sptlrpc_import_check_ctx(struct obd_import *imp)
{
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
- struct ptlrpc_request *req = NULL;
- int rc;
- ENTRY;
+ struct ptlrpc_sec *sec;
+ struct ptlrpc_cli_ctx *ctx;
+ struct ptlrpc_request *req = NULL;
+ int rc;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- sec = sptlrpc_import_sec_ref(imp);
- ctx = get_my_ctx(sec);
- sptlrpc_sec_put(sec);
+ sec = sptlrpc_import_sec_ref(imp);
+ ctx = get_my_ctx(sec);
+ sptlrpc_sec_put(sec);
if (!ctx)
RETURN(-ENOMEM);
struct ptlrpc_svc_ctx *svc_ctx,
struct sptlrpc_flavor *flvr)
{
- struct ptlrpc_connection *conn;
- struct sptlrpc_flavor sf;
- struct ptlrpc_sec *sec, *newsec;
- enum lustre_sec_part sp;
- char str[24];
- int rc = 0;
- ENTRY;
+ struct ptlrpc_connection *conn;
+ struct sptlrpc_flavor sf;
+ struct ptlrpc_sec *sec, *newsec;
+ enum lustre_sec_part sp;
+ char str[24];
+ int rc = 0;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- if (imp == NULL)
- RETURN(0);
+ if (imp == NULL)
+ RETURN(0);
- conn = imp->imp_connection;
+ conn = imp->imp_connection;
if (svc_ctx == NULL) {
struct client_obd *cliobd = &imp->imp_obd->u.cli;
*/
int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
{
- struct sptlrpc_rule *rules;
- int nslot;
+ struct sptlrpc_rule *rules;
+ int nslot;
- cfs_might_sleep();
+ might_sleep();
- if (rset->srs_nrule < rset->srs_nslot)
- return 0;
+ if (rset->srs_nrule < rset->srs_nslot)
+ return 0;
- nslot = rset->srs_nslot + 8;
+ nslot = rset->srs_nslot + 8;
/* better use realloc() if available */
OBD_ALLOC(rules, nslot * sizeof(*rset->srs_rules));
int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset,
struct sptlrpc_rule *rule)
{
- struct sptlrpc_rule *p = rset->srs_rules;
- int spec_dir, spec_net;
- int rc, n, match = 0;
+ struct sptlrpc_rule *p = rset->srs_rules;
+ int spec_dir, spec_net;
+ int rc, n, match = 0;
- cfs_might_sleep();
+ might_sleep();
- spec_net = rule_spec_net(rule);
- spec_dir = rule_spec_dir(rule);
+ spec_net = rule_spec_net(rule);
+ spec_dir = rule_spec_dir(rule);
for (n = 0; n < rset->srs_nrule; n++) {
p = &rset->srs_rules[n];
enum lustre_sec_part to,
struct sptlrpc_rule_set *rset)
{
- struct sptlrpc_rule_set *src[2] = { gen, tgt };
- struct sptlrpc_rule *rule;
- int i, n, rc;
+ struct sptlrpc_rule_set *src[2] = { gen, tgt };
+ struct sptlrpc_rule *rule;
+ int i, n, rc;
- cfs_might_sleep();
+ might_sleep();
- /* merge general rules firstly, then target-specific rules */
- for (i = 0; i < 2; i++) {
- if (src[i] == NULL)
- continue;
+ /* merge general rules firstly, then target-specific rules */
+ for (i = 0; i < 2; i++) {
+ if (src[i] == NULL)
+ continue;
for (n = 0; n < src[i]->srs_nrule; n++) {
rule = &src[i]->srs_rules[n];
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
- if (cfs_list_empty(&sec->ps_gc_list))
- return;
+ if (cfs_list_empty(&sec->ps_gc_list))
+ return;
- cfs_might_sleep();
+ might_sleep();
- /* signal before list_del to make iteration in gc thread safe */
- cfs_atomic_inc(&sec_gc_wait_del);
+ /* signal before list_del to make iteration in gc thread safe */
+ cfs_atomic_inc(&sec_gc_wait_del);
spin_lock(&sec_gc_list_lock);
cfs_list_del_init(&sec->ps_gc_list);
struct ptlrpc_service *svc = svcpt->scp_service;
struct ptlrpc_reply_state *rs;
#ifdef WITH_GROUP_INFO
- cfs_group_info_t *ginfo = NULL;
+ struct group_info *ginfo = NULL;
#endif
- struct lu_env *env;
- int counter = 0, rc = 0;
- ENTRY;
+ struct lu_env *env;
+ int counter = 0, rc = 0;
+ ENTRY;
thread->t_pid = current_pid();
unshare_fs_struct();
}
#ifdef WITH_GROUP_INFO
- ginfo = cfs_groups_alloc(0);
- if (!ginfo) {
- rc = -ENOMEM;
- goto out;
- }
+ ginfo = groups_alloc(0);
+ if (!ginfo) {
+ rc = -ENOMEM;
+ goto out;
+ }
- cfs_set_current_groups(ginfo);
- cfs_put_group_info(ginfo);
+ set_current_groups(ginfo);
+ put_group_info(ginfo);
#endif
if (svc->srv_ops.so_thr_init != NULL) {