static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
+__acquires(&lock->spin)
{
spin_lock(&lock->spin);
}
static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
+__releases(&lock->spin)
{
spin_unlock(&lock->spin);
}
static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
+__acquires(&lock->rw)
{
if (!exclusive)
read_lock(&lock->rw);
static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
+__releases(&lock->rw)
{
if (!exclusive)
read_unlock(&lock->rw);
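/*
 * Editorial sketch (hypothetical helper, not part of the patch): sparse only
 * checks that every exit path leaves the context count consistent with the
 * annotation, so the single __acquires(&lock->rw) above is satisfied whether
 * the lock is reached through read_lock() or write_lock():
 */
static inline void
example_rw_lock(cfs_hash_lock_t *lock, int exclusive)
__acquires(&lock->rw)
{
	if (!exclusive)
		read_lock(&lock->rw);	/* shared path: context count +1 */
	else
		write_lock(&lock->rw);	/* exclusive path: same count, same expression */
}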
*/
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
+__acquires(pcl->pcl_locks)
{
int ncpt = cfs_cpt_number(pcl->pcl_cptab);
int i;
/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
+__releases(pcl->pcl_locks)
{
int ncpt = cfs_cpt_number(pcl->pcl_cptab);
int i;
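/*
 * Editorial note: the two annotations above name the lock array
 * (pcl->pcl_locks) rather than a single spinlock, because these functions
 * take either one per-CPU-partition lock or, for the exclusive index, the
 * whole set.  Sparse cannot track that precisely; naming the array still
 * balances the context count and quiets the imbalance warning.
 */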
* for details.
*/
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+__acquires(&tcd->tcd_lock)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+__releases(&tcd->tcd_lock)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
static int
kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
+__must_hold(&conn->ibc_lock)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
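/*
 * Editorial sketch: unlike __acquires/__releases, __must_hold() marks a lock
 * that is held on entry and still held on exit, i.e. the checkable form of
 * the "called with ... held" comments appearing elsewhere in this patch.  A
 * hypothetical caller of kiblnd_post_tx_locked() (caller name invented here;
 * ibc_lock is assumed to be the spinlock named in the annotation) would look
 * like:
 */
static void example_post_tx(kib_conn_t *conn, kib_tx_t *tx)
{
	int rc;

	spin_lock(&conn->ibc_lock);		/* taken by the caller ... */
	rc = kiblnd_post_tx_locked(conn, tx, 1);	/* ... held across the call ... */
	spin_unlock(&conn->ibc_lock);		/* ... and released by the caller */
	(void)rc;	/* illustration only; real callers act on rc */
}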
int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+__must_hold(&ksocknal_data.ksnd_global_lock)
{
ksock_sched_t *sched;
ksock_conn_t *conn;
static int
lnet_eq_wait_locked(int *timeout_ms)
+__must_hold(&the_lnet.ln_eq_wait_lock)
{
int tms = *timeout_ms;
int wait;
/* called with sfw_data.fw_lock held */
static void
sfw_deactivate_session (void)
+__must_hold(&sfw_data.fw_lock)
{
sfw_session_t *sn = sfw_data.fw_session;
int nactive = 0;
int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+__must_hold(&scd->scd_lock)
{
struct srpc_service *sv = scd->scd_svc;
struct srpc_msg *msg = &buf->buf_msg;
/* called with sv->sv_lock held */
void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
if (srpc_service_post_buffer(scd, buf) != 0) {
}
void lov_stripe_lock(struct lov_stripe_md *md)
+__acquires(&md->lsm_lock)
{
LASSERT(md->lsm_lock_owner != current_pid());
spin_lock(&md->lsm_lock);
}
void lov_stripe_unlock(struct lov_stripe_md *md)
+__releases(&md->lsm_lock)
{
LASSERT(md->lsm_lock_owner == current_pid());
md->lsm_lock_owner = 0;
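/*
 * Editorial note: the lsm_lock_owner LASSERTs above are the runtime
 * counterpart of the static annotations: the lock is never taken twice by
 * the same task, and only the owning task releases it.
 */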
* cl_object_attr_get(), cl_object_attr_set().
*/
void cl_object_attr_lock(struct cl_object *o)
+__acquires(cl_object_attr_guard(o))
{
spin_lock(cl_object_attr_guard(o));
}
* Releases data-attributes lock, acquired by cl_object_attr_lock().
*/
void cl_object_attr_unlock(struct cl_object *o)
+__releases(cl_object_attr_guard(o))
{
spin_unlock(cl_object_attr_guard(o));
}
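/*
 * Editorial sketch: the comment above says the attribute accessors are meant
 * to run under this lock; a minimal hypothetical caller, with the
 * cl_object_attr_get() signature assumed rather than taken from this hunk,
 * might look like:
 */
static int example_attr_read(const struct lu_env *env, struct cl_object *obj,
			     struct cl_attr *attr)
{
	int rc;

	cl_object_attr_lock(obj);	/* sparse: +1 on cl_object_attr_guard(obj) */
	rc = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);	/* sparse: -1, balanced at exit */
	return rc;
}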
static int
osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, pdl_policy_t pol)
+__must_hold(osc)
{
struct list_head rpclist = LIST_HEAD_INIT(rpclist);
struct osc_extent *ext;
static int
osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, pdl_policy_t pol)
+__must_hold(osc)
{
struct osc_extent *ext;
struct osc_extent *next;
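/*
 * Editorial note: in the two hunks above the annotation argument is the osc
 * object itself rather than a lock expression.  Sparse treats the argument
 * essentially as an opaque token for its context counting, so an object
 * pointer is an acceptable stand-in when the lock protecting it is not
 * directly nameable at this point in the code.
 */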
/* called with the loi list lock held */
static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
pdl_policy_t pol)
+__must_hold(&cli->cl_loi_list_lock)
{
struct osc_object *osc;
int rc = 0;
* If anything goes wrong just ignore it - same as if it never happened
*/
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
+__must_hold(&req->rq_lock)
{
struct ptlrpc_request *early_req;
time_t olddl;