/* reserved for hotplug */
unsigned long cpt_version;
/* mutex to protect cpt_cpumask */
- struct semaphore cpt_mutex;
+ struct mutex cpt_mutex;
/* scratch buffer for set/unset_node */
cpumask_t *cpt_cpumask;
};
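The change repeated throughout this patch is mechanical: a semaphore
initialized to a count of 1 and only ever used through down()/up() is a
mutex in disguise, and converting it to struct mutex buys lockdep
validation and owner tracking for free. A minimal sketch of the pattern
(foo_sem and foo_mutex are hypothetical names, not from this patch):

	/* before: binary semaphore used for mutual exclusion */
	struct semaphore foo_sem;

	sema_init(&foo_sem, 1);
	down(&foo_sem);
	/* ... critical section ... */
	up(&foo_sem);

	/* after: a real mutex, visible to lockdep */
	struct mutex foo_mutex;

	mutex_init(&foo_mutex);
	mutex_lock(&foo_mutex);
	/* ... critical section ... */
	mutex_unlock(&foo_mutex);

Note the constraint this relies on: a mutex must be unlocked by the task
that locked it, so the conversion is only safe where acquire and release
happen in the same context, as they do in every hunk below.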
{
int num;
- down(&cpt_data.cpt_mutex);
+ mutex_lock(&cpt_data.cpt_mutex);
cfs_cpu_core_siblings(cpu, cpt_data.cpt_cpumask);
num = cpus_weight(*cpt_data.cpt_cpumask);
- up(&cpt_data.cpt_mutex);
+ mutex_unlock(&cpt_data.cpt_mutex);
return num;
}
{
int num;
- down(&cpt_data.cpt_mutex);
+ mutex_lock(&cpt_data.cpt_mutex);
cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
num = cpus_weight(*cpt_data.cpt_cpumask);
- up(&cpt_data.cpt_mutex);
+ mutex_unlock(&cpt_data.cpt_mutex);
return num;
}
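The reason for a shared scratch buffer here, rather than a local mask,
is worth spelling out (my reading; the patch does not say so): with a
large NR_CPUS a cpumask_t can run to hundreds of bytes, too big for a
kernel stack frame, so one preallocated mask is reused and cpt_mutex
exists purely to serialize access to it. The avoided alternative would
look like this (hypothetical function name):

	/* on-stack variant: needs no lock, but sizeof(cpumask_t) is
	 * NR_CPUS / 8 bytes, e.g. 1KB at NR_CPUS=8192, far too much
	 * stack for a kernel function */
	static int cpu_core_nsiblings(int cpu)
	{
		cpumask_t mask;

		cfs_cpu_core_siblings(cpu, &mask);
		return cpus_weight(mask);
	}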
return 0;
}
- down(&cpt_data.cpt_mutex);
+ mutex_lock(&cpt_data.cpt_mutex);
mask = cpt_data.cpt_cpumask;
cfs_node_to_cpumask(node, mask);
rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
- up(&cpt_data.cpt_mutex);
+ mutex_unlock(&cpt_data.cpt_mutex);
return rc;
}
return;
}
- down(&cpt_data.cpt_mutex);
+ mutex_lock(&cpt_data.cpt_mutex);
mask = cpt_data.cpt_cpumask;
cfs_node_to_cpumask(node, mask);
cfs_cpt_unset_cpumask(cptab, cpt, mask);
- up(&cpt_data.cpt_mutex);
+ mutex_unlock(&cpt_data.cpt_mutex);
}
EXPORT_SYMBOL(cfs_cpt_unset_node);
break;
}
- down(&cpt_data.cpt_mutex);
+ mutex_lock(&cpt_data.cpt_mutex);
/* if all HTs in a core are offline, it may break affinity */
cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
- up(&cpt_data.cpt_mutex);
+ mutex_unlock(&cpt_data.cpt_mutex);
CDEBUG(warn ? D_WARNING : D_INFO,
"Lustre: can't support CPU plug-out well now, "
"performance and stability could be impacted"
}
spin_lock_init(&cpt_data.cpt_lock);
- sema_init(&cpt_data.cpt_mutex, 1);
+ mutex_init(&cpt_data.cpt_mutex);
#ifdef CONFIG_HOTPLUG_CPU
register_hotcpu_notifier(&cfs_cpu_notifier);
INIT_LIST_HEAD(&dev->gnd_map_tx);
INIT_LIST_HEAD(&dev->gnd_fma_buffs);
mutex_init(&dev->gnd_cq_mutex);
- sema_init(&dev->gnd_fmablk_sem, 1);
+ mutex_init(&dev->gnd_fmablk_mutex);
spin_lock_init(&dev->gnd_fmablk_lock);
init_waitqueue_head(&dev->gnd_waitq);
init_waitqueue_head(&dev->gnd_dgram_waitq);
init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
- sema_init(&kgnilnd_data.kgn_quiesce_sem, 1);
+ mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
}
/* Serialize with shutdown. */
- down(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
LIBCFS_ALLOC(net, sizeof(*net));
if (net == NULL) {
/* we need a separate thread to call probe_wait_by_id until
* we get a function callback notifier from kgni */
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
RETURN(0);
failed:
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
kgnilnd_shutdown(ni);
RETURN(rc);
}
"init %d\n", kgnilnd_data.kgn_init);
/* Serialize with startup. */
- down(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
atomic_read(&libcfs_kmemory));
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
atomic_read(&libcfs_kmemory));
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
EXIT;
}
int gnd_id; /* device id, also index in kgn_devices */
__u32 gnd_nid; /* ph host ID translated to NID */
struct list_head gnd_fma_buffs; /* list of FMA memory blocks */
- struct semaphore gnd_fmablk_sem; /* semaphore for FMA block memory alloc/free */
+ struct mutex gnd_fmablk_mutex; /* mutex for FMA block memory alloc/free */
spinlock_t gnd_fmablk_lock; /* lock for mbox alloc/release */
atomic_t gnd_nfmablk; /* # of fmablk live */
atomic_t gnd_fmablk_vers; /* gnd_fma_bufs stamp */
wait_queue_head_t kgn_ruhroh_waitq; /* ruhroh thread wakeup */
int kgn_quiesce_trigger; /* should we quiesce ? */
atomic_t kgn_nquiesce; /* how many quiesced ? */
- struct semaphore kgn_quiesce_sem; /* serialize ruhroh task, startup and shutdown */
+ struct mutex kgn_quiesce_mutex; /* serialize ruhroh task, startup and shutdown */
int kgn_needs_reset; /* we need stack reset */
/* These next three members implement communication from gnilnd into
gni_smsg_attr_t smsg_attr;
unsigned long fmablk_vers;
- /* we'll use fmablk_vers and the gnd_fmablk_sem to gate access
+ /* we'll use fmablk_vers and the gnd_fmablk_mutex to gate access
* to this allocation code. Everyone will sample the version
- * before and after getting the semaphore. If it has changed,
+ * before and after getting the mutex. If it has changed,
* we'll bail out to check the lists again - this indicates that
* some sort of change was made to the lists and it is possible
* that there is a mailbox for us to find now. This should prevent
* that need a yet-to-be-allocated mailbox for a connection. */
fmablk_vers = atomic_read(&device->gnd_fmablk_vers);
- down(&device->gnd_fmablk_sem);
+ mutex_lock(&device->gnd_fmablk_mutex);
if (fmablk_vers != atomic_read(&device->gnd_fmablk_vers)) {
- /* version changed while we were waiting for semaphore,
+ /* version changed while we were waiting for the mutex,
* we'll recheck the lists assuming something nice happened */
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
return 0;
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
return 0;
free_desc:
LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
out:
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
return rc;
}
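The allocation path above uses two locks on purpose: gnd_fmablk_mutex
covers the whole slow path, which can sleep in memory allocation, while
gnd_fmablk_lock protects only the short list updates inside it, where
sleeping is not allowed. Condensed, the shape of the function is as
follows (the comments are mine, summarizing the elided code):

	mutex_lock(&device->gnd_fmablk_mutex);	/* sleepable outer lock */
	/* ... allocate and map a new FMA memory block ... */
	spin_lock(&device->gnd_fmablk_lock);	/* brief, atomic section */
	/* ... link the block into gnd_fma_buffs ... */
	spin_unlock(&device->gnd_fmablk_lock);
	mutex_unlock(&device->gnd_fmablk_mutex);

This is also why the conversion from semaphore to mutex is safe here:
the outer lock is always taken and released in plain process context.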
int rc = 0;
kgn_fma_memblock_t *fma_blk;
- /* use sem to gate access to single thread, just in case */
- down(&device->gnd_fmablk_sem);
+ /* use the mutex to gate access to a single thread, just in case */
+ mutex_lock(&device->gnd_fmablk_mutex);
spin_lock(&device->gnd_fmablk_lock);
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
RETURN(rc);
}
kgn_fma_memblock_t *fma_blk;
- /* use sem to gate access to single thread, just in case */
- down(&device->gnd_fmablk_sem);
+ /* use the mutex to gate access to a single thread, just in case */
+ mutex_lock(&device->gnd_fmablk_mutex);
spin_lock(&device->gnd_fmablk_lock);
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
}
void
kgn_fma_memblock_t *fma_blk, *fma_blkN;
- /* use sem to gate access to single thread, just in case */
- down(&device->gnd_fmablk_sem);
+ /* use the mutex to gate access to a single thread, just in case */
+ mutex_lock(&device->gnd_fmablk_mutex);
spin_lock(&device->gnd_fmablk_lock);
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
}
/* kgnilnd dgram nid->struct management */
break;
/* Serialize with driver startup and shutdown. */
- down(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
CDEBUG(D_NET, "trigger %d reset %d to_bump %d pause %d\n",
kgnilnd_data.kgn_quiesce_trigger,
set_mb(kgnilnd_data.kgn_needs_reset, 0);
}
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
}
kgnilnd_data.kgn_ruhroh_running = 0;
}
if (old_val != kgnilnd_sysctl.ksd_pause_trigger) {
- down(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
CDEBUG(D_NET, "setting quiesce_trigger %d\n", old_val);
kgnilnd_data.kgn_quiesce_trigger = kgnilnd_sysctl.ksd_pause_trigger;
kgnilnd_quiesce_wait("admin sysctl");
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
}
RETURN(rc);
void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
-/* l_lock.c */
-struct lustre_lock {
- int l_depth;
- struct task_struct *l_owner;
- struct semaphore l_sem;
- spinlock_t l_spin;
-};
-
-void l_lock_init(struct lustre_lock *);
-void l_lock(struct lustre_lock *);
-void l_unlock(struct lustre_lock *);
-int l_has_lock(struct lustre_lock *);
-
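Unlike the other locks in this patch, lustre_lock is removed outright
instead of being converted. Its l_owner and l_depth fields show why: it
was a recursive lock, letting the owning task re-enter, and struct mutex
deliberately has no recursive mode. A sketch of how such a wrapper works
(reconstructed for illustration; not the actual l_lock.c code):

	void l_lock(struct lustre_lock *l)
	{
		spin_lock(&l->l_spin);
		if (l->l_owner == current) {	/* re-entry by owner */
			l->l_depth++;
			spin_unlock(&l->l_spin);
			return;
		}
		spin_unlock(&l->l_spin);
		down(&l->l_sem);		/* first acquisition */
		spin_lock(&l->l_spin);
		l->l_owner = current;
		l->l_depth = 0;
		spin_unlock(&l->l_spin);
	}

Assuming it has no remaining callers, which its wholesale removal here
implies, deleting the interface is the right call; there is no faithful
mutex translation of it.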
/*
* For md echo client
*/
struct mdc_rpc_lock *cl_close_lock;
/* mgc datastruct */
- struct semaphore cl_mgc_sem;
+ struct mutex cl_mgc_mutex;
struct local_oid_storage *cl_mgc_los;
struct dt_object *cl_mgc_configs_dir;
atomic_t cl_mgc_refcount;
}
init_rwsem(&cli->cl_sem);
- sema_init(&cli->cl_mgc_sem, 1);
+ mutex_init(&cli->cl_mgc_mutex);
cli->cl_conn_count = 0;
memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
if (env == NULL)
RETURN(-ENOMEM);
- /* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- down(&cli->cl_mgc_sem);
+ /* The mgc fs exclusion mutex. Only one fs can be set up at a time. */
+ mutex_lock(&cli->cl_mgc_mutex);
cfs_cleanup_group_info();
if (rc < 0) {
local_oid_storage_fini(env, cli->cl_mgc_los);
cli->cl_mgc_los = NULL;
- up(&cli->cl_mgc_sem);
+ mutex_unlock(&cli->cl_mgc_mutex);
}
out_env:
lu_env_fini(env);
unlock:
class_decref(obd, "mgc_fs", obd);
- up(&cli->cl_mgc_sem);
+ mutex_unlock(&cli->cl_mgc_mutex);
RETURN(0);
}
CFS_INIT_LIST_HEAD(&dev->od_ost_map->om_seq_list);
rwlock_init(&dev->od_ost_map->om_seq_list_lock);
- sema_init(&dev->od_ost_map->om_dir_init_sem, 1);
+ mutex_init(&dev->od_ost_map->om_dir_init_mutex);
osd_push_ctxt(dev, &new, &save);
RETURN(osd_seq);
/* Serializing init process */
- down(&map->om_dir_init_sem);
+ mutex_lock(&map->om_dir_init_mutex);
/* Check whether the seq has been added */
read_lock(&map->om_seq_list_lock);
write_unlock(&map->om_seq_list_lock);
cleanup:
- up(&map->om_dir_init_sem);
+ mutex_unlock(&map->om_dir_init_mutex);
if (rc != 0) {
if (osd_seq != NULL)
OBD_FREE_PTR(osd_seq);
struct buffer_head *bh;
int err;
- LASSERT(down_trylock(&c->ic_idle_sem) != 0);
+ LASSERT(mutex_is_locked(&c->ic_idle_mutex));
if (blk == 0)
return NULL;
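The assertion rewrite above deserves a comment: down_trylock() returns
nonzero when it fails to take the semaphore, so "down_trylock(&sem) != 0"
asserts that the lock is currently held. mutex_is_locked() states the
same condition directly, and unlike the trylock form it has no side
effect (a down_trylock() that unexpectedly succeeds also acquires the
semaphore on its way to failing the assertion):

	/* old form: nonzero return means the semaphore was held */
	LASSERT(down_trylock(&c->ic_idle_sem) != 0);

	/* new form: a side-effect-free query of the same condition */
	LASSERT(mutex_is_locked(&c->ic_idle_mutex));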
idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
c->ic_descr->id_root_gap +
sizeof(struct dx_countlimit));
- down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
if (bh != NULL && IS_ERR(bh))
result = PTR_ERR(bh);
else
c->ic_idle_bh = bh;
- up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
}
return result;
c->ic_object = inode;
init_rwsem(&c->ic_sem);
dynlock_init(&c->ic_tree_lock);
- sema_init(&c->ic_idle_sem, 1);
+ mutex_init(&c->ic_idle_mutex);
return 0;
}
EXPORT_SYMBOL(iam_container_init);
if (c->ic_idle_bh == NULL)
goto newblock;
- down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
if (unlikely(c->ic_idle_bh == NULL)) {
- up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
goto newblock;
}
if (*e != 0)
goto fail;
- up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
bh = ldiskfs_bread(NULL, inode, *b, 0, e);
if (bh == NULL)
return NULL;
}
c->ic_idle_bh = idle;
- up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
got:
/* get write access for the found buffer head */
return bh;
fail:
- up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
ldiskfs_std_error(inode->i_sb, *e);
return NULL;
}
int count;
int rc;
- down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
if (unlikely(c->ic_idle_failed)) {
rc = -EFAULT;
goto unlock;
rc = iam_txn_dirty(h, p, c->ic_idle_bh);
unlock:
- up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
if (rc != 0)
CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
*/
struct rw_semaphore ic_sem;
struct dynlock ic_tree_lock;
- /*
- * Protect ic_idle_bh
- */
- struct semaphore ic_idle_sem;
+ /* Protect ic_idle_bh */
+ struct mutex ic_idle_mutex;
/*
* BH for idle blocks
*/
#if defined(__KERNEL__)
+/* struct mutex */
+#include <linux/mutex.h>
/* struct rw_semaphore */
#include <linux/rwsem.h>
/* struct dentry */
rwlock_t om_seq_list_lock; /* lock for seq_list */
cfs_list_t om_seq_list; /* list head for seq */
int om_subdir_count;
- struct semaphore om_dir_init_sem;
+ struct mutex om_dir_init_mutex;
};
struct osd_mdobj {
if (new == NULL)
RETURN(NULL);
- sema_init(&new->qfs_sem, 1);
+ mutex_init(&new->qfs_mutex);
CFS_INIT_LIST_HEAD(&new->qfs_qsd_list);
strcpy(new->qfs_name, name);
new->qfs_ref = 1;
if (strchr(valstr, 'g'))
enabled |= 1 << GRPQUOTA;
- down(&qfs->qfs_sem);
+ mutex_lock(&qfs->qfs_mutex);
if (qfs->qfs_enabled[pool - LQUOTA_FIRST_RES] == enabled)
/* no change required */
GOTO(out, rc = 0);
}
}
out:
- up(&qfs->qfs_sem);
+ mutex_unlock(&qfs->qfs_mutex);
qsd_put_fsinfo(qfs);
RETURN(0);
}
/* list of all qsd_instance for this fs */
cfs_list_t qfs_qsd_list;
- struct semaphore qfs_sem;
+ struct mutex qfs_mutex;
/* link to the global quota fsinfo list. */
cfs_list_t qfs_link;
/* release per-filesystem information */
if (qsd->qsd_fsinfo != NULL) {
- down(&qsd->qsd_fsinfo->qfs_sem);
+ mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
/* remove from the list of fsinfo */
cfs_list_del_init(&qsd->qsd_link);
- up(&qsd->qsd_fsinfo->qfs_sem);
+ mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);
qsd_put_fsinfo(qsd->qsd_fsinfo);
qsd->qsd_fsinfo = NULL;
}
}
/* add in the list of lquota_fsinfo */
- down(&qsd->qsd_fsinfo->qfs_sem);
+ mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
list_add_tail(&qsd->qsd_link, &qsd->qsd_fsinfo->qfs_qsd_list);
- up(&qsd->qsd_fsinfo->qfs_sem);
+ mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);
/* register procfs directory */
qsd->qsd_proc = lprocfs_seq_register(QSD_DIR, osd_proc,