When declaring a local list head, instead of:

	struct list_head list;
	INIT_LIST_HEAD(&list);

use:

	LIST_HEAD(list);

which performs both the declaration and the initialization in a single step.
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I67bda77c04479e9b2b8c84f02bfb86d9c2ef5671
Reviewed-on: https://review.whamcloud.com/36955
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Arshad Hussain <arshad.super@gmail.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
28 files changed.
* LDLM_PROCESS_ENQUEUE from ldlm_reprocess_queue.
*/
if ((mode == LCK_NL) && overlaps) {
* LDLM_PROCESS_ENQUEUE from ldlm_reprocess_queue.
*/
if ((mode == LCK_NL) && overlaps) {
- struct list_head rpc_list;
- INIT_LIST_HEAD(&rpc_list);
restart:
ldlm_reprocess_queue(res, &res->lr_waiting,
&rpc_list,
restart:
ldlm_reprocess_queue(res, &res->lr_waiting,
&rpc_list,
static void abort_req_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
static void abort_req_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- struct list_head abort_list;
- INIT_LIST_HEAD(&abort_list);
spin_lock(&obd->obd_recovery_task_lock);
list_splice_init(&obd->obd_req_replay_queue, &abort_list);
spin_unlock(&obd->obd_recovery_task_lock);
spin_lock(&obd->obd_recovery_task_lock);
list_splice_init(&obd->obd_req_replay_queue, &abort_list);
spin_unlock(&obd->obd_recovery_task_lock);
static void abort_lock_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
static void abort_lock_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- struct list_head abort_list;
- INIT_LIST_HEAD(&abort_list);
spin_lock(&obd->obd_recovery_task_lock);
list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
spin_unlock(&obd->obd_recovery_task_lock);
spin_lock(&obd->obd_recovery_task_lock);
list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
spin_unlock(&obd->obd_recovery_task_lock);
void target_cleanup_recovery(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
void target_cleanup_recovery(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- struct list_head clean_list;
- INIT_LIST_HEAD(&clean_list);
spin_lock(&obd->obd_dev_lock);
if (!obd->obd_recovering) {
spin_unlock(&obd->obd_dev_lock);
spin_lock(&obd->obd_dev_lock);
if (!obd->obd_recovering) {
spin_unlock(&obd->obd_dev_lock);
enum ldlm_process_intention intention,
struct ldlm_lock *hint)
{
enum ldlm_process_intention intention,
struct ldlm_lock *hint)
{
- struct list_head rpc_list;
#ifdef HAVE_SERVER_SUPPORT
ldlm_reprocessing_policy reprocess;
struct obd_device *obd;
#ifdef HAVE_SERVER_SUPPORT
ldlm_reprocessing_policy reprocess;
struct obd_device *obd;
- INIT_LIST_HEAD(&rpc_list);
/* Local lock trees don't get reprocessed. */
if (ns_is_client(ldlm_res_to_ns(res))) {
EXIT;
/* Local lock trees don't get reprocessed. */
if (ns_is_client(ldlm_res_to_ns(res))) {
EXIT;
- INIT_LIST_HEAD(&rpc_list);
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
struct ldlm_request *dlm_req,
struct ldlm_lock *lock)
{
struct ldlm_request *dlm_req,
struct ldlm_lock *lock)
{
- struct list_head ast_list;
LDLM_DEBUG(lock, "client completion callback handler START");
LDLM_DEBUG(lock, "client completion callback handler START");
- INIT_LIST_HEAD(&ast_list);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
long to = cfs_time_seconds(1);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
long to = cfs_time_seconds(1);
void ldlm_revoke_export_locks(struct obd_export *exp)
{
void ldlm_revoke_export_locks(struct obd_export *exp)
{
- struct list_head rpc_list;
- INIT_LIST_HEAD(&rpc_list);
cfs_hash_for_each_nolock(exp->exp_lock_hash,
ldlm_revoke_lock_cb, &rpc_list, 0);
ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
cfs_hash_for_each_nolock(exp->exp_lock_hash,
ldlm_revoke_lock_cb, &rpc_list, 0);
ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
LU_KEY_INIT_GENERIC(lfsck);
LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
LU_KEY_INIT_GENERIC(lfsck);
-static struct list_head lfsck_instance_list;
-static struct list_head lfsck_ost_orphan_list;
-static struct list_head lfsck_mdt_orphan_list;
+static LIST_HEAD(lfsck_instance_list);
+static LIST_HEAD(lfsck_ost_orphan_list);
+static LIST_HEAD(lfsck_mdt_orphan_list);
static DEFINE_SPINLOCK(lfsck_instance_lock);
const char *lfsck_flags_names[] = {
static DEFINE_SPINLOCK(lfsck_instance_lock);
const char *lfsck_flags_names[] = {
- INIT_LIST_HEAD(&lfsck_instance_list);
- INIT_LIST_HEAD(&lfsck_ost_orphan_list);
- INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
lfsck_key_init_generic(&lfsck_thread_key, NULL);
rc = lu_context_key_register(&lfsck_thread_key);
if (!rc) {
lfsck_key_init_generic(&lfsck_thread_key, NULL);
rc = lu_context_key_register(&lfsck_thread_key);
if (!rc) {
ltd->ltd_refcount--;
if (ltd->ltd_refcount == 0 && ltd->ltd_death_row) {
struct lod_tgt_desc *tgt_desc, *tmp;
ltd->ltd_refcount--;
if (ltd->ltd_refcount == 0 && ltd->ltd_death_row) {
struct lod_tgt_desc *tgt_desc, *tmp;
CDEBUG(D_CONFIG, "destroying %d ltd desc\n",
ltd->ltd_death_row);
CDEBUG(D_CONFIG, "destroying %d ltd desc\n",
ltd->ltd_death_row);
- INIT_LIST_HEAD(&kill);
-
ltd_foreach_tgt_safe(ltd, tgt_desc, tmp) {
LASSERT(tgt_desc);
if (!tgt_desc->ltd_reap)
ltd_foreach_tgt_safe(ltd, tgt_desc, tmp) {
LASSERT(tgt_desc);
if (!tgt_desc->ltd_reap)
static int mdt_export_cleanup(struct obd_export *exp)
{
static int mdt_export_cleanup(struct obd_export *exp)
{
- struct list_head closing_list;
+ LIST_HEAD(closing_list);
struct mdt_export_data *med = &exp->exp_mdt_data;
struct obd_device *obd = exp->exp_obd;
struct mdt_device *mdt;
struct mdt_export_data *med = &exp->exp_mdt_data;
struct obd_device *obd = exp->exp_obd;
struct mdt_device *mdt;
- INIT_LIST_HEAD(&closing_list);
spin_lock(&med->med_open_lock);
while (!list_empty(&med->med_open_head)) {
struct list_head *tmp = med->med_open_head.next;
spin_lock(&med->med_open_lock);
while (!list_empty(&med->med_open_head)) {
struct list_head *tmp = med->med_open_head.next;
enum ldlm_mode mode;
struct ldlm_lock *lock;
struct ldlm_glimpse_work *gl_work;
enum ldlm_mode mode;
struct ldlm_lock *lock;
struct ldlm_glimpse_work *gl_work;
- struct list_head gl_list;
gl_work->gl_lock = LDLM_LOCK_GET(lock);
/* The glimpse callback is sent to one single IO lock. As a result,
* the gl_work list is just composed of one element */
gl_work->gl_lock = LDLM_LOCK_GET(lock);
/* The glimpse callback is sent to one single IO lock. As a result,
* the gl_work list is just composed of one element */
- INIT_LIST_HEAD(&gl_list);
list_add_tail(&gl_work->gl_list, &gl_list);
/* There is actually no need for a glimpse descriptor when glimpsing
* IO locks */
list_add_tail(&gl_work->gl_list, &gl_list);
/* There is actually no need for a glimpse descriptor when glimpsing
* IO locks */
void class_disconnect_exports(struct obd_device *obd)
{
void class_disconnect_exports(struct obd_device *obd)
{
- struct list_head work_list;
ENTRY;
/* Move all of the exports from obd_exports to a work list, en masse. */
ENTRY;
/* Move all of the exports from obd_exports to a work list, en masse. */
- INIT_LIST_HEAD(&work_list);
spin_lock(&obd->obd_dev_lock);
list_splice_init(&obd->obd_exports, &work_list);
list_splice_init(&obd->obd_delayed_exports, &work_list);
spin_lock(&obd->obd_dev_lock);
list_splice_init(&obd->obd_exports, &work_list);
list_splice_init(&obd->obd_delayed_exports, &work_list);
void class_disconnect_stale_exports(struct obd_device *obd,
int (*test_export)(struct obd_export *))
{
void class_disconnect_stale_exports(struct obd_device *obd,
int (*test_export)(struct obd_export *))
{
- struct list_head work_list;
struct obd_export *exp, *n;
struct obd_export *exp, *n;
- int evicted = 0;
- ENTRY;
+ int evicted = 0;
+ ENTRY;
- INIT_LIST_HEAD(&work_list);
spin_lock(&obd->obd_dev_lock);
list_for_each_entry_safe(exp, n, &obd->obd_exports,
exp_obd_chain) {
spin_lock(&obd->obd_dev_lock);
list_for_each_entry_safe(exp, n, &obd->obd_exports,
exp_obd_chain) {
int rc;
char *kernbuf = NULL;
char *errmsg;
int rc;
char *kernbuf = NULL;
char *errmsg;
if (cfs_parse_nidlist(kernbuf, count, &tmp) <= 0) {
errmsg = "can't parse";
GOTO(failed, rc = -EINVAL);
if (cfs_parse_nidlist(kernbuf, count, &tmp) <= 0) {
errmsg = "can't parse";
GOTO(failed, rc = -EINVAL);
struct seq_file *m = file->private_data;
struct obd_device *obd = m->private;
struct nid_stat *client_stat;
struct seq_file *m = file->private_data;
struct obd_device *obd = m->private;
struct nid_stat *client_stat;
- struct list_head free_list;
- INIT_LIST_HEAD(&free_list);
cfs_hash_cond_del(obd->obd_nid_stats_hash,
lprocfs_nid_stats_clear_write_cb, &free_list);
cfs_hash_cond_del(obd->obd_nid_stats_hash,
lprocfs_nid_stats_clear_write_cb, &free_list);
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
wait_queue_head_t *wq;
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
wait_queue_head_t *wq;
- struct lu_site *site;
- struct lu_object *scan;
- struct list_head *layers;
- struct list_head splice;
+ struct lu_site *site;
+ struct lu_object *scan;
+ struct list_head *layers;
+ LIST_HEAD(splice);
site = o->lo_dev->ld_site;
layers = &o->lo_header->loh_layers;
site = o->lo_dev->ld_site;
layers = &o->lo_header->loh_layers;
* necessary, because lu_object_header is freed together with the
* top-level slice.
*/
* necessary, because lu_object_header is freed together with the
* top-level slice.
*/
- INIT_LIST_HEAD(&splice);
list_splice_init(layers, &splice);
while (!list_empty(&splice)) {
/*
list_splice_init(layers, &splice);
while (!list_empty(&splice)) {
/*
struct lu_object_header *h;
struct lu_object_header *temp;
struct lu_site_bkt_data *bkt;
struct lu_object_header *h;
struct lu_object_header *temp;
struct lu_site_bkt_data *bkt;
- struct list_head dispose;
int did_sth;
unsigned int start = 0;
int count;
int did_sth;
unsigned int start = 0;
int count;
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
RETURN(0);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
RETURN(0);
- INIT_LIST_HEAD(&dispose);
/*
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
/*
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
int class_del_uuid(const char *uuid)
{
struct uuid_nid_data *data;
int class_del_uuid(const char *uuid)
{
struct uuid_nid_data *data;
- struct list_head deathrow;
-
- INIT_LIST_HEAD(&deathrow);
spin_lock(&g_uuid_lock);
if (uuid != NULL) {
spin_lock(&g_uuid_lock);
if (uuid != NULL) {
*/
static int lmd_parse_nidlist(char *buf, char **endh)
{
*/
static int lmd_parse_nidlist(char *buf, char **endh)
{
- struct list_head nidlist;
char *endp = buf;
char tmp;
int rc = 0;
char *endp = buf;
char tmp;
int rc = 0;
tmp = *endp;
*endp = '\0';
tmp = *endp;
*endp = '\0';
- INIT_LIST_HEAD(&nidlist);
if (cfs_parse_nidlist(buf, strlen(buf), &nidlist) <= 0)
rc = 1;
cfs_free_nidlist(&nidlist);
if (cfs_parse_nidlist(buf, strlen(buf), &nidlist) <= 0)
rc = 1;
cfs_free_nidlist(&nidlist);
*/
void ofd_seqs_free(const struct lu_env *env, struct ofd_device *ofd)
{
*/
void ofd_seqs_free(const struct lu_env *env, struct ofd_device *ofd)
{
- struct ofd_seq *oseq;
- struct ofd_seq *tmp;
- struct list_head dispose;
+ struct ofd_seq *oseq;
+ struct ofd_seq *tmp;
+ LIST_HEAD(dispose);
- INIT_LIST_HEAD(&dispose);
write_lock(&ofd->ofd_seq_list_lock);
list_for_each_entry_safe(oseq, tmp, &ofd->ofd_seq_list, os_list)
list_move(&oseq->os_list, &dispose);
write_lock(&ofd->ofd_seq_list_lock);
list_for_each_entry_safe(oseq, tmp, &ofd->ofd_seq_list, os_list)
list_move(&oseq->os_list, &dispose);
struct osd_thread_info *info = osd_oti_get(env);
struct osd_obj_orphan *oor, *tmp;
struct osd_inode_id id;
struct osd_thread_info *info = osd_oti_get(env);
struct osd_obj_orphan *oor, *tmp;
struct osd_inode_id id;
struct inode *inode;
struct lu_fid fid;
handle_t *jh;
__u32 ino;
struct inode *inode;
struct lu_fid fid;
handle_t *jh;
__u32 ino;
- INIT_LIST_HEAD(&list);
-
spin_lock(&osd->od_osfs_lock);
list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
if (oor->oor_env == env)
spin_lock(&osd->od_osfs_lock);
list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
if (oor->oor_env == env)
struct osd_device *osd = osd_dt_dev(th->th_dev);
bool sync = (th->th_sync != 0);
struct osd_thandle *oh;
struct osd_device *osd = osd_dt_dev(th->th_dev);
bool sync = (th->th_sync != 0);
struct osd_thandle *oh;
- struct list_head unlinked;
uint64_t txg;
int rc;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
uint64_t txg;
int rc;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
- INIT_LIST_HEAD(&unlinked);
list_splice_init(&oh->ot_unlinked_list, &unlinked);
osd_oti_get(env)->oti_ins_cache_depth--;
list_splice_init(&oh->ot_unlinked_list, &unlinked);
osd_oti_get(env)->oti_ins_cache_depth--;
struct llog_ctxt *ctxt;
struct llog_handle *llh;
int *arr;
struct llog_ctxt *ctxt;
struct llog_handle *llh;
int *arr;
- struct list_head list, *le;
+ LIST_HEAD(list);
+ struct list_head *le;
struct llog_logid lgid;
int rc, i, count = 0, done = 0;
struct llog_logid lgid;
int rc, i, count = 0, done = 0;
llh = ctxt->loc_handle;
LASSERT(llh);
llh = ctxt->loc_handle;
LASSERT(llh);
spin_lock(&d->opd_sync_lock);
list_splice(&d->opd_sync_committed_there, &list);
INIT_LIST_HEAD(&d->opd_sync_committed_there);
spin_lock(&d->opd_sync_lock);
list_splice(&d->opd_sync_committed_there, &list);
INIT_LIST_HEAD(&d->opd_sync_committed_there);
- INIT_LIST_HEAD(&list);
-
spin_lock(&ou->ou_lock);
/* invalidate all of request in the sending list */
list_for_each_entry_safe(our, tmp, &ou->ou_list, our_list) {
spin_lock(&ou->ou_lock);
/* invalidate all of request in the sending list */
list_for_each_entry_safe(our, tmp, &ou->ou_list, our_list) {
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp, *next;
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp, *next;
- struct list_head comp_reqs;
int force_timer_recalc = 0;
ENTRY;
if (atomic_read(&set->set_remaining) == 0)
RETURN(1);
int force_timer_recalc = 0;
ENTRY;
if (atomic_read(&set->set_remaining) == 0)
RETURN(1);
- INIT_LIST_HEAD(&comp_reqs);
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
struct cfs_hash *hs = head->th_cli_hash;
struct nrs_tbf_bucket *bkt;
int hw;
struct cfs_hash *hs = head->th_cli_hash;
struct nrs_tbf_bucket *bkt;
int hw;
- struct list_head zombies;
- INIT_LIST_HEAD(&zombies);
cfs_hash_bd_get(hs, &cli->tc_jobid, &bd);
bkt = cfs_hash_bd_extra_get(hs, &bd);
if (!cfs_hash_bd_dec_and_lock(hs, &bd, &cli->tc_ref))
cfs_hash_bd_get(hs, &cli->tc_jobid, &bd);
bkt = cfs_hash_bd_extra_get(hs, &bd);
if (!cfs_hash_bd_dec_and_lock(hs, &bd, &cli->tc_ref))
struct cfs_hash *hs = head->th_cli_hash;
struct nrs_tbf_bucket *bkt;
int hw;
struct cfs_hash *hs = head->th_cli_hash;
struct nrs_tbf_bucket *bkt;
int hw;
- struct list_head zombies;
- INIT_LIST_HEAD(&zombies);
cfs_hash_bd_get(hs, &cli->tc_key, &bd);
bkt = cfs_hash_bd_extra_get(hs, &bd);
if (!cfs_hash_bd_dec_and_lock(hs, &bd, &cli->tc_ref))
cfs_hash_bd_get(hs, &cli->tc_key, &bd);
bkt = cfs_hash_bd_extra_get(hs, &bd);
if (!cfs_hash_bd_dec_and_lock(hs, &bd, &cli->tc_ref))
static int pet_refcount;
static int pet_state;
static wait_queue_head_t pet_waitq;
static int pet_refcount;
static int pet_state;
static wait_queue_head_t pet_waitq;
-static struct list_head pet_list;
+static LIST_HEAD(pet_list);
static DEFINE_SPINLOCK(pet_lock);
int ping_evictor_wake(struct obd_export *exp)
static DEFINE_SPINLOCK(pet_lock);
int ping_evictor_wake(struct obd_export *exp)
if (++pet_refcount > 1)
return;
if (++pet_refcount > 1)
return;
- INIT_LIST_HEAD(&pet_list);
init_waitqueue_head(&pet_waitq);
task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
init_waitqueue_head(&pet_waitq);
task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
#if RS_DEBUG
spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
#if RS_DEBUG
spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
- INIT_LIST_HEAD(&ptlrpc_all_services);
mutex_init(&ptlrpc_all_services_mutex);
mutex_init(&pinger_mutex);
mutex_init(&ptlrpcd_mutex);
mutex_init(&ptlrpc_all_services_mutex);
mutex_init(&pinger_mutex);
mutex_init(&ptlrpcd_mutex);
};
static struct mutex sptlrpc_conf_lock;
};
static struct mutex sptlrpc_conf_lock;
-static struct list_head sptlrpc_confs;
+static LIST_HEAD(sptlrpc_confs);
static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
{
static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
{
int sptlrpc_conf_init(void)
{
int sptlrpc_conf_init(void)
{
- INIT_LIST_HEAD(&sptlrpc_confs);
mutex_init(&sptlrpc_conf_lock);
return 0;
}
mutex_init(&sptlrpc_conf_lock);
return 0;
}
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
/** Holds a list of all PTLRPC services */
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
/** Holds a list of all PTLRPC services */
-struct list_head ptlrpc_all_services;
+LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;
{
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq, *n;
{
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq, *n;
- struct list_head work_list;
__u32 index, count;
time64_t deadline;
time64_t now = ktime_get_real_seconds();
__u32 index, count;
time64_t deadline;
time64_t now = ktime_get_real_seconds();
* We're close to a timeout, and we don't know how much longer the
* server will take. Send early replies to everyone expiring soon.
*/
* We're close to a timeout, and we don't know how much longer the
* server will take. Send early replies to everyone expiring soon.
*/
- INIT_LIST_HEAD(&work_list);
deadline = -1;
div_u64_rem(array->paa_deadline, array->paa_size, &index);
count = array->paa_count;
deadline = -1;
div_u64_rem(array->paa_deadline, array->paa_size, &index);
count = array->paa_count;
{
struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg;
struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
{
struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg;
struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
- struct list_head replies;
struct lu_env *env;
int rc;
struct lu_env *env;
int rc;
if (env == NULL)
RETURN(-ENOMEM);
if (env == NULL)
RETURN(-ENOMEM);
- INIT_LIST_HEAD(&replies);
unshare_fs_struct();
rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
unshare_fs_struct();
rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_thread *thread;
static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_thread *thread;
- struct list_head zombie;
ENTRY;
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
ENTRY;
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
- INIT_LIST_HEAD(&zombie);
spin_lock(&svcpt->scp_lock);
/* let the thread know that we would like it to stop asap */
list_for_each_entry(thread, &svcpt->scp_threads, t_link)
spin_lock(&svcpt->scp_lock);
/* let the thread know that we would like it to stop asap */
list_for_each_entry(thread, &svcpt->scp_threads, t_link)
{
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
{
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct list_head queue;
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
int qtype, rc = 0;
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
int qtype, rc = 0;
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
- INIT_LIST_HEAD(&queue);
while (1) {
wait_event_idle_timeout(
thread->t_ctl_waitq,
while (1) {
wait_event_idle_timeout(
thread->t_ctl_waitq,
struct lu_env env;
struct ptlrpc_request *req;
__u32 start_epoch;
struct lu_env env;
struct ptlrpc_request *req;
__u32 start_epoch;
- struct list_head client_list;
+ LIST_HEAD(client_list);
int rc;
if (tgt->lut_obd->obd_stopping)
int rc;
if (tgt->lut_obd->obd_stopping)
tgt->lut_lsd.lsd_start_epoch = start_epoch;
spin_unlock(&tgt->lut_translock);
tgt->lut_lsd.lsd_start_epoch = start_epoch;
spin_unlock(&tgt->lut_translock);
- INIT_LIST_HEAD(&client_list);
/**
* The recovery is not yet finished and final queue can still be updated
* with resend requests. Move final list to separate one for processing
/**
* The recovery is not yet finished and final queue can still be updated
* with resend requests. Move final list to separate one for processing
struct lu_target *lut = tdtd->tdtd_lut;
struct ptlrpc_thread *thread = &lut->lut_tdtd_commit_thread;
struct lu_env env;
struct lu_target *lut = tdtd->tdtd_lut;
struct ptlrpc_thread *thread = &lut->lut_tdtd_commit_thread;
struct lu_env env;
int rc;
struct top_multiple_thandle *tmt;
struct top_multiple_thandle *tmp;
int rc;
struct top_multiple_thandle *tmt;
struct top_multiple_thandle *tmp;
thread->t_flags = SVC_RUNNING;
spin_unlock(&tdtd->tdtd_batchid_lock);
wake_up(&thread->t_ctl_waitq);
thread->t_flags = SVC_RUNNING;
spin_unlock(&tdtd->tdtd_batchid_lock);
wake_up(&thread->t_ctl_waitq);
CDEBUG(D_HA, "%s: start commit thread committed batchid %llu\n",
tdtd->tdtd_lut->lut_obd->obd_name,
CDEBUG(D_HA, "%s: start commit thread committed batchid %llu\n",
tdtd->tdtd_lut->lut_obd->obd_name,