* GPL HEADER END
*/
/*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
* Author: Niu Yawei <yawei.niu@intel.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
#define DEBUG_SUBSYSTEM S_LQUOTA
#include "qsd_internal.h"
-extern cfs_mem_cache_t *upd_kmem;
+extern struct kmem_cache *upd_kmem;
/*
* Allocate and fill an qsd_upd_rec structure to be processed by the writeback
{
struct qsd_upd_rec *upd;
- OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, __GFP_IO);
if (upd == NULL) {
CERROR("Failed to allocate upd");
return NULL;
/* wake up the upd thread */
cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
} else {
- CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+ CWARN("%s: discard update.\n", qsd->qsd_svname);
if (upd->qur_lqe)
- LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+ LQUOTA_WARN(upd->qur_lqe, "discard update.");
qsd_upd_free(upd);
}
}
/* must hold the qsd_lock */
-static void qsd_add_deferred(cfs_list_t *list, struct qsd_upd_rec *upd)
+static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
+ struct qsd_upd_rec *upd)
{
struct qsd_upd_rec *tmp, *n;
+ if (qsd->qsd_stopping) {
+ CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+ if (upd->qur_lqe)
+ LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+ qsd_upd_free(upd);
+ return;
+ }
+
/* Sort the updates in ascending order */
cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
* \param qid - quota id
* \param rec - global or slave record to be updated to disk
* \param ver - new index file version
- * \param global- ture : master record; false : slave record
+ * \param global- true: master record; false: slave record
*/
void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
union lquota_id *qid, union lquota_rec *rec, __u64 ver,
* the reintegration is in progress. Defer the update. */
cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
&qqi->qqi_deferred_slv;
- qsd_add_deferred(list, upd);
+ qsd_add_deferred(qsd, list, upd);
}
write_unlock(&qsd->qsd_lock);
struct qsd_instance *qsd = lqe2qqi(lqe)->qqi_qsd;
bool added = false;
+ read_lock(&qsd->qsd_lock);
+ if (qsd->qsd_stopping) {
+ read_unlock(&qsd->qsd_lock);
+ return;
+ }
+ read_unlock(&qsd->qsd_lock);
+
lqe_getref(lqe);
spin_lock(&qsd->qsd_adjust_lock);
struct l_wait_info lwi;
cfs_list_t queue;
struct qsd_upd_rec *upd, *n;
- char pname[MTI_NAME_MAXLEN];
struct lu_env *env;
int qtype, rc = 0;
bool uptodate;
RETURN(rc);
}
- snprintf(pname, MTI_NAME_MAXLEN, "lquota_wb_%s", qsd->qsd_svname);
- cfs_daemonize(pname);
-
thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
struct l_wait_info lwi = { 0 };
- int rc;
+ cfs_task_t *task;
ENTRY;
- rc = cfs_create_thread(qsd_upd_thread, (void *)qsd, 0);
- if (rc < 0) {
- CERROR("Fail to start quota update thread. rc: %d\n", rc);
+ task = kthread_run(qsd_upd_thread, (void *)qsd,
+ "lquota_wb_%s", qsd->qsd_svname);
+ if (IS_ERR(task)) {
+ CERROR("Fail to start quota update thread. rc: %ld\n",
+ PTR_ERR(task));
thread_set_flags(thread, SVC_STOPPED);
- RETURN(rc);
+ RETURN(PTR_ERR(task));
}
l_wait_event(thread->t_ctl_waitq,