Whamcloud - gitweb
LU-13090 utils: fix lfs_migrate -p for file with pool
diff --git a/libcfs/libcfs/workitem.c b/libcfs/libcfs/workitem.c
index 1a4ca46..7768e5c 100644
--- a/libcfs/libcfs/workitem.c
+++ b/libcfs/libcfs/workitem.c
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -46,7 +42,7 @@
 
 #define CFS_WS_NAME_LEN         16
 
-typedef struct cfs_wi_sched {
+struct cfs_wi_sched {
        struct list_head                ws_list;        /* chain on global list */
        /** serialised workitems */
        spinlock_t                      ws_lock;
@@ -74,45 +70,33 @@ typedef struct cfs_wi_sched {
        unsigned int            ws_starting:1;
        /** scheduler name */
        char                    ws_name[CFS_WS_NAME_LEN];
-} cfs_wi_sched_t;
+};
 
 static struct cfs_workitem_data {
        /** serialize */
        spinlock_t              wi_glock;
        /** list of all schedulers */
-       struct list_head                wi_scheds;
+       struct list_head        wi_scheds;
        /** WI module is initialized */
        int                     wi_init;
        /** shutting down the whole WI module */
        int                     wi_stopping;
 } cfs_wi_data;
 
-static inline void
-cfs_wi_sched_lock(cfs_wi_sched_t *sched)
-{
-       spin_lock(&sched->ws_lock);
-}
-
-static inline void
-cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
-{
-       spin_unlock(&sched->ws_lock);
-}
-
 static inline int
-cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
+cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
 {
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
        if (sched->ws_stopping) {
-               cfs_wi_sched_unlock(sched);
+               spin_unlock(&sched->ws_lock);
                return 0;
        }
 
        if (!list_empty(&sched->ws_runq)) {
-               cfs_wi_sched_unlock(sched);
+               spin_unlock(&sched->ws_lock);
                return 0;
        }
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
        return 1;
 }
 
@@ -121,12 +105,12 @@ cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
  * 1. when it returns no one shall try to schedule the workitem.
  */
 void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
        LASSERT(!in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);
 
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        LASSERT(wi->wi_running);
 
@@ -141,9 +125,7 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
        LASSERT(list_empty(&wi->wi_list));
 
        wi->wi_scheduled = 1; /* LBUG future schedule attempts */
-       cfs_wi_sched_unlock(sched);
-
-       return;
+       spin_unlock(&sched->ws_lock);
 }
 EXPORT_SYMBOL(cfs_wi_exit);
 
@@ -151,7 +133,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
  * cancel schedule request of workitem \a wi
  */
 int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
        int     rc;
 
@@ -163,7 +145,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
          * means the workitem will not be scheduled and will not have
          * any race with wi_action.
          */
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        rc = !(wi->wi_running);
 
@@ -179,7 +161,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 
        LASSERT (list_empty(&wi->wi_list));
 
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
        return rc;
 }
 EXPORT_SYMBOL(cfs_wi_deschedule);
@@ -192,12 +174,12 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
  * be added, and even dynamic creation of serialised queues might be supported.
  */
 void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
        LASSERT(!in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);
 
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        if (!wi->wi_scheduled) {
                LASSERT (list_empty(&wi->wi_list));
@@ -213,22 +195,21 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
        }
 
        LASSERT (!list_empty(&wi->wi_list));
-       cfs_wi_sched_unlock(sched);
-       return;
+       spin_unlock(&sched->ws_lock);
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
 
 static int
-cfs_wi_scheduler (void *arg)
+cfs_wi_scheduler(void *arg)
 {
-       struct cfs_wi_sched     *sched = (cfs_wi_sched_t *)arg;
+       struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
 
        cfs_block_allsigs();
 
        /* CPT affinity scheduler? */
        if (sched->ws_cptab != NULL)
                if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
-                       CWARN("Failed to bind %s on CPT %d\n",
+                       CWARN("Unable to bind %s on CPU partition %d\n",
                                sched->ws_name, sched->ws_cpt);
 
        spin_lock(&cfs_wi_data.wi_glock);
@@ -239,17 +220,17 @@ cfs_wi_scheduler (void *arg)
 
        spin_unlock(&cfs_wi_data.wi_glock);
 
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        while (!sched->ws_stopping) {
                int             nloops = 0;
                int             rc;
-               cfs_workitem_t *wi;
+               struct cfs_workitem *wi;
 
                while (!list_empty(&sched->ws_runq) &&
                       nloops < CFS_WI_RESCHED) {
                        wi = list_entry(sched->ws_runq.next,
-                                           cfs_workitem_t, wi_list);
+                                       struct cfs_workitem, wi_list);
                        LASSERT(wi->wi_scheduled && !wi->wi_running);
 
                        list_del_init(&wi->wi_list);
@@ -260,13 +241,12 @@ cfs_wi_scheduler (void *arg)
                         wi->wi_running   = 1;
                         wi->wi_scheduled = 0;
 
-
-                        cfs_wi_sched_unlock(sched);
+                       spin_unlock(&sched->ws_lock);
                         nloops++;
 
                         rc = (*wi->wi_action) (wi);
 
-                        cfs_wi_sched_lock(sched);
+                       spin_lock(&sched->ws_lock);
                         if (rc != 0) /* WI should be dead, even be freed! */
                                 continue;
 
@@ -281,21 +261,21 @@ cfs_wi_scheduler (void *arg)
                 }
 
                if (!list_empty(&sched->ws_runq)) {
-                       cfs_wi_sched_unlock(sched);
+                       spin_unlock(&sched->ws_lock);
                        /* don't sleep because some workitems still
                         * expect me to come back soon */
                        cond_resched();
-                       cfs_wi_sched_lock(sched);
+                       spin_lock(&sched->ws_lock);
                        continue;
                }
 
-               cfs_wi_sched_unlock(sched);
+               spin_unlock(&sched->ws_lock);
                rc = wait_event_interruptible_exclusive(sched->ws_waitq,
                                !cfs_wi_sched_cansleep(sched));
-               cfs_wi_sched_lock(sched);
+               spin_lock(&sched->ws_lock);
         }
 
-        cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
 
        spin_lock(&cfs_wi_data.wi_glock);
        sched->ws_nthreads--;
@@ -330,10 +310,9 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
                int i = 2;
 
                while (sched->ws_nthreads > 0) {
-                       CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
-                              "waiting for %d threads of WI sched[%s] to "
-                              "terminate\n", sched->ws_nthreads,
-                              sched->ws_name);
+                       CDEBUG(is_power_of_2(++i / 20) ? D_WARNING : D_NET,
+                              "waiting %us for %d %s worker threads to exit\n",
+                              i / 20, sched->ws_nthreads, sched->ws_name);
 
                        spin_unlock(&cfs_wi_data.wi_glock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -438,7 +417,7 @@ EXPORT_SYMBOL(cfs_wi_sched_create);
 int
 cfs_wi_startup(void)
 {
-       memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
+       memset(&cfs_wi_data, 0, sizeof(struct cfs_workitem_data));
 
        spin_lock_init(&cfs_wi_data.wi_glock);
        INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);