LU-3963 libcfs: convert DT objects atomic primitives 76/7076/7
author Peng Tao <tao.peng@emc.com>
Tue, 17 Dec 2013 19:55:13 +0000 (14:55 -0500)
committer Oleg Drokin <oleg.drokin@intel.com>
Mon, 13 Jan 2014 19:19:25 +0000 (19:19 +0000)
This patch converts all cfs_atomic primitives in
ofd, osc, osd-ldiskfs and osd-zfs to the Linux
atomic API.
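
The conversion is mechanical: each cfs_atomic_* wrapper maps one-to-one onto the
kernel's native atomic_t API, so only the type name and the call names change.
A minimal illustrative sketch of the before/after pattern for a reference count
(example_ref and its helpers are hypothetical and not part of this patch):

#include <linux/atomic.h>

struct example_ref {
	atomic_t	er_refc;			/* was: cfs_atomic_t */
};

static void example_ref_init(struct example_ref *ref)
{
	atomic_set(&ref->er_refc, 1);			/* was: cfs_atomic_set() */
}

static void example_ref_get(struct example_ref *ref)
{
	atomic_inc(&ref->er_refc);			/* was: cfs_atomic_inc() */
}

/* Returns non-zero when the last reference is dropped. */
static int example_ref_put(struct example_ref *ref)
{
	return atomic_dec_and_test(&ref->er_refc);	/* was: cfs_atomic_dec_and_test() */
}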

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: I235cd45503115a936cf502e5469daf806cf16078
Reviewed-on: http://review.whamcloud.com/7076
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Yang Sheng <yang.sheng@intel.com>
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
16 files changed:
lustre/ofd/ofd_dev.c
lustre/ofd/ofd_fs.c
lustre/ofd/ofd_internal.h
lustre/osc/lproc_osc.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_object.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_io.c
lustre/osd-zfs/osd_handler.c
lustre/osd-zfs/osd_internal.h
lustre/osd-zfs/osd_io.c

index 21b124c..cb58b99 100644 (file)
@@ -2140,7 +2140,7 @@ static void ofd_fini(const struct lu_env *env, struct ofd_device *m)
 
        ofd_stack_fini(env, m, &m->ofd_dt_dev.dd_lu_dev);
        ofd_procfs_fini(m);
-       LASSERT(cfs_atomic_read(&d->ld_ref) == 0);
+       LASSERT(atomic_read(&d->ld_ref) == 0);
        server_put_mount(obd->obd_name);
        EXIT;
 }
index 8a2a8a5..bba1618 100644 (file)
@@ -86,7 +86,7 @@ struct ofd_seq *ofd_seq_get(struct ofd_device *ofd, obd_seq seq)
        read_lock(&ofd->ofd_seq_list_lock);
        cfs_list_for_each_entry(oseq, &ofd->ofd_seq_list, os_list) {
                if (ostid_seq(&oseq->os_oi) == seq) {
-                       cfs_atomic_inc(&oseq->os_refc);
+                       atomic_inc(&oseq->os_refc);
                        read_unlock(&ofd->ofd_seq_list_lock);
                        return oseq;
                }
@@ -106,7 +106,7 @@ static void ofd_seq_destroy(const struct lu_env *env,
 
 void ofd_seq_put(const struct lu_env *env, struct ofd_seq *oseq)
 {
-       if (cfs_atomic_dec_and_test(&oseq->os_refc))
+       if (atomic_dec_and_test(&oseq->os_refc))
                ofd_seq_destroy(env, oseq);
 }
 
@@ -133,14 +133,14 @@ static struct ofd_seq *ofd_seq_add(const struct lu_env *env,
        write_lock(&ofd->ofd_seq_list_lock);
        cfs_list_for_each_entry(os, &ofd->ofd_seq_list, os_list) {
                if (ostid_seq(&os->os_oi) == ostid_seq(&new_seq->os_oi)) {
-                       cfs_atomic_inc(&os->os_refc);
+                       atomic_inc(&os->os_refc);
                        write_unlock(&ofd->ofd_seq_list_lock);
                        /* The seq has not been added to the list */
                        ofd_seq_put(env, new_seq);
                        return os;
                }
        }
-       cfs_atomic_inc(&new_seq->os_refc);
+       atomic_inc(&new_seq->os_refc);
        cfs_list_add_tail(&new_seq->os_list, &ofd->ofd_seq_list);
        ofd->ofd_seq_count++;
        write_unlock(&ofd->ofd_seq_list_lock);
@@ -299,7 +299,7 @@ struct ofd_seq *ofd_seq_load(const struct lu_env *env, struct ofd_device *ofd,
        spin_lock_init(&oseq->os_last_oid_lock);
        ostid_set_seq(&oseq->os_oi, seq);
 
-       cfs_atomic_set(&oseq->os_refc, 1);
+       atomic_set(&oseq->os_refc, 1);
 
        rc = dt_attr_get(env, dob, &info->fti_attr, BYPASS_CAPA);
        if (rc)
index 9285a1f..394efc9 100644 (file)
@@ -113,7 +113,7 @@ struct ofd_seq {
        struct ost_id           os_oi;
        spinlock_t              os_last_oid_lock;
        struct mutex            os_create_lock;
-       cfs_atomic_t            os_refc;
+       atomic_t                os_refc;
        struct dt_object        *os_lastid_obj;
        unsigned long           os_destroys_in_progress:1;
 };
index 06bd4c9..d6e3703 100644 (file)
@@ -169,9 +169,9 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
        rc = seq_printf(m,
                      "used_mb: %d\n"
                      "busy_cnt: %d\n",
-                     (cfs_atomic_read(&cli->cl_lru_in_list) +
-                       cfs_atomic_read(&cli->cl_lru_busy)) >> shift,
-                     cfs_atomic_read(&cli->cl_lru_busy));
+                     (atomic_read(&cli->cl_lru_in_list) +
+                       atomic_read(&cli->cl_lru_busy)) >> shift,
+                     atomic_read(&cli->cl_lru_busy));
 
        return rc;
 }
@@ -194,7 +194,7 @@ osc_cached_mb_seq_write(struct file *file, const char *buffer,
        if (pages_number < 0)
                return -ERANGE;
 
-       rc = cfs_atomic_read(&cli->cl_lru_in_list) - pages_number;
+       rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
        if (rc > 0) {
                struct lu_env *env;
                int refcheck;
@@ -401,7 +401,7 @@ static int osc_resend_count_seq_show(struct seq_file *m, void *v)
 {
        struct obd_device *obd = m->private;
 
-       return seq_printf(m, "%u\n", cfs_atomic_read(&obd->u.cli.cl_resends));
+       return seq_printf(m, "%u\n", atomic_read(&obd->u.cli.cl_resends));
 }
 
 static ssize_t osc_resend_count_seq_write(struct file *file, const char *buffer,
@@ -410,16 +410,16 @@ static ssize_t osc_resend_count_seq_write(struct file *file, const char *buffer,
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        int val, rc;
 
-        rc = lprocfs_write_helper(buffer, count, &val);
-        if (rc)
-                return rc;
+       rc = lprocfs_write_helper(buffer, count, &val);
+       if (rc)
+               return rc;
 
-        if (val < 0)
-               return -EINVAL;
+       if (val < 0)
+               return -EINVAL;
 
-        cfs_atomic_set(&obd->u.cli.cl_resends, val);
+       atomic_set(&obd->u.cli.cl_resends, val);
 
-        return count;
+       return count;
 }
 LPROC_SEQ_FOPS(osc_resend_count);
 
@@ -464,7 +464,7 @@ static int osc_destroys_in_flight_seq_show(struct seq_file *m, void *v)
 {
        struct obd_device *obd = m->private;
        return seq_printf(m, "%u\n",
-                         cfs_atomic_read(&obd->u.cli.cl_destroy_in_flight));
+                         atomic_read(&obd->u.cli.cl_destroy_in_flight));
 }
 LPROC_SEQ_FOPS_RO(osc_destroys_in_flight);
 
@@ -515,7 +515,7 @@ static int osc_unstable_stats_seq_show(struct seq_file *m, void *v)
        struct client_obd *cli = &dev->u.cli;
        int pages, mb;
 
-       pages = cfs_atomic_read(&cli->cl_unstable_count);
+       pages = atomic_read(&cli->cl_unstable_count);
        mb    = (pages * PAGE_CACHE_SIZE) >> 20;
 
        return seq_printf(m, "unstable_pages: %8d\n"
@@ -581,56 +581,57 @@ struct lprocfs_seq_vars lprocfs_osc_obd_vars[] = {
 
 static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
 {
-        struct timeval now;
-        struct obd_device *dev = seq->private;
-        struct client_obd *cli = &dev->u.cli;
-        unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
+       struct timeval now;
+       struct obd_device *dev = seq->private;
+       struct client_obd *cli = &dev->u.cli;
+       unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
        int i;
 
        do_gettimeofday(&now);
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
 
-        seq_printf(seq, "snapshot_time:         %lu.%lu (secs.usecs)\n",
-                   now.tv_sec, now.tv_usec);
-        seq_printf(seq, "read RPCs in flight:  %d\n",
-                   cli->cl_r_in_flight);
-        seq_printf(seq, "write RPCs in flight: %d\n",
-                   cli->cl_w_in_flight);
-        seq_printf(seq, "pending write pages:  %d\n",
-                  cfs_atomic_read(&cli->cl_pending_w_pages));
-        seq_printf(seq, "pending read pages:   %d\n",
-                  cfs_atomic_read(&cli->cl_pending_r_pages));
-
-        seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
-        seq_printf(seq, "pages per rpc         rpcs   %% cum %% |");
-        seq_printf(seq, "       rpcs   %% cum %%\n");
-
-        read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
-        write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
-
-        read_cum = 0;
-        write_cum = 0;
-        for (i = 0; i < OBD_HIST_MAX; i++) {
-                unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
-                unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
-                read_cum += r;
-                write_cum += w;
-                seq_printf(seq, "%d:\t\t%10lu %3lu %3lu   | %10lu %3lu %3lu\n",
-                                 1 << i, r, pct(r, read_tot),
-                                 pct(read_cum, read_tot), w,
-                                 pct(w, write_tot),
-                                 pct(write_cum, write_tot));
-                if (read_cum == read_tot && write_cum == write_tot)
-                        break;
-        }
+       seq_printf(seq, "snapshot_time:         %lu.%lu (secs.usecs)\n",
+                  now.tv_sec, now.tv_usec);
+       seq_printf(seq, "read RPCs in flight:  %d\n",
+                  cli->cl_r_in_flight);
+       seq_printf(seq, "write RPCs in flight: %d\n",
+                  cli->cl_w_in_flight);
+       seq_printf(seq, "pending write pages:  %d\n",
+                  atomic_read(&cli->cl_pending_w_pages));
+       seq_printf(seq, "pending read pages:   %d\n",
+                  atomic_read(&cli->cl_pending_r_pages));
+
+       seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+       seq_printf(seq, "pages per rpc         rpcs   %% cum %% |");
+       seq_printf(seq, "       rpcs   %% cum %%\n");
+
+       read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
+       write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
+
+       read_cum = 0;
+       write_cum = 0;
+       for (i = 0; i < OBD_HIST_MAX; i++) {
+               unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
+               unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
+
+               read_cum += r;
+               write_cum += w;
+               seq_printf(seq, "%d:\t\t%10lu %3lu %3lu   | %10lu %3lu %3lu\n",
+                          1 << i, r, pct(r, read_tot),
+                          pct(read_cum, read_tot), w,
+                          pct(w, write_tot),
+                          pct(write_cum, write_tot));
+               if (read_cum == read_tot && write_cum == write_tot)
+                       break;
+       }
 
-        seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
-        seq_printf(seq, "rpcs in flight        rpcs   %% cum %% |");
-        seq_printf(seq, "       rpcs   %% cum %%\n");
+       seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+       seq_printf(seq, "rpcs in flight        rpcs   %% cum %% |");
+       seq_printf(seq, "       rpcs   %% cum %%\n");
 
-        read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
-        write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
+       read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
+       write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
 
         read_cum = 0;
         write_cum = 0;
index e6b111b..6887309 100644 (file)
@@ -102,28 +102,28 @@ static inline char list_empty_marker(cfs_list_t *list)
 static const char *oes_strings[] = {
        "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
 
-#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do {                          \
-       struct osc_extent *__ext = (extent);                                  \
-       char __buf[16];                                                       \
-                                                                             \
-       CDEBUG(lvl,                                                           \
-               "extent %p@{" EXTSTR ", "                                     \
-               "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt,          \
-               /* ----- extent part 0 ----- */                               \
-               __ext, EXTPARA(__ext),                                        \
-               /* ----- part 1 ----- */                                      \
-               cfs_atomic_read(&__ext->oe_refc),                             \
-               cfs_atomic_read(&__ext->oe_users),                            \
-               list_empty_marker(&__ext->oe_link),                           \
-               oes_strings[__ext->oe_state], ext_flags(__ext, __buf),        \
-               __ext->oe_obj,                                                \
-               /* ----- part 2 ----- */                                      \
-               __ext->oe_grants, __ext->oe_nr_pages,                         \
-               list_empty_marker(&__ext->oe_pages),                          \
-               waitqueue_active(&__ext->oe_waitq) ? '+' : '-',               \
-               __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner,           \
-               /* ----- part 4 ----- */                                      \
-               ## __VA_ARGS__);                                              \
+#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do {                    \
+       struct osc_extent *__ext = (extent);                            \
+       char __buf[16];                                                 \
+                                                                       \
+       CDEBUG(lvl,                                                     \
+               "extent %p@{" EXTSTR ", "                               \
+               "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt,    \
+               /* ----- extent part 0 ----- */                         \
+               __ext, EXTPARA(__ext),                                  \
+               /* ----- part 1 ----- */                                \
+               atomic_read(&__ext->oe_refc),                           \
+               atomic_read(&__ext->oe_users),                          \
+               list_empty_marker(&__ext->oe_link),                     \
+               oes_strings[__ext->oe_state], ext_flags(__ext, __buf),  \
+               __ext->oe_obj,                                          \
+               /* ----- part 2 ----- */                                \
+               __ext->oe_grants, __ext->oe_nr_pages,                   \
+               list_empty_marker(&__ext->oe_pages),                    \
+               waitqueue_active(&__ext->oe_waitq) ? '+' : '-',         \
+               __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner,     \
+               /* ----- part 4 ----- */                                \
+               ## __VA_ARGS__);                                        \
 } while (0)
 
 #undef EASSERTF
@@ -184,10 +184,10 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
        if (ext->oe_state >= OES_STATE_MAX)
                GOTO(out, rc = 10);
 
-       if (cfs_atomic_read(&ext->oe_refc) <= 0)
+       if (atomic_read(&ext->oe_refc) <= 0)
                GOTO(out, rc = 20);
 
-       if (cfs_atomic_read(&ext->oe_refc) < cfs_atomic_read(&ext->oe_users))
+       if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
                GOTO(out, rc = 30);
 
        switch (ext->oe_state) {
@@ -197,7 +197,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
                GOTO(out, rc = 0);
                break;
        case OES_ACTIVE:
-               if (cfs_atomic_read(&ext->oe_users) == 0)
+               if (atomic_read(&ext->oe_users) == 0)
                        GOTO(out, rc = 40);
                if (ext->oe_hp)
                        GOTO(out, rc = 50);
@@ -210,7 +210,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
                if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
                        GOTO(out, rc = 65);
        default:
-               if (cfs_atomic_read(&ext->oe_users) > 0)
+               if (atomic_read(&ext->oe_users) > 0)
                        GOTO(out, rc = 70);
        }
 
@@ -315,8 +315,8 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
 
        RB_CLEAR_NODE(&ext->oe_node);
        ext->oe_obj = obj;
-       cfs_atomic_set(&ext->oe_refc, 1);
-       cfs_atomic_set(&ext->oe_users, 0);
+       atomic_set(&ext->oe_refc, 1);
+       atomic_set(&ext->oe_users, 0);
        CFS_INIT_LIST_HEAD(&ext->oe_link);
        ext->oe_state = OES_INV;
        CFS_INIT_LIST_HEAD(&ext->oe_pages);
@@ -333,17 +333,17 @@ static void osc_extent_free(struct osc_extent *ext)
 
 static struct osc_extent *osc_extent_get(struct osc_extent *ext)
 {
-       LASSERT(cfs_atomic_read(&ext->oe_refc) >= 0);
-       cfs_atomic_inc(&ext->oe_refc);
+       LASSERT(atomic_read(&ext->oe_refc) >= 0);
+       atomic_inc(&ext->oe_refc);
        return ext;
 }
 
 static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
 {
-       LASSERT(cfs_atomic_read(&ext->oe_refc) > 0);
-       if (cfs_atomic_dec_and_test(&ext->oe_refc)) {
+       LASSERT(atomic_read(&ext->oe_refc) > 0);
+       if (atomic_dec_and_test(&ext->oe_refc)) {
                LASSERT(cfs_list_empty(&ext->oe_link));
-               LASSERT(cfs_atomic_read(&ext->oe_users) == 0);
+               LASSERT(atomic_read(&ext->oe_users) == 0);
                LASSERT(ext->oe_state == OES_INV);
                LASSERT(!ext->oe_intree);
 
@@ -362,9 +362,9 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
  */
 static void osc_extent_put_trust(struct osc_extent *ext)
 {
-       LASSERT(cfs_atomic_read(&ext->oe_refc) > 1);
+       LASSERT(atomic_read(&ext->oe_refc) > 1);
        LASSERT(osc_object_is_locked(ext->oe_obj));
-       cfs_atomic_dec(&ext->oe_refc);
+       atomic_dec(&ext->oe_refc);
 }
 
 /**
@@ -457,7 +457,7 @@ static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
                osc_extent_state_set(ext, OES_ACTIVE);
                osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
        }
-       cfs_atomic_inc(&ext->oe_users);
+       atomic_inc(&ext->oe_users);
        cfs_list_del_init(&ext->oe_link);
        return osc_extent_get(ext);
 }
@@ -542,11 +542,11 @@ int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
        int rc = 0;
        ENTRY;
 
-       LASSERT(cfs_atomic_read(&ext->oe_users) > 0);
+       LASSERT(atomic_read(&ext->oe_users) > 0);
        LASSERT(sanity_check(ext) == 0);
        LASSERT(ext->oe_grants > 0);
 
-       if (cfs_atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
+       if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
                LASSERT(ext->oe_state == OES_ACTIVE);
                if (ext->oe_trunc_pending) {
                        /* a truncate process is waiting for this extent.
@@ -1197,10 +1197,10 @@ static inline int osc_is_ready(struct osc_object *osc)
               (OSC), osc_is_ready(OSC),                                       \
               list_empty_marker(&(OSC)->oo_hp_ready_item),                    \
               list_empty_marker(&(OSC)->oo_ready_item),                       \
-              cfs_atomic_read(&(OSC)->oo_nr_writes),                          \
+              atomic_read(&(OSC)->oo_nr_writes),                              \
               list_empty_marker(&(OSC)->oo_hp_exts),                          \
               list_empty_marker(&(OSC)->oo_urgent_exts),                      \
-              cfs_atomic_read(&(OSC)->oo_nr_reads),                           \
+              atomic_read(&(OSC)->oo_nr_reads),                               \
               list_empty_marker(&(OSC)->oo_reading_exts),                     \
               ##args)
 
@@ -1316,21 +1316,21 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
        RETURN(0);
 }
 
-#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do {                          \
-       struct client_obd *__tmp = (cli);                                     \
-       CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d "          \
-              "unstable_pages: %d/%d dropped: %ld avail: %ld, "              \
-              "reserved: %ld, flight: %d } lru {in list: %d, "               \
-              "left: %d, waiters: %d }" fmt,                                 \
-              __tmp->cl_import->imp_obd->obd_name,                           \
-              __tmp->cl_dirty, __tmp->cl_dirty_max,                          \
-              cfs_atomic_read(&obd_dirty_pages), obd_max_dirty_pages,        \
-              cfs_atomic_read(&obd_unstable_pages), obd_max_dirty_pages,     \
-              __tmp->cl_lost_grant, __tmp->cl_avail_grant,                   \
-              __tmp->cl_reserved_grant, __tmp->cl_w_in_flight,               \
-              cfs_atomic_read(&__tmp->cl_lru_in_list),                       \
-              cfs_atomic_read(&__tmp->cl_lru_busy),                          \
-              cfs_atomic_read(&__tmp->cl_lru_shrinkers), ##args);            \
+#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do {                    \
+       struct client_obd *__tmp = (cli);                               \
+       CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d "    \
+              "unstable_pages: %d/%d dropped: %ld avail: %ld, "        \
+              "reserved: %ld, flight: %d } lru {in list: %d, "         \
+              "left: %d, waiters: %d }" fmt,                           \
+              __tmp->cl_import->imp_obd->obd_name,                     \
+              __tmp->cl_dirty, __tmp->cl_dirty_max,                    \
+              atomic_read(&obd_dirty_pages), obd_max_dirty_pages,      \
+              atomic_read(&obd_unstable_pages), obd_max_dirty_pages,   \
+              __tmp->cl_lost_grant, __tmp->cl_avail_grant,             \
+              __tmp->cl_reserved_grant, __tmp->cl_w_in_flight,         \
+              atomic_read(&__tmp->cl_lru_in_list),                     \
+              atomic_read(&__tmp->cl_lru_busy),                        \
+              atomic_read(&__tmp->cl_lru_shrinkers), ##args);          \
 } while (0)
 
 /* caller must hold loi_list_lock */
@@ -1339,7 +1339,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
 {
        LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
-       cfs_atomic_inc(&obd_dirty_pages);
+       atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += PAGE_CACHE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
@@ -1361,11 +1361,11 @@ static void osc_release_write_grant(struct client_obd *cli,
        }
 
        pga->flag &= ~OBD_BRW_FROM_GRANT;
-       cfs_atomic_dec(&obd_dirty_pages);
+       atomic_dec(&obd_dirty_pages);
        cli->cl_dirty -= PAGE_CACHE_SIZE;
        if (pga->flag & OBD_BRW_NOCACHE) {
                pga->flag &= ~OBD_BRW_NOCACHE;
-               cfs_atomic_dec(&obd_dirty_transit_pages);
+               atomic_dec(&obd_dirty_transit_pages);
                cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
        }
        EXIT;
@@ -1434,7 +1434,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
        int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
-       cfs_atomic_sub(nr_pages, &obd_dirty_pages);
+       atomic_sub(nr_pages, &obd_dirty_pages);
        cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
        cli->cl_lost_grant += lost_grant;
        if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
@@ -1478,12 +1478,12 @@ static int osc_enter_cache_try(struct client_obd *cli,
                return 0;
 
        if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
-           cfs_atomic_read(&obd_unstable_pages) + 1 +
-           cfs_atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
+           atomic_read(&obd_unstable_pages) + 1 +
+           atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
                osc_consume_write_grant(cli, &oap->oap_brw_page);
                if (transient) {
                        cli->cl_dirty_transit += PAGE_CACHE_SIZE;
-                       cfs_atomic_inc(&obd_dirty_transit_pages);
+                       atomic_inc(&obd_dirty_transit_pages);
                        oap->oap_brw_flags |= OBD_BRW_NOCACHE;
                }
                rc = 1;
@@ -1612,8 +1612,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
                ocw->ocw_rc = -EDQUOT;
                /* we can't dirty more */
                if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
-                   (cfs_atomic_read(&obd_unstable_pages) + 1 +
-                    cfs_atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
+                   (atomic_read(&obd_unstable_pages) + 1 +
+                    atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                               "osc max %ld, sys max %d\n", cli->cl_dirty,
                               cli->cl_dirty_max, obd_max_dirty_pages);
@@ -1657,7 +1657,7 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
                invalid_import = 1;
 
        if (cmd & OBD_BRW_WRITE) {
-               if (cfs_atomic_read(&osc->oo_nr_writes) == 0)
+               if (atomic_read(&osc->oo_nr_writes) == 0)
                        RETURN(0);
                if (invalid_import) {
                        CDEBUG(D_CACHE, "invalid import forcing RPC\n");
@@ -1678,11 +1678,11 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
                        CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
                        RETURN(1);
                }
-               if (cfs_atomic_read(&osc->oo_nr_writes) >=
+               if (atomic_read(&osc->oo_nr_writes) >=
                    cli->cl_max_pages_per_rpc)
                        RETURN(1);
        } else {
-               if (cfs_atomic_read(&osc->oo_nr_reads) == 0)
+               if (atomic_read(&osc->oo_nr_reads) == 0)
                        RETURN(0);
                if (invalid_import) {
                        CDEBUG(D_CACHE, "invalid import forcing RPC\n");
@@ -1700,13 +1700,13 @@ static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
 {
        struct client_obd *cli = osc_cli(obj);
        if (cmd & OBD_BRW_WRITE) {
-               cfs_atomic_add(delta, &obj->oo_nr_writes);
-               cfs_atomic_add(delta, &cli->cl_pending_w_pages);
-               LASSERT(cfs_atomic_read(&obj->oo_nr_writes) >= 0);
+               atomic_add(delta, &obj->oo_nr_writes);
+               atomic_add(delta, &cli->cl_pending_w_pages);
+               LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
        } else {
-               cfs_atomic_add(delta, &obj->oo_nr_reads);
-               cfs_atomic_add(delta, &cli->cl_pending_r_pages);
-               LASSERT(cfs_atomic_read(&obj->oo_nr_reads) >= 0);
+               atomic_add(delta, &obj->oo_nr_reads);
+               atomic_add(delta, &cli->cl_pending_r_pages);
+               LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
        }
        OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
 }
@@ -1740,10 +1740,10 @@ static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
        }
 
        on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
-               cfs_atomic_read(&osc->oo_nr_writes) > 0);
+               atomic_read(&osc->oo_nr_writes) > 0);
 
        on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
-               cfs_atomic_read(&osc->oo_nr_reads) > 0);
+               atomic_read(&osc->oo_nr_reads) > 0);
 
        return osc_is_ready(osc);
 }
@@ -1802,14 +1802,14 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
        for (i = 0; i < page_count; i++)
                dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
 
-       cfs_atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
-       LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+       atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
+       LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
 
-       cfs_atomic_sub(page_count, &cli->cl_unstable_count);
-       LASSERT(cfs_atomic_read(&cli->cl_unstable_count) >= 0);
+       atomic_sub(page_count, &cli->cl_unstable_count);
+       LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
 
-       cfs_atomic_sub(page_count, &obd_unstable_pages);
-       LASSERT(cfs_atomic_read(&obd_unstable_pages) >= 0);
+       atomic_sub(page_count, &obd_unstable_pages);
+       LASSERT(atomic_read(&obd_unstable_pages) >= 0);
 
        spin_lock(&req->rq_lock);
        req->rq_committed = 1;
@@ -1836,14 +1836,14 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
        for (i = 0; i < page_count; i++)
                inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
 
-       LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
-       cfs_atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+       LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+       atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
 
-       LASSERT(cfs_atomic_read(&cli->cl_unstable_count) >= 0);
-       cfs_atomic_add(page_count, &cli->cl_unstable_count);
+       LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+       atomic_add(page_count, &cli->cl_unstable_count);
 
-       LASSERT(cfs_atomic_read(&obd_unstable_pages) >= 0);
-       cfs_atomic_add(page_count, &obd_unstable_pages);
+       LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+       atomic_add(page_count, &obd_unstable_pages);
 
        spin_lock(&req->rq_lock);
 
@@ -2255,11 +2255,11 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
        if (!async) {
                /* disable osc_lru_shrink() temporarily to avoid
                 * potential stack overrun problem. LU-2859 */
-               cfs_atomic_inc(&cli->cl_lru_shrinkers);
+               atomic_inc(&cli->cl_lru_shrinkers);
                client_obd_list_lock(&cli->cl_loi_list_lock);
                osc_check_rpcs(env, cli, pol);
                client_obd_list_unlock(&cli->cl_loi_list_lock);
-               cfs_atomic_dec(&cli->cl_lru_shrinkers);
+               atomic_dec(&cli->cl_lru_shrinkers);
        } else {
                CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
                LASSERT(cli->cl_writeback_work != NULL);
index b8d0e3c..08082da 100644 (file)
@@ -170,8 +170,8 @@ struct osc_object {
 
        cfs_list_t         oo_reading_exts;
 
-       cfs_atomic_t     oo_nr_reads;
-       cfs_atomic_t     oo_nr_writes;
+       atomic_t         oo_nr_reads;
+       atomic_t         oo_nr_writes;
 
        /** Protect extent tree. Will be used to protect
         * oo_{read|write}_pages soon. */
@@ -614,70 +614,70 @@ enum osc_extent_state {
  */
 struct osc_extent {
        /** red-black tree node */
-       struct rb_node     oe_node;
+       struct rb_node          oe_node;
        /** osc_object of this extent */
-       struct osc_object *oe_obj;
+       struct osc_object       *oe_obj;
        /** refcount, removed from red-black tree if reaches zero. */
-       cfs_atomic_t       oe_refc;
+       atomic_t                oe_refc;
        /** busy if non-zero */
-       cfs_atomic_t       oe_users;
+       atomic_t                oe_users;
        /** link list of osc_object's oo_{hp|urgent|locking}_exts. */
-       cfs_list_t       oe_link;
+       cfs_list_t              oe_link;
        /** state of this extent */
-       unsigned int       oe_state;
+       unsigned int            oe_state;
        /** flags for this extent. */
-       unsigned int       oe_intree:1,
+       unsigned int            oe_intree:1,
        /** 0 is write, 1 is read */
-                          oe_rw:1,
-                          oe_srvlock:1,
-                          oe_memalloc:1,
+                               oe_rw:1,
+                               oe_srvlock:1,
+                               oe_memalloc:1,
        /** an ACTIVE extent is going to be truncated, so when this extent
         * is released, it will turn into TRUNC state instead of CACHE. */
-                          oe_trunc_pending:1,
+                               oe_trunc_pending:1,
        /** this extent should be written asap and someone may wait for the
         * write to finish. This bit is usually set along with urgent if
         * the extent was CACHE state.
         * fsync_wait extent can't be merged because new extent region may
         * exceed fsync range. */
-                          oe_fsync_wait:1,
+                               oe_fsync_wait:1,
        /** covering lock is being canceled */
-                          oe_hp:1,
+                               oe_hp:1,
        /** this extent should be written back asap. set if one of pages is
         * called by page WB daemon, or sync write or reading requests. */
-                          oe_urgent:1;
+                               oe_urgent:1;
        /** how many grants allocated for this extent.
         *  Grant allocated for this extent. There is no grant allocated
         *  for reading extents and sync write extents. */
-       unsigned int       oe_grants;
+       unsigned int            oe_grants;
        /** # of dirty pages in this extent */
-       unsigned int       oe_nr_pages;
+       unsigned int            oe_nr_pages;
        /** list of pending oap pages. Pages in this list are NOT sorted. */
-       cfs_list_t         oe_pages;
+       cfs_list_t              oe_pages;
        /** Since an extent has to be written out in atomic, this is used to
         * remember the next page need to be locked to write this extent out.
         * Not used right now.
         */
-       struct osc_page   *oe_next_page;
+       struct osc_page         *oe_next_page;
        /** start and end index of this extent, include start and end
         * themselves. Page offset here is the page index of osc_pages.
         * oe_start is used as keyword for red-black tree. */
-       pgoff_t            oe_start;
-       pgoff_t            oe_end;
+       pgoff_t                 oe_start;
+       pgoff_t                 oe_end;
        /** maximum ending index of this extent, this is limited by
         * max_pages_per_rpc, lock extent and chunk size. */
-       pgoff_t            oe_max_end;
+       pgoff_t                 oe_max_end;
        /** waitqueue - for those who want to be notified if this extent's
         * state has changed. */
-       wait_queue_head_t        oe_waitq;
+       wait_queue_head_t       oe_waitq;
        /** lock covering this extent */
-       struct cl_lock    *oe_osclock;
+       struct cl_lock          *oe_osclock;
        /** terminator of this extent. Must be true if this extent is in IO. */
-       struct task_struct        *oe_owner;
+       struct task_struct      *oe_owner;
        /** return value of writeback. If somebody is waiting for this extent,
         * this value can be known by outside world. */
-       int                oe_rc;
+       int                     oe_rc;
        /** max pages per rpc when this extent was created */
-       unsigned int       oe_mppr;
+       unsigned int            oe_mppr;
 };
 
 int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
index 3ded607..8724520 100644 (file)
@@ -331,15 +331,15 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
        if (npages > max_pages)
                npages = max_pages;
 
-       c = cfs_atomic_read(cli->cl_lru_left);
+       c = atomic_read(cli->cl_lru_left);
        if (c < npages && osc_lru_reclaim(cli) > 0)
-               c = cfs_atomic_read(cli->cl_lru_left);
+               c = atomic_read(cli->cl_lru_left);
        while (c >= npages) {
-               if (c == cfs_atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+               if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
                        oio->oi_lru_reserved = npages;
                        break;
                }
-               c = cfs_atomic_read(cli->cl_lru_left);
+               c = atomic_read(cli->cl_lru_left);
        }
 
        RETURN(0);
@@ -353,7 +353,7 @@ static void osc_io_rw_iter_fini(const struct lu_env *env,
        struct client_obd *cli = osc_cli(osc);
 
        if (oio->oi_lru_reserved > 0) {
-               cfs_atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+               atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
                oio->oi_lru_reserved = 0;
        }
 }
index 91e3798..b68eb30 100644 (file)
@@ -95,8 +95,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
        CFS_INIT_LIST_HEAD(&osc->oo_urgent_exts);
        CFS_INIT_LIST_HEAD(&osc->oo_rpc_exts);
        CFS_INIT_LIST_HEAD(&osc->oo_reading_exts);
-       cfs_atomic_set(&osc->oo_nr_reads, 0);
-       cfs_atomic_set(&osc->oo_nr_writes, 0);
+       atomic_set(&osc->oo_nr_reads, 0);
+       atomic_set(&osc->oo_nr_writes, 0);
        spin_lock_init(&osc->oo_lock);
        spin_lock_init(&osc->oo_tree_lock);
 
@@ -123,8 +123,8 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
        LASSERT(cfs_list_empty(&osc->oo_urgent_exts));
        LASSERT(cfs_list_empty(&osc->oo_rpc_exts));
        LASSERT(cfs_list_empty(&osc->oo_reading_exts));
-       LASSERT(cfs_atomic_read(&osc->oo_nr_reads) == 0);
-       LASSERT(cfs_atomic_read(&osc->oo_nr_writes) == 0);
+       LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
+       LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
 
        lu_object_fini(obj);
        OBD_SLAB_FREE_PTR(osc, osc_object_kmem);
index 97ad0d0..f722052 100644 (file)
@@ -306,9 +306,9 @@ static int osc_page_print(const struct lu_env *env,
                          osc_list(&obj->oo_hp_ready_item),
                          osc_list(&obj->oo_write_item),
                          osc_list(&obj->oo_read_item),
-                         cfs_atomic_read(&obj->oo_nr_reads),
+                         atomic_read(&obj->oo_nr_reads),
                          osc_list(&obj->oo_reading_exts),
-                         cfs_atomic_read(&obj->oo_nr_writes),
+                         atomic_read(&obj->oo_nr_writes),
                          osc_list(&obj->oo_hp_exts),
                          osc_list(&obj->oo_urgent_exts));
 }
@@ -464,10 +464,10 @@ int osc_over_unstable_soft_limit(struct client_obd *cli)
        if (cli == NULL)
                return 0;
 
-       obd_upages = cfs_atomic_read(&obd_unstable_pages);
-       obd_dpages = cfs_atomic_read(&obd_dirty_pages);
+       obd_upages = atomic_read(&obd_unstable_pages);
+       obd_dpages = atomic_read(&obd_dirty_pages);
 
-       osc_upages = cfs_atomic_read(&cli->cl_unstable_count);
+       osc_upages = atomic_read(&cli->cl_unstable_count);
 
        /* obd_max_dirty_pages is the max number of (dirty + unstable)
         * pages allowed at any given time. To simulate an unstable page
@@ -543,14 +543,14 @@ static const int lru_shrink_max = 8 << (20 - PAGE_CACHE_SHIFT); /* 8M */
 static int osc_cache_too_much(struct client_obd *cli)
 {
        struct cl_client_cache *cache = cli->cl_cache;
-       int pages = cfs_atomic_read(&cli->cl_lru_in_list);
+       int pages = atomic_read(&cli->cl_lru_in_list);
        unsigned long budget;
 
-       budget = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
+       budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
 
        /* if it's going to run out LRU slots, we should free some, but not
         * too much to maintain faireness among OSCs. */
-       if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+       if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
                if (pages >= budget)
                        return lru_shrink_max;
                else if (pages >= budget / 2)
@@ -592,8 +592,8 @@ void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *plist)
        if (npages > 0) {
                client_obd_list_lock(&cli->cl_lru_list_lock);
                cfs_list_splice_tail(&lru, &cli->cl_lru_list);
-               cfs_atomic_sub(npages, &cli->cl_lru_busy);
-               cfs_atomic_add(npages, &cli->cl_lru_in_list);
+               atomic_sub(npages, &cli->cl_lru_busy);
+               atomic_add(npages, &cli->cl_lru_in_list);
                client_obd_list_unlock(&cli->cl_lru_list_lock);
 
                /* XXX: May set force to be true for better performance */
@@ -604,9 +604,9 @@ void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *plist)
 
 static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
 {
-       LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
+       LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
        cfs_list_del_init(&opg->ops_lru);
-       cfs_atomic_dec(&cli->cl_lru_in_list);
+       atomic_dec(&cli->cl_lru_in_list);
 }
 
 /**
@@ -620,12 +620,12 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
                if (!cfs_list_empty(&opg->ops_lru)) {
                        __osc_lru_del(cli, opg);
                } else {
-                       LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
-                       cfs_atomic_dec(&cli->cl_lru_busy);
+                       LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
+                       atomic_dec(&cli->cl_lru_busy);
                }
                client_obd_list_unlock(&cli->cl_lru_list_lock);
 
-               cfs_atomic_inc(cli->cl_lru_left);
+               atomic_inc(cli->cl_lru_left);
                /* this is a great place to release more LRU pages if
                 * this osc occupies too many LRU pages and kernel is
                 * stealing one of them. */
@@ -648,7 +648,7 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
                client_obd_list_lock(&cli->cl_lru_list_lock);
                __osc_lru_del(cli, opg);
                client_obd_list_unlock(&cli->cl_lru_list_lock);
-               cfs_atomic_inc(&cli->cl_lru_busy);
+               atomic_inc(&cli->cl_lru_busy);
        }
 }
 
@@ -685,27 +685,27 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
        int rc = 0;
        ENTRY;
 
-       LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) >= 0);
-       if (cfs_atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+       LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
+       if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
                RETURN(0);
 
        if (!force) {
-               if (cfs_atomic_read(&cli->cl_lru_shrinkers) > 0)
+               if (atomic_read(&cli->cl_lru_shrinkers) > 0)
                        RETURN(-EBUSY);
 
-               if (cfs_atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
-                       cfs_atomic_dec(&cli->cl_lru_shrinkers);
+               if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+                       atomic_dec(&cli->cl_lru_shrinkers);
                        RETURN(-EBUSY);
                }
        } else {
-               cfs_atomic_inc(&cli->cl_lru_shrinkers);
+               atomic_inc(&cli->cl_lru_shrinkers);
        }
 
        pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
        io = &osc_env_info(env)->oti_io;
 
        client_obd_list_lock(&cli->cl_lru_list_lock);
-       maxscan = min(target << 1, cfs_atomic_read(&cli->cl_lru_in_list));
+       maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
        while (!cfs_list_empty(&cli->cl_lru_list)) {
                struct cl_page *page;
                bool will_free = false;
@@ -792,9 +792,9 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                cl_object_put(env, clobj);
        }
 
-       cfs_atomic_dec(&cli->cl_lru_shrinkers);
+       atomic_dec(&cli->cl_lru_shrinkers);
        if (count > 0) {
-               cfs_atomic_add(count, cli->cl_lru_left);
+               atomic_add(count, cli->cl_lru_left);
                wake_up_all(&osc_lru_waitq);
        }
        RETURN(count > 0 ? count : rc);
@@ -802,7 +802,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 
 static inline int max_to_shrink(struct client_obd *cli)
 {
-       return min(cfs_atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
+       return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
 }
 
 int osc_lru_reclaim(struct client_obd *cli)
@@ -833,8 +833,8 @@ int osc_lru_reclaim(struct client_obd *cli)
 
        CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
                cli->cl_import->imp_obd->obd_name, cli,
-               cfs_atomic_read(&cli->cl_lru_in_list),
-               cfs_atomic_read(&cli->cl_lru_busy));
+               atomic_read(&cli->cl_lru_in_list),
+               atomic_read(&cli->cl_lru_busy));
 
        /* Reclaim LRU slots from other client_obd as it can't free enough
         * from its own. This should rarely happen. */
@@ -842,15 +842,15 @@ int osc_lru_reclaim(struct client_obd *cli)
        cache->ccc_lru_shrinkers++;
        cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
 
-       max_scans = cfs_atomic_read(&cache->ccc_users);
+       max_scans = atomic_read(&cache->ccc_users);
        while (--max_scans > 0 && !cfs_list_empty(&cache->ccc_lru)) {
                cli = cfs_list_entry(cache->ccc_lru.next, struct client_obd,
                                        cl_lru_osc);
 
                CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
                        cli->cl_import->imp_obd->obd_name, cli,
-                       cfs_atomic_read(&cli->cl_lru_in_list),
-                       cfs_atomic_read(&cli->cl_lru_busy));
+                       atomic_read(&cli->cl_lru_in_list),
+                       atomic_read(&cli->cl_lru_busy));
 
                cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
                if (osc_cache_too_much(cli) > 0) {
@@ -889,8 +889,8 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                goto out;
        }
 
-       LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
-       while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+       LASSERT(atomic_read(cli->cl_lru_left) >= 0);
+       while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
 
                /* run out of LRU spaces, try to drop some by itself */
                rc = osc_lru_reclaim(cli);
@@ -901,7 +901,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
 
                cond_resched();
                rc = l_wait_event(osc_lru_waitq,
-                               cfs_atomic_read(cli->cl_lru_left) > 0,
+                               atomic_read(cli->cl_lru_left) > 0,
                                &lwi);
                if (rc < 0)
                        break;
@@ -909,7 +909,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
 
 out:
        if (rc >= 0) {
-               cfs_atomic_inc(&cli->cl_lru_busy);
+               atomic_inc(&cli->cl_lru_busy);
                opg->ops_in_lru = 1;
                rc = 0;
        }
index 0c14415..b9d1b71 100644 (file)
@@ -695,19 +695,19 @@ static int osc_destroy_interpret(const struct lu_env *env,
 {
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
 
-       cfs_atomic_dec(&cli->cl_destroy_in_flight);
+       atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);
        return 0;
 }
 
 static int osc_can_send_destroy(struct client_obd *cli)
 {
-       if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
+       if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
-       if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
+       if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
@@ -838,18 +838,18 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                oa->o_undirty = 0;
-       } else if (unlikely(cfs_atomic_read(&obd_unstable_pages) +
-                           cfs_atomic_read(&obd_dirty_pages) -
-                           cfs_atomic_read(&obd_dirty_transit_pages) >
+       } else if (unlikely(atomic_read(&obd_unstable_pages) +
+                           atomic_read(&obd_dirty_pages) -
+                           atomic_read(&obd_dirty_transit_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
-               /* The cfs_atomic_read() allowing the cfs_atomic_inc() are
+               /* The atomic_read() allowing the atomic_inc() are
                 * not covered by a lock thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
                       cli->cl_import->imp_obd->obd_name,
-                      cfs_atomic_read(&obd_unstable_pages),
-                      cfs_atomic_read(&obd_dirty_pages),
-                      cfs_atomic_read(&obd_dirty_transit_pages),
+                      atomic_read(&obd_unstable_pages),
+                      atomic_read(&obd_dirty_pages),
+                      atomic_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
@@ -3234,7 +3234,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 
                LASSERT(cli->cl_cache == NULL); /* only once */
                cli->cl_cache = (struct cl_client_cache *)val;
-               cfs_atomic_inc(&cli->cl_cache->ccc_users);
+               atomic_inc(&cli->cl_cache->ccc_users);
                cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
 
                /* add this osc into entity list */
@@ -3248,7 +3248,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 
        if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
                struct client_obd *cli = &obd->u.cli;
-               int nr = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
+               int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
                int target = *(int *)val;
 
                nr = osc_lru_shrink(env, cli, min(nr, target), true);
@@ -3647,12 +3647,12 @@ int osc_cleanup(struct obd_device *obd)
 
        /* lru cleanup */
        if (cli->cl_cache != NULL) {
-               LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
+               LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
                spin_lock(&cli->cl_cache->ccc_lru_lock);
                cfs_list_del_init(&cli->cl_lru_osc);
                spin_unlock(&cli->cl_cache->ccc_lru_lock);
                cli->cl_lru_left = NULL;
-               cfs_atomic_dec(&cli->cl_cache->ccc_users);
+               atomic_dec(&cli->cl_cache->ccc_users);
                cli->cl_cache = NULL;
        }
 
index 7339837..162668e 100644 (file)
@@ -827,16 +827,16 @@ static void osd_trans_commit_cb(struct super_block *sb,
 }
 
 static struct thandle *osd_trans_create(const struct lu_env *env,
-                                        struct dt_device *d)
+                                       struct dt_device *d)
 {
-        struct osd_thread_info *oti = osd_oti_get(env);
-        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
-        struct osd_thandle     *oh;
-        struct thandle         *th;
-        ENTRY;
+       struct osd_thread_info  *oti = osd_oti_get(env);
+       struct osd_iobuf        *iobuf = &oti->oti_iobuf;
+       struct osd_thandle      *oh;
+       struct thandle          *th;
+       ENTRY;
 
-        /* on pending IO in this thread should left from prev. request */
-        LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+       /* on pending IO in this thread should left from prev. request */
+       LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
 
         th = ERR_PTR(-ENOMEM);
        OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
@@ -1045,7 +1045,7 @@ static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
         * completed otherwise iobuf may be corrupted by different request
         */
        wait_event(iobuf->dr_wait,
-                      cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+                      atomic_read(&iobuf->dr_numreqs) == 0);
        if (!rc)
                rc = iobuf->dr_error;
 
index e9dccbc..8436d1f 100644 (file)
@@ -261,13 +261,13 @@ struct osd_device {
        struct osd_obj_map      *od_ost_map;
        struct osd_mdobj_map    *od_mdt_map;
 
-        unsigned long long        od_readcache_max_filesize;
-        int                       od_read_cache;
-        int                       od_writethrough_cache;
+       unsigned long long      od_readcache_max_filesize;
+       int                     od_read_cache;
+       int                     od_writethrough_cache;
 
-        struct brw_stats          od_brw_stats;
-        cfs_atomic_t              od_r_in_flight;
-        cfs_atomic_t              od_w_in_flight;
+       struct brw_stats        od_brw_stats;
+       atomic_t                od_r_in_flight;
+       atomic_t                od_w_in_flight;
 
        struct mutex              od_otable_mutex;
        struct osd_otable_it     *od_otable_it;
@@ -455,7 +455,7 @@ struct osd_it_quota {
 
 struct osd_iobuf {
        wait_queue_head_t  dr_wait;
-       cfs_atomic_t       dr_numreqs;  /* number of reqs being processed */
+       atomic_t       dr_numreqs;  /* number of reqs being processed */
        int                dr_max_pages;
        int                dr_npages;
        int                dr_error;
index ace930d..2ef7771 100644 (file)
@@ -67,12 +67,12 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
 
        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
-                cfs_atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
+                atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
 
        init_waitqueue_head(&iobuf->dr_wait);
-       cfs_atomic_set(&iobuf->dr_numreqs, 0);
+       atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
@@ -154,23 +154,23 @@ static void dio_complete_routine(struct bio *bio, int error)
         /* CAVEAT EMPTOR: possibly in IRQ context
          * DO NOT record procfs stats here!!! */
 
-        if (unlikely(iobuf == NULL)) {
-                CERROR("***** bio->bi_private is NULL!  This should never "
-                       "happen.  Normally, I would crash here, but instead I "
-                       "will dump the bio contents to the console.  Please "
-                       "report this to <http://jira.whamcloud.com/> , along "
-                       "with any interesting messages leading up to this point "
-                       "(like SCSI errors, perhaps).  Because bi_private is "
-                       "NULL, I can't wake up the thread that initiated this "
-                       "IO - you will probably have to reboot this node.\n");
-                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
-                       "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
-                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
-                       bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
-                       bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
-                       bio->bi_private);
+       if (unlikely(iobuf == NULL)) {
+               CERROR("***** bio->bi_private is NULL!  This should never "
+                      "happen.  Normally, I would crash here, but instead I "
+                      "will dump the bio contents to the console.  Please "
+                      "report this to <http://jira.whamcloud.com/> , along "
+                      "with any interesting messages leading up to this point "
+                      "(like SCSI errors, perhaps).  Because bi_private is "
+                      "NULL, I can't wake up the thread that initiated this "
+                      "IO - you will probably have to reboot this node.\n");
+               CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
+                      "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
+                      "bi_private: %p\n", bio->bi_next, bio->bi_flags,
+                      bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
+                      bio->bi_end_io, atomic_read(&bio->bi_cnt),
+                      bio->bi_private);
                return;
-        }
+       }
 
         /* the check is outside of the cycle for performance reason -bzzz */
        if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
@@ -179,14 +179,14 @@ static void dio_complete_routine(struct bio *bio, int error)
                                 SetPageUptodate(bvl->bv_page);
                         LASSERT(PageLocked(bvl->bv_page));
                 }
-                cfs_atomic_dec(&iobuf->dr_dev->od_r_in_flight);
+               atomic_dec(&iobuf->dr_dev->od_r_in_flight);
         } else {
-                cfs_atomic_dec(&iobuf->dr_dev->od_w_in_flight);
+               atomic_dec(&iobuf->dr_dev->od_w_in_flight);
         }
 
-        /* any real error is good enough -bzzz */
-        if (error != 0 && iobuf->dr_error == 0)
-                iobuf->dr_error = error;
+       /* any real error is good enough -bzzz */
+       if (error != 0 && iobuf->dr_error == 0)
+               iobuf->dr_error = error;
 
        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
@@ -195,42 +195,42 @@ static void dio_complete_routine(struct bio *bio, int error)
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
-       if (cfs_atomic_read(&iobuf->dr_numreqs) == 1) {
+       if (atomic_read(&iobuf->dr_numreqs) == 1) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
        }
-       if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+       if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);
 
-        /* Completed bios used to be chained off iobuf->dr_bios and freed in
-         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
-         * mempool when serious on-disk fragmentation was encountered,
-         * deadlocking the OST.  The bios are now released as soon as complete
-         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
-        bio_put(bio);
+       /* Completed bios used to be chained off iobuf->dr_bios and freed in
+        * filter_clear_dreq().  It was then possible to exhaust the biovec-256
+        * mempool when serious on-disk fragmentation was encountered,
+        * deadlocking the OST.  The bios are now released as soon as complete
+        * so the pool cannot be exhausted while IOs are competing. bug 10076 */
+       bio_put(bio);
 }
 
 static void record_start_io(struct osd_iobuf *iobuf, int size)
 {
-        struct osd_device    *osd = iobuf->dr_dev;
-        struct obd_histogram *h = osd->od_brw_stats.hist;
-
-        iobuf->dr_frags++;
-        cfs_atomic_inc(&iobuf->dr_numreqs);
-
-        if (iobuf->dr_rw == 0) {
-                cfs_atomic_inc(&osd->od_r_in_flight);
-                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
-                                 cfs_atomic_read(&osd->od_r_in_flight));
-                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
-        } else if (iobuf->dr_rw == 1) {
-                cfs_atomic_inc(&osd->od_w_in_flight);
-                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
-                                 cfs_atomic_read(&osd->od_w_in_flight));
-                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
-        } else {
-                LBUG();
-        }
+       struct osd_device    *osd = iobuf->dr_dev;
+       struct obd_histogram *h = osd->od_brw_stats.hist;
+
+       iobuf->dr_frags++;
+       atomic_inc(&iobuf->dr_numreqs);
+
+       if (iobuf->dr_rw == 0) {
+               atomic_inc(&osd->od_r_in_flight);
+               lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
+                                atomic_read(&osd->od_r_in_flight));
+               lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
+       } else if (iobuf->dr_rw == 1) {
+               atomic_inc(&osd->od_w_in_flight);
+               lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
+                                atomic_read(&osd->od_w_in_flight));
+               lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
+       } else {
+               LBUG();
+       }
 }
 
 static void osd_submit_bio(int rw, struct bio *bio)
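
The ordering comment above is the one subtle part of dio_complete_routine(): dr_elapsed must be published while dr_numreqs is still non-zero, because the waiter in osd_do_bio() proceeds as soon as it sees the counter reach zero, so anything stored before the final decrement is safely visible to it. A sketch of that completion-side ordering, with illustrative names:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

/* Illustrative stand-in for the timing fields the real iobuf carries. */
struct demo_iobuf_timed {
        wait_queue_head_t       wait;
        atomic_t                numreqs;
        unsigned long           start_time;     /* jiffies at submit */
        unsigned long           elapsed;
        unsigned int            elapsed_valid:1;
};

/* Publish elapsed while numreqs is still non-zero: the waiter only
 * runs past wait_event() after the counter drops to zero, so it never
 * sees a half-written result. */
static void demo_complete_timed(struct demo_iobuf_timed *b)
{
        if (atomic_read(&b->numreqs) == 1) {
                b->elapsed = jiffies - b->start_time;
                b->elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&b->numreqs))
                wake_up(&b->wait);
}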
@@ -364,19 +364,19 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                 rc = 0;
         }
 
- out:
-        /* in order to achieve better IO throughput, we don't wait for writes
-         * completion here. instead we proceed with transaction commit in
-         * parallel and wait for IO completion once transaction is stopped
-         * see osd_trans_stop() for more details -bzzz */
-        if (iobuf->dr_rw == 0) {
+out:
+       /* in order to achieve better IO throughput, we don't wait for writes
+        * completion here. instead we proceed with transaction commit in
+        * parallel and wait for IO completion once transaction is stopped
+        * see osd_trans_stop() for more details -bzzz */
+       if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
-                               cfs_atomic_read(&iobuf->dr_numreqs) == 0);
-        }
+                          atomic_read(&iobuf->dr_numreqs) == 0);
+       }
 
-        if (rc == 0)
-                rc = iobuf->dr_error;
-        RETURN(rc);
+       if (rc == 0)
+               rc = iobuf->dr_error;
+       RETURN(rc);
 }
 
 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
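
The tail of osd_do_bio() encodes the policy spelled out in its comment: only reads are waited for inline, write completion is reaped after the transaction stops, and only the first real error is kept. A sketch of that submit-side logic, reusing struct demo_iobuf from the note after the osd_internal.h hunk and passing the first-error value explicitly to keep the illustration self-contained:

/* rw == 0 means read, as in iobuf->dr_rw; names are illustrative. */
static int demo_finish_submit(struct demo_iobuf *buf, int rw, int rc,
                              int first_error)
{
        if (rw == 0)    /* read: pages must be valid before returning */
                wait_event(buf->wait, atomic_read(&buf->numreqs) == 0);

        /* mirror iobuf->dr_error: the first real error wins */
        if (rc == 0)
                rc = first_error;
        return rc;
}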
index 292b1a8..d4193cd 100644 (file)
@@ -604,15 +604,15 @@ static void osd_umount(const struct lu_env *env, struct osd_device *o)
 {
        ENTRY;
 
-       if (cfs_atomic_read(&o->od_zerocopy_alloc))
+       if (atomic_read(&o->od_zerocopy_alloc))
                CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
-                      cfs_atomic_read(&o->od_zerocopy_alloc));
-       if (cfs_atomic_read(&o->od_zerocopy_loan))
+                      atomic_read(&o->od_zerocopy_alloc));
+       if (atomic_read(&o->od_zerocopy_loan))
                CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
-                      cfs_atomic_read(&o->od_zerocopy_loan));
-       if (cfs_atomic_read(&o->od_zerocopy_pin))
+                      atomic_read(&o->od_zerocopy_loan));
+       if (atomic_read(&o->od_zerocopy_pin))
                CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
-                      cfs_atomic_read(&o->od_zerocopy_pin));
+                      atomic_read(&o->od_zerocopy_pin));
 
        if (o->od_objset.os != NULL)
                udmu_objset_close(&o->od_objset);
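
osd_umount() uses the zerocopy counters purely as leak detectors: anything not back to zero at teardown gets reported. A stand-alone sketch of that check; the counter and name here are stand-ins for the real od_zerocopy_* fields and od_svname:

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t demo_zerocopy_alloc = ATOMIC_INIT(0);

/* Teardown-time leak check: a non-zero counter means some buffer was
 * never released through the matching put path. */
static void demo_umount_leak_check(const char *svname)
{
        if (atomic_read(&demo_zerocopy_alloc))
                pr_err("%s: lost %d allocated page(s)\n", svname,
                       atomic_read(&demo_zerocopy_alloc));
}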
index 0e202d2..bc51cb1 100644 (file)
@@ -274,9 +274,9 @@ struct osd_device {
        /* used to debug zerocopy logic: the fields track all
         * allocated, loaned and referenced buffers in use.
         * to be removed once the change is tested well. */
-       cfs_atomic_t             od_zerocopy_alloc;
-       cfs_atomic_t             od_zerocopy_loan;
-       cfs_atomic_t             od_zerocopy_pin;
+       atomic_t                 od_zerocopy_alloc;
+       atomic_t                 od_zerocopy_loan;
+       atomic_t                 od_zerocopy_pin;
 
        arc_prune_t             *arc_prune_cb;
 };
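
Because these counters are embedded in a dynamically allocated device structure, they cannot take the static ATOMIC_INIT() initialiser; they are either zeroed by the allocation or set explicitly with atomic_set() during setup. A hypothetical helper showing the explicit form, with the increment/decrement pairing noted per counter:

#include <linux/atomic.h>

struct demo_zc_stats {
        atomic_t zc_alloc;      /* + for anonymous copy pages, - in bufs_put   */
        atomic_t zc_loan;       /* + when an arcbuf is loaned, - on return     */
        atomic_t zc_pin;        /* + when a dbuf is pinned,    - on release    */
};

/* Hypothetical setup helper; the real device initialises its own
 * fields in its setup path. */
static void demo_zc_stats_init(struct demo_zc_stats *s)
{
        atomic_set(&s->zc_alloc, 0);
        atomic_set(&s->zc_loan, 0);
        atomic_set(&s->zc_pin, 0);
}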
index 3fe29ef..6745ea5 100644 (file)
@@ -218,17 +218,17 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        /* this is anonymous page allocated for copy-write */
                        lnb[i].page->mapping = NULL;
                        __free_page(lnb[i].page);
-                       cfs_atomic_dec(&osd->od_zerocopy_alloc);
+                       atomic_dec(&osd->od_zerocopy_alloc);
                } else {
                        /* see comment in osd_bufs_get_read() */
                        ptr = (unsigned long)lnb[i].dentry;
                        if (ptr & 1UL) {
                                ptr &= ~1UL;
                                dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
-                               cfs_atomic_dec(&osd->od_zerocopy_pin);
+                               atomic_dec(&osd->od_zerocopy_pin);
                        } else if (lnb[i].dentry != NULL) {
                                dmu_return_arcbuf((void *)lnb[i].dentry);
-                               cfs_atomic_dec(&osd->od_zerocopy_loan);
+                               atomic_dec(&osd->od_zerocopy_loan);
                        }
                }
                lnb[i].page = NULL;
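
The put path above relies on a tag bit: the low bit of the stored cookie distinguishes a dbuf pinned for read (released with dmu_buf_rele) from an arcbuf loaned for write (returned with dmu_return_arcbuf), and each branch drops the matching debug counter. A sketch of that discrimination with the DMU calls elided and illustrative counter names:

#include <linux/atomic.h>

static atomic_t demo_zerocopy_pin  = ATOMIC_INIT(0);
static atomic_t demo_zerocopy_loan = ATOMIC_INIT(0);

static void demo_release_cookie(void *cookie)
{
        unsigned long ptr = (unsigned long)cookie;

        if (ptr & 1UL) {
                /* low bit set: a dbuf pinned for read */
                ptr &= ~1UL;
                /* dmu_buf_rele() on (void *)ptr in the real code */
                atomic_dec(&demo_zerocopy_pin);
        } else if (cookie != NULL) {
                /* otherwise: an arcbuf loaned for write */
                /* dmu_return_arcbuf() on the cookie in the real code */
                atomic_dec(&demo_zerocopy_loan);
        }
}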
@@ -274,7 +274,7 @@ static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
 
                        LASSERT(len > 0);
 
-                       cfs_atomic_inc(&osd->od_zerocopy_pin);
+                       atomic_inc(&osd->od_zerocopy_pin);
 
                        bufoff = off - dbp[i]->db_offset;
                        tocpy = min_t(int, dbp[i]->db_size - bufoff, len);
@@ -353,7 +353,7 @@ static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                        if (unlikely(abuf == NULL))
                                GOTO(out_err, rc = -ENOMEM);
 
-                       cfs_atomic_inc(&osd->od_zerocopy_loan);
+                       atomic_inc(&osd->od_zerocopy_loan);
 
                        /* go over pages arcbuf contains, put them as
                         * local niobufs for ptlrpc's bulks */
@@ -407,7 +407,7 @@ static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                                LASSERT(lnb[i].page->mapping == NULL);
                                lnb[i].page->mapping = (void *)obj;
 
-                               cfs_atomic_inc(&osd->od_zerocopy_alloc);
+                               atomic_inc(&osd->od_zerocopy_alloc);
                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_COPY_IO, 1);
 
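
The osd_write_commit() hunk that follows closes the loop on the loan counter: once a loaned arcbuf has been consumed by the write, the cookie is cleared and od_zerocopy_loan dropped immediately, so the later osd_bufs_put() pass will not try to return the buffer a second time. A sketch of that hand-off; names are illustrative and the arcbuf itself is handed to the DMU by the caller:

#include <linux/atomic.h>

static void demo_consume_loaned(void **cookie, atomic_t *loan_count)
{
        *cookie = NULL;         /* stop the put path from releasing it */
        atomic_dec(loan_count); /* the loan is settled by the write    */
}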
@@ -680,7 +680,7 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                        /* drop the reference, otherwise osd_put_bufs()
                         * will be releasing it - bad! */
                        lnb[i].dentry = NULL;
-                       cfs_atomic_dec(&osd->od_zerocopy_loan);
+                       atomic_dec(&osd->od_zerocopy_loan);
                }
 
                if (new_size < lnb[i].lnb_file_offset + lnb[i].len)