ofd_stack_fini(env, m, &m->ofd_dt_dev.dd_lu_dev);
ofd_procfs_fini(m);
- LASSERT(cfs_atomic_read(&d->ld_ref) == 0);
+ LASSERT(atomic_read(&d->ld_ref) == 0);
server_put_mount(obd->obd_name);
EXIT;
}
read_lock(&ofd->ofd_seq_list_lock);
cfs_list_for_each_entry(oseq, &ofd->ofd_seq_list, os_list) {
if (ostid_seq(&oseq->os_oi) == seq) {
- cfs_atomic_inc(&oseq->os_refc);
+ atomic_inc(&oseq->os_refc);
read_unlock(&ofd->ofd_seq_list_lock);
return oseq;
}
void ofd_seq_put(const struct lu_env *env, struct ofd_seq *oseq)
{
- if (cfs_atomic_dec_and_test(&oseq->os_refc))
+ if (atomic_dec_and_test(&oseq->os_refc))
ofd_seq_destroy(env, oseq);
}
write_lock(&ofd->ofd_seq_list_lock);
cfs_list_for_each_entry(os, &ofd->ofd_seq_list, os_list) {
if (ostid_seq(&os->os_oi) == ostid_seq(&new_seq->os_oi)) {
- cfs_atomic_inc(&os->os_refc);
+ atomic_inc(&os->os_refc);
write_unlock(&ofd->ofd_seq_list_lock);
/* The seq has not been added to the list */
ofd_seq_put(env, new_seq);
return os;
}
}
- cfs_atomic_inc(&new_seq->os_refc);
+ atomic_inc(&new_seq->os_refc);
cfs_list_add_tail(&new_seq->os_list, &ofd->ofd_seq_list);
ofd->ofd_seq_count++;
write_unlock(&ofd->ofd_seq_list_lock);
spin_lock_init(&oseq->os_last_oid_lock);
ostid_set_seq(&oseq->os_oi, seq);
- cfs_atomic_set(&oseq->os_refc, 1);
+ atomic_set(&oseq->os_refc, 1);
rc = dt_attr_get(env, dob, &info->fti_attr, BYPASS_CAPA);
if (rc)
struct ost_id os_oi;
spinlock_t os_last_oid_lock;
struct mutex os_create_lock;
- cfs_atomic_t os_refc;
+ atomic_t os_refc;
struct dt_object *os_lastid_obj;
unsigned long os_destroys_in_progress:1;
};
rc = seq_printf(m,
"used_mb: %d\n"
"busy_cnt: %d\n",
- (cfs_atomic_read(&cli->cl_lru_in_list) +
- cfs_atomic_read(&cli->cl_lru_busy)) >> shift,
- cfs_atomic_read(&cli->cl_lru_busy));
+ (atomic_read(&cli->cl_lru_in_list) +
+ atomic_read(&cli->cl_lru_busy)) >> shift,
+ atomic_read(&cli->cl_lru_busy));
return rc;
}
if (pages_number < 0)
return -ERANGE;
- rc = cfs_atomic_read(&cli->cl_lru_in_list) - pages_number;
+ rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
if (rc > 0) {
struct lu_env *env;
int refcheck;
{
struct obd_device *obd = m->private;
- return seq_printf(m, "%u\n", cfs_atomic_read(&obd->u.cli.cl_resends));
+ return seq_printf(m, "%u\n", atomic_read(&obd->u.cli.cl_resends));
}
static ssize_t osc_resend_count_seq_write(struct file *file, const char *buffer,
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
int val, rc;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
- if (val < 0)
- return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
- cfs_atomic_set(&obd->u.cli.cl_resends, val);
+ atomic_set(&obd->u.cli.cl_resends, val);
- return count;
+ return count;
}
LPROC_SEQ_FOPS(osc_resend_count);
{
struct obd_device *obd = m->private;
return seq_printf(m, "%u\n",
- cfs_atomic_read(&obd->u.cli.cl_destroy_in_flight));
+ atomic_read(&obd->u.cli.cl_destroy_in_flight));
}
LPROC_SEQ_FOPS_RO(osc_destroys_in_flight);
struct client_obd *cli = &dev->u.cli;
int pages, mb;
- pages = cfs_atomic_read(&cli->cl_unstable_count);
+ pages = atomic_read(&cli->cl_unstable_count);
mb = (pages * PAGE_CACHE_SIZE) >> 20;
return seq_printf(m, "unstable_pages: %8d\n"
static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timeval now;
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
+ struct timeval now;
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+ unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
int i;
do_gettimeofday(&now);
client_obd_list_lock(&cli->cl_loi_list_lock);
- seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
- seq_printf(seq, "read RPCs in flight: %d\n",
- cli->cl_r_in_flight);
- seq_printf(seq, "write RPCs in flight: %d\n",
- cli->cl_w_in_flight);
- seq_printf(seq, "pending write pages: %d\n",
- cfs_atomic_read(&cli->cl_pending_w_pages));
- seq_printf(seq, "pending read pages: %d\n",
- cfs_atomic_read(&cli->cl_pending_r_pages));
-
- seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_printf(seq, "pages per rpc rpcs %% cum %% |");
- seq_printf(seq, " rpcs %% cum %%\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- 1 << i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+ seq_printf(seq, "read RPCs in flight: %d\n",
+ cli->cl_r_in_flight);
+ seq_printf(seq, "write RPCs in flight: %d\n",
+ cli->cl_w_in_flight);
+ seq_printf(seq, "pending write pages: %d\n",
+ atomic_read(&cli->cl_pending_w_pages));
+ seq_printf(seq, "pending read pages: %d\n",
+ atomic_read(&cli->cl_pending_r_pages));
+
+ seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_printf(seq, "pages per rpc rpcs %% cum %% |");
+ seq_printf(seq, " rpcs %% cum %%\n");
+
+ read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
+ write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
+
+ read_cum = 0;
+ write_cum = 0;
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
+ unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
+
+ read_cum += r;
+ write_cum += w;
+ seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
+ 1 << i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
+ if (read_cum == read_tot && write_cum == write_tot)
+ break;
+ }
- seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
- seq_printf(seq, " rpcs %% cum %%\n");
+ seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
+ seq_printf(seq, " rpcs %% cum %%\n");
- read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
+ read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
+ write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
read_cum = 0;
write_cum = 0;
static const char *oes_strings[] = {
"inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
-#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
- struct osc_extent *__ext = (extent); \
- char __buf[16]; \
- \
- CDEBUG(lvl, \
- "extent %p@{" EXTSTR ", " \
- "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
- /* ----- extent part 0 ----- */ \
- __ext, EXTPARA(__ext), \
- /* ----- part 1 ----- */ \
- cfs_atomic_read(&__ext->oe_refc), \
- cfs_atomic_read(&__ext->oe_users), \
- list_empty_marker(&__ext->oe_link), \
- oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
- __ext->oe_obj, \
- /* ----- part 2 ----- */ \
- __ext->oe_grants, __ext->oe_nr_pages, \
- list_empty_marker(&__ext->oe_pages), \
- waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
- /* ----- part 4 ----- */ \
- ## __VA_ARGS__); \
+#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
+ struct osc_extent *__ext = (extent); \
+ char __buf[16]; \
+ \
+ CDEBUG(lvl, \
+ "extent %p@{" EXTSTR ", " \
+ "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
+ /* ----- extent part 0 ----- */ \
+ __ext, EXTPARA(__ext), \
+ /* ----- part 1 ----- */ \
+ atomic_read(&__ext->oe_refc), \
+ atomic_read(&__ext->oe_users), \
+ list_empty_marker(&__ext->oe_link), \
+ oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
+ __ext->oe_obj, \
+ /* ----- part 2 ----- */ \
+ __ext->oe_grants, __ext->oe_nr_pages, \
+ list_empty_marker(&__ext->oe_pages), \
+ waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
+ __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
+ /* ----- part 4 ----- */ \
+ ## __VA_ARGS__); \
} while (0)
#undef EASSERTF
if (ext->oe_state >= OES_STATE_MAX)
GOTO(out, rc = 10);
- if (cfs_atomic_read(&ext->oe_refc) <= 0)
+ if (atomic_read(&ext->oe_refc) <= 0)
GOTO(out, rc = 20);
- if (cfs_atomic_read(&ext->oe_refc) < cfs_atomic_read(&ext->oe_users))
+ if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
GOTO(out, rc = 30);
switch (ext->oe_state) {
GOTO(out, rc = 0);
break;
case OES_ACTIVE:
- if (cfs_atomic_read(&ext->oe_users) == 0)
+ if (atomic_read(&ext->oe_users) == 0)
GOTO(out, rc = 40);
if (ext->oe_hp)
GOTO(out, rc = 50);
if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
GOTO(out, rc = 65);
default:
- if (cfs_atomic_read(&ext->oe_users) > 0)
+ if (atomic_read(&ext->oe_users) > 0)
GOTO(out, rc = 70);
}
RB_CLEAR_NODE(&ext->oe_node);
ext->oe_obj = obj;
- cfs_atomic_set(&ext->oe_refc, 1);
- cfs_atomic_set(&ext->oe_users, 0);
+ atomic_set(&ext->oe_refc, 1);
+ atomic_set(&ext->oe_users, 0);
CFS_INIT_LIST_HEAD(&ext->oe_link);
ext->oe_state = OES_INV;
CFS_INIT_LIST_HEAD(&ext->oe_pages);
static struct osc_extent *osc_extent_get(struct osc_extent *ext)
{
- LASSERT(cfs_atomic_read(&ext->oe_refc) >= 0);
- cfs_atomic_inc(&ext->oe_refc);
+ LASSERT(atomic_read(&ext->oe_refc) >= 0);
+ atomic_inc(&ext->oe_refc);
return ext;
}
static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
{
- LASSERT(cfs_atomic_read(&ext->oe_refc) > 0);
- if (cfs_atomic_dec_and_test(&ext->oe_refc)) {
+ LASSERT(atomic_read(&ext->oe_refc) > 0);
+ if (atomic_dec_and_test(&ext->oe_refc)) {
LASSERT(cfs_list_empty(&ext->oe_link));
- LASSERT(cfs_atomic_read(&ext->oe_users) == 0);
+ LASSERT(atomic_read(&ext->oe_users) == 0);
LASSERT(ext->oe_state == OES_INV);
LASSERT(!ext->oe_intree);
*/
static void osc_extent_put_trust(struct osc_extent *ext)
{
- LASSERT(cfs_atomic_read(&ext->oe_refc) > 1);
+ LASSERT(atomic_read(&ext->oe_refc) > 1);
LASSERT(osc_object_is_locked(ext->oe_obj));
- cfs_atomic_dec(&ext->oe_refc);
+ atomic_dec(&ext->oe_refc);
}
/**
osc_extent_state_set(ext, OES_ACTIVE);
osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
}
- cfs_atomic_inc(&ext->oe_users);
+ atomic_inc(&ext->oe_users);
cfs_list_del_init(&ext->oe_link);
return osc_extent_get(ext);
}
int rc = 0;
ENTRY;
- LASSERT(cfs_atomic_read(&ext->oe_users) > 0);
+ LASSERT(atomic_read(&ext->oe_users) > 0);
LASSERT(sanity_check(ext) == 0);
LASSERT(ext->oe_grants > 0);
- if (cfs_atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
+ if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
LASSERT(ext->oe_state == OES_ACTIVE);
if (ext->oe_trunc_pending) {
/* a truncate process is waiting for this extent.
(OSC), osc_is_ready(OSC), \
list_empty_marker(&(OSC)->oo_hp_ready_item), \
list_empty_marker(&(OSC)->oo_ready_item), \
- cfs_atomic_read(&(OSC)->oo_nr_writes), \
+ atomic_read(&(OSC)->oo_nr_writes), \
list_empty_marker(&(OSC)->oo_hp_exts), \
list_empty_marker(&(OSC)->oo_urgent_exts), \
- cfs_atomic_read(&(OSC)->oo_nr_reads), \
+ atomic_read(&(OSC)->oo_nr_reads), \
list_empty_marker(&(OSC)->oo_reading_exts), \
##args)
RETURN(0);
}
-#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
- struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
- "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
- "reserved: %ld, flight: %d } lru {in list: %d, " \
- "left: %d, waiters: %d }" fmt, \
- __tmp->cl_import->imp_obd->obd_name, \
- __tmp->cl_dirty, __tmp->cl_dirty_max, \
- cfs_atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
- cfs_atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
- __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
- __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
- cfs_atomic_read(&__tmp->cl_lru_in_list), \
- cfs_atomic_read(&__tmp->cl_lru_busy), \
- cfs_atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
+#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
+ struct client_obd *__tmp = (cli); \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
+ "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
+ "reserved: %ld, flight: %d } lru {in list: %d, " \
+ "left: %d, waiters: %d }" fmt, \
+ __tmp->cl_import->imp_obd->obd_name, \
+ __tmp->cl_dirty, __tmp->cl_dirty_max, \
+ atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
+ atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
+ __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
+ __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
+ atomic_read(&__tmp->cl_lru_in_list), \
+ atomic_read(&__tmp->cl_lru_busy), \
+ atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
/* caller must hold loi_list_lock */
{
LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- cfs_atomic_inc(&obd_dirty_pages);
+ atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_CACHE_SIZE;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
- cfs_atomic_dec(&obd_dirty_pages);
+ atomic_dec(&obd_dirty_pages);
cli->cl_dirty -= PAGE_CACHE_SIZE;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
- cfs_atomic_dec(&obd_dirty_transit_pages);
+ atomic_dec(&obd_dirty_transit_pages);
cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
}
EXIT;
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
client_obd_list_lock(&cli->cl_loi_list_lock);
- cfs_atomic_sub(nr_pages, &obd_dirty_pages);
+ atomic_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
return 0;
if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
- cfs_atomic_read(&obd_unstable_pages) + 1 +
- cfs_atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
+ atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit += PAGE_CACHE_SIZE;
- cfs_atomic_inc(&obd_dirty_transit_pages);
+ atomic_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
rc = 1;
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
- (cfs_atomic_read(&obd_unstable_pages) + 1 +
- cfs_atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
+ (atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
"osc max %ld, sys max %d\n", cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
invalid_import = 1;
if (cmd & OBD_BRW_WRITE) {
- if (cfs_atomic_read(&osc->oo_nr_writes) == 0)
+ if (atomic_read(&osc->oo_nr_writes) == 0)
RETURN(0);
if (invalid_import) {
CDEBUG(D_CACHE, "invalid import forcing RPC\n");
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
}
- if (cfs_atomic_read(&osc->oo_nr_writes) >=
+ if (atomic_read(&osc->oo_nr_writes) >=
cli->cl_max_pages_per_rpc)
RETURN(1);
} else {
- if (cfs_atomic_read(&osc->oo_nr_reads) == 0)
+ if (atomic_read(&osc->oo_nr_reads) == 0)
RETURN(0);
if (invalid_import) {
CDEBUG(D_CACHE, "invalid import forcing RPC\n");
{
struct client_obd *cli = osc_cli(obj);
if (cmd & OBD_BRW_WRITE) {
- cfs_atomic_add(delta, &obj->oo_nr_writes);
- cfs_atomic_add(delta, &cli->cl_pending_w_pages);
- LASSERT(cfs_atomic_read(&obj->oo_nr_writes) >= 0);
+ atomic_add(delta, &obj->oo_nr_writes);
+ atomic_add(delta, &cli->cl_pending_w_pages);
+ LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
} else {
- cfs_atomic_add(delta, &obj->oo_nr_reads);
- cfs_atomic_add(delta, &cli->cl_pending_r_pages);
- LASSERT(cfs_atomic_read(&obj->oo_nr_reads) >= 0);
+ atomic_add(delta, &obj->oo_nr_reads);
+ atomic_add(delta, &cli->cl_pending_r_pages);
+ LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
}
OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
}
}
on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
- cfs_atomic_read(&osc->oo_nr_writes) > 0);
+ atomic_read(&osc->oo_nr_writes) > 0);
on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
- cfs_atomic_read(&osc->oo_nr_reads) > 0);
+ atomic_read(&osc->oo_nr_reads) > 0);
return osc_is_ready(osc);
}
for (i = 0; i < page_count; i++)
dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
- cfs_atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
- LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+ atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
- cfs_atomic_sub(page_count, &cli->cl_unstable_count);
- LASSERT(cfs_atomic_read(&cli->cl_unstable_count) >= 0);
+ atomic_sub(page_count, &cli->cl_unstable_count);
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
- cfs_atomic_sub(page_count, &obd_unstable_pages);
- LASSERT(cfs_atomic_read(&obd_unstable_pages) >= 0);
+ atomic_sub(page_count, &obd_unstable_pages);
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
spin_lock(&req->rq_lock);
req->rq_committed = 1;
for (i = 0; i < page_count; i++)
inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
- LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
- cfs_atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+ atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
- LASSERT(cfs_atomic_read(&cli->cl_unstable_count) >= 0);
- cfs_atomic_add(page_count, &cli->cl_unstable_count);
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+ atomic_add(page_count, &cli->cl_unstable_count);
- LASSERT(cfs_atomic_read(&obd_unstable_pages) >= 0);
- cfs_atomic_add(page_count, &obd_unstable_pages);
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+ atomic_add(page_count, &obd_unstable_pages);
spin_lock(&req->rq_lock);
if (!async) {
/* disable osc_lru_shrink() temporarily to avoid
* potential stack overrun problem. LU-2859 */
- cfs_atomic_inc(&cli->cl_lru_shrinkers);
+ atomic_inc(&cli->cl_lru_shrinkers);
client_obd_list_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli, pol);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- cfs_atomic_dec(&cli->cl_lru_shrinkers);
+ atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
LASSERT(cli->cl_writeback_work != NULL);
cfs_list_t oo_reading_exts;
- cfs_atomic_t oo_nr_reads;
- cfs_atomic_t oo_nr_writes;
+ atomic_t oo_nr_reads;
+ atomic_t oo_nr_writes;
/** Protect extent tree. Will be used to protect
* oo_{read|write}_pages soon. */
*/
struct osc_extent {
/** red-black tree node */
- struct rb_node oe_node;
+ struct rb_node oe_node;
/** osc_object of this extent */
- struct osc_object *oe_obj;
+ struct osc_object *oe_obj;
/** refcount, removed from red-black tree if reaches zero. */
- cfs_atomic_t oe_refc;
+ atomic_t oe_refc;
/** busy if non-zero */
- cfs_atomic_t oe_users;
+ atomic_t oe_users;
/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
- cfs_list_t oe_link;
+ cfs_list_t oe_link;
/** state of this extent */
- unsigned int oe_state;
+ unsigned int oe_state;
/** flags for this extent. */
- unsigned int oe_intree:1,
+ unsigned int oe_intree:1,
/** 0 is write, 1 is read */
- oe_rw:1,
- oe_srvlock:1,
- oe_memalloc:1,
+ oe_rw:1,
+ oe_srvlock:1,
+ oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
* is released, it will turn into TRUNC state instead of CACHE. */
- oe_trunc_pending:1,
+ oe_trunc_pending:1,
/** this extent should be written asap and someone may wait for the
* write to finish. This bit is usually set along with urgent if
* the extent was CACHE state.
* fsync_wait extent can't be merged because new extent region may
* exceed fsync range. */
- oe_fsync_wait:1,
+ oe_fsync_wait:1,
/** covering lock is being canceled */
- oe_hp:1,
+ oe_hp:1,
/** this extent should be written back asap. set if one of pages is
* called by page WB daemon, or sync write or reading requests. */
- oe_urgent:1;
+ oe_urgent:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */
- unsigned int oe_grants;
+ unsigned int oe_grants;
/** # of dirty pages in this extent */
- unsigned int oe_nr_pages;
+ unsigned int oe_nr_pages;
/** list of pending oap pages. Pages in this list are NOT sorted. */
- cfs_list_t oe_pages;
+ cfs_list_t oe_pages;
/** Since an extent has to be written out in atomic, this is used to
* remember the next page need to be locked to write this extent out.
* Not used right now.
*/
- struct osc_page *oe_next_page;
+ struct osc_page *oe_next_page;
/** start and end index of this extent, include start and end
* themselves. Page offset here is the page index of osc_pages.
* oe_start is used as keyword for red-black tree. */
- pgoff_t oe_start;
- pgoff_t oe_end;
+ pgoff_t oe_start;
+ pgoff_t oe_end;
/** maximum ending index of this extent, this is limited by
* max_pages_per_rpc, lock extent and chunk size. */
- pgoff_t oe_max_end;
+ pgoff_t oe_max_end;
/** waitqueue - for those who want to be notified if this extent's
* state has changed. */
- wait_queue_head_t oe_waitq;
+ wait_queue_head_t oe_waitq;
/** lock covering this extent */
- struct cl_lock *oe_osclock;
+ struct cl_lock *oe_osclock;
/** terminator of this extent. Must be true if this extent is in IO. */
- struct task_struct *oe_owner;
+ struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
* this value can be known by outside world. */
- int oe_rc;
+ int oe_rc;
/** max pages per rpc when this extent was created */
- unsigned int oe_mppr;
+ unsigned int oe_mppr;
};
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (npages > max_pages)
npages = max_pages;
- c = cfs_atomic_read(cli->cl_lru_left);
+ c = atomic_read(cli->cl_lru_left);
if (c < npages && osc_lru_reclaim(cli) > 0)
- c = cfs_atomic_read(cli->cl_lru_left);
+ c = atomic_read(cli->cl_lru_left);
while (c >= npages) {
- if (c == cfs_atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
oio->oi_lru_reserved = npages;
break;
}
- c = cfs_atomic_read(cli->cl_lru_left);
+ c = atomic_read(cli->cl_lru_left);
}
RETURN(0);
struct client_obd *cli = osc_cli(osc);
if (oio->oi_lru_reserved > 0) {
- cfs_atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
oio->oi_lru_reserved = 0;
}
}
CFS_INIT_LIST_HEAD(&osc->oo_urgent_exts);
CFS_INIT_LIST_HEAD(&osc->oo_rpc_exts);
CFS_INIT_LIST_HEAD(&osc->oo_reading_exts);
- cfs_atomic_set(&osc->oo_nr_reads, 0);
- cfs_atomic_set(&osc->oo_nr_writes, 0);
+ atomic_set(&osc->oo_nr_reads, 0);
+ atomic_set(&osc->oo_nr_writes, 0);
spin_lock_init(&osc->oo_lock);
spin_lock_init(&osc->oo_tree_lock);
LASSERT(cfs_list_empty(&osc->oo_urgent_exts));
LASSERT(cfs_list_empty(&osc->oo_rpc_exts));
LASSERT(cfs_list_empty(&osc->oo_reading_exts));
- LASSERT(cfs_atomic_read(&osc->oo_nr_reads) == 0);
- LASSERT(cfs_atomic_read(&osc->oo_nr_writes) == 0);
+ LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
+ LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
lu_object_fini(obj);
OBD_SLAB_FREE_PTR(osc, osc_object_kmem);
osc_list(&obj->oo_hp_ready_item),
osc_list(&obj->oo_write_item),
osc_list(&obj->oo_read_item),
- cfs_atomic_read(&obj->oo_nr_reads),
+ atomic_read(&obj->oo_nr_reads),
osc_list(&obj->oo_reading_exts),
- cfs_atomic_read(&obj->oo_nr_writes),
+ atomic_read(&obj->oo_nr_writes),
osc_list(&obj->oo_hp_exts),
osc_list(&obj->oo_urgent_exts));
}
if (cli == NULL)
return 0;
- obd_upages = cfs_atomic_read(&obd_unstable_pages);
- obd_dpages = cfs_atomic_read(&obd_dirty_pages);
+ obd_upages = atomic_read(&obd_unstable_pages);
+ obd_dpages = atomic_read(&obd_dirty_pages);
- osc_upages = cfs_atomic_read(&cli->cl_unstable_count);
+ osc_upages = atomic_read(&cli->cl_unstable_count);
/* obd_max_dirty_pages is the max number of (dirty + unstable)
* pages allowed at any given time. To simulate an unstable page
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = cfs_atomic_read(&cli->cl_lru_in_list);
+ int pages = atomic_read(&cli->cl_lru_in_list);
unsigned long budget;
- budget = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
+ budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain faireness among OSCs. */
- if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
if (pages >= budget)
return lru_shrink_max;
else if (pages >= budget / 2)
if (npages > 0) {
client_obd_list_lock(&cli->cl_lru_list_lock);
cfs_list_splice_tail(&lru, &cli->cl_lru_list);
- cfs_atomic_sub(npages, &cli->cl_lru_busy);
- cfs_atomic_add(npages, &cli->cl_lru_in_list);
+ atomic_sub(npages, &cli->cl_lru_busy);
+ atomic_add(npages, &cli->cl_lru_in_list);
client_obd_list_unlock(&cli->cl_lru_list_lock);
/* XXX: May set force to be true for better performance */
static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
- LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
+ LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
cfs_list_del_init(&opg->ops_lru);
- cfs_atomic_dec(&cli->cl_lru_in_list);
+ atomic_dec(&cli->cl_lru_in_list);
}
/**
if (!cfs_list_empty(&opg->ops_lru)) {
__osc_lru_del(cli, opg);
} else {
- LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
- cfs_atomic_dec(&cli->cl_lru_busy);
+ LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
+ atomic_dec(&cli->cl_lru_busy);
}
client_obd_list_unlock(&cli->cl_lru_list_lock);
- cfs_atomic_inc(cli->cl_lru_left);
+ atomic_inc(cli->cl_lru_left);
/* this is a great place to release more LRU pages if
* this osc occupies too many LRU pages and kernel is
* stealing one of them. */
client_obd_list_lock(&cli->cl_lru_list_lock);
__osc_lru_del(cli, opg);
client_obd_list_unlock(&cli->cl_lru_list_lock);
- cfs_atomic_inc(&cli->cl_lru_busy);
+ atomic_inc(&cli->cl_lru_busy);
}
}
int rc = 0;
ENTRY;
- LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) >= 0);
- if (cfs_atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
RETURN(0);
if (!force) {
- if (cfs_atomic_read(&cli->cl_lru_shrinkers) > 0)
+ if (atomic_read(&cli->cl_lru_shrinkers) > 0)
RETURN(-EBUSY);
- if (cfs_atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
- cfs_atomic_dec(&cli->cl_lru_shrinkers);
+ if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+ atomic_dec(&cli->cl_lru_shrinkers);
RETURN(-EBUSY);
}
} else {
- cfs_atomic_inc(&cli->cl_lru_shrinkers);
+ atomic_inc(&cli->cl_lru_shrinkers);
}
pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
client_obd_list_lock(&cli->cl_lru_list_lock);
- maxscan = min(target << 1, cfs_atomic_read(&cli->cl_lru_in_list));
+ maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
while (!cfs_list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
bool will_free = false;
cl_object_put(env, clobj);
}
- cfs_atomic_dec(&cli->cl_lru_shrinkers);
+ atomic_dec(&cli->cl_lru_shrinkers);
if (count > 0) {
- cfs_atomic_add(count, cli->cl_lru_left);
+ atomic_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
RETURN(count > 0 ? count : rc);
static inline int max_to_shrink(struct client_obd *cli)
{
- return min(cfs_atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
+ return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
int osc_lru_reclaim(struct client_obd *cli)
CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
cli->cl_import->imp_obd->obd_name, cli,
- cfs_atomic_read(&cli->cl_lru_in_list),
- cfs_atomic_read(&cli->cl_lru_busy));
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
cache->ccc_lru_shrinkers++;
cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- max_scans = cfs_atomic_read(&cache->ccc_users);
+ max_scans = atomic_read(&cache->ccc_users);
while (--max_scans > 0 && !cfs_list_empty(&cache->ccc_lru)) {
cli = cfs_list_entry(cache->ccc_lru.next, struct client_obd,
cl_lru_osc);
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
cli->cl_import->imp_obd->obd_name, cli,
- cfs_atomic_read(&cli->cl_lru_in_list),
- cfs_atomic_read(&cli->cl_lru_busy));
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (osc_cache_too_much(cli) > 0) {
goto out;
}
- LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
- while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ LASSERT(atomic_read(cli->cl_lru_left) >= 0);
+ while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
cond_resched();
rc = l_wait_event(osc_lru_waitq,
- cfs_atomic_read(cli->cl_lru_left) > 0,
+ atomic_read(cli->cl_lru_left) > 0,
&lwi);
if (rc < 0)
break;
out:
if (rc >= 0) {
- cfs_atomic_inc(&cli->cl_lru_busy);
+ atomic_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
rc = 0;
}
{
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- cfs_atomic_dec(&cli->cl_destroy_in_flight);
+ atomic_dec(&cli->cl_destroy_in_flight);
wake_up(&cli->cl_destroy_waitq);
return 0;
}
static int osc_can_send_destroy(struct client_obd *cli)
{
- if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
+ if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
cli->cl_max_rpcs_in_flight) {
/* The destroy request can be sent */
return 1;
}
- if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
+ if (atomic_dec_return(&cli->cl_destroy_in_flight) <
cli->cl_max_rpcs_in_flight) {
/*
* The counter has been modified between the two atomic
CERROR("dirty %lu - %lu > dirty_max %lu\n",
cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
- } else if (unlikely(cfs_atomic_read(&obd_unstable_pages) +
- cfs_atomic_read(&obd_dirty_pages) -
- cfs_atomic_read(&obd_dirty_transit_pages) >
+ } else if (unlikely(atomic_read(&obd_unstable_pages) +
+ atomic_read(&obd_dirty_pages) -
+ atomic_read(&obd_dirty_transit_pages) >
(long)(obd_max_dirty_pages + 1))) {
- /* The cfs_atomic_read() allowing the cfs_atomic_inc() are
+ /* The atomic_read() and atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1). */
CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
cli->cl_import->imp_obd->obd_name,
- cfs_atomic_read(&obd_unstable_pages),
- cfs_atomic_read(&obd_dirty_pages),
- cfs_atomic_read(&obd_dirty_transit_pages),
+ atomic_read(&obd_unstable_pages),
+ atomic_read(&obd_dirty_pages),
+ atomic_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
} else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
LASSERT(cli->cl_cache == NULL); /* only once */
cli->cl_cache = (struct cl_client_cache *)val;
- cfs_atomic_inc(&cli->cl_cache->ccc_users);
+ atomic_inc(&cli->cl_cache->ccc_users);
cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
- int nr = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
+ int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
nr = osc_lru_shrink(env, cli, min(nr, target), true);
/* lru cleanup */
if (cli->cl_cache != NULL) {
- LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
+ LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
spin_lock(&cli->cl_cache->ccc_lru_lock);
cfs_list_del_init(&cli->cl_lru_osc);
spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
- cfs_atomic_dec(&cli->cl_cache->ccc_users);
+ atomic_dec(&cli->cl_cache->ccc_users);
cli->cl_cache = NULL;
}
}
static struct thandle *osd_trans_create(const struct lu_env *env,
- struct dt_device *d)
+ struct dt_device *d)
{
- struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_iobuf *iobuf = &oti->oti_iobuf;
- struct osd_thandle *oh;
- struct thandle *th;
- ENTRY;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_iobuf *iobuf = &oti->oti_iobuf;
+ struct osd_thandle *oh;
+ struct thandle *th;
+ ENTRY;
- /* on pending IO in this thread should left from prev. request */
- LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ /* on pending IO in this thread should left from prev. request */
+ LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
th = ERR_PTR(-ENOMEM);
OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
* completed otherwise iobuf may be corrupted by different request
*/
wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ atomic_read(&iobuf->dr_numreqs) == 0);
if (!rc)
rc = iobuf->dr_error;
struct osd_obj_map *od_ost_map;
struct osd_mdobj_map *od_mdt_map;
- unsigned long long od_readcache_max_filesize;
- int od_read_cache;
- int od_writethrough_cache;
+ unsigned long long od_readcache_max_filesize;
+ int od_read_cache;
+ int od_writethrough_cache;
- struct brw_stats od_brw_stats;
- cfs_atomic_t od_r_in_flight;
- cfs_atomic_t od_w_in_flight;
+ struct brw_stats od_brw_stats;
+ atomic_t od_r_in_flight;
+ atomic_t od_w_in_flight;
struct mutex od_otable_mutex;
struct osd_otable_it *od_otable_it;
struct osd_iobuf {
wait_queue_head_t dr_wait;
- cfs_atomic_t dr_numreqs; /* number of reqs being processed */
+ atomic_t dr_numreqs; /* number of reqs being processed */
int dr_max_pages;
int dr_npages;
int dr_error;
LASSERTF(iobuf->dr_elapsed_valid == 0,
"iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
- cfs_atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
+ atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
iobuf->dr_init_at);
LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
init_waitqueue_head(&iobuf->dr_wait);
- cfs_atomic_set(&iobuf->dr_numreqs, 0);
+ atomic_set(&iobuf->dr_numreqs, 0);
iobuf->dr_npages = 0;
iobuf->dr_error = 0;
iobuf->dr_dev = d;
/* CAVEAT EMPTOR: possibly in IRQ context
* DO NOT record procfs stats here!!! */
- if (unlikely(iobuf == NULL)) {
- CERROR("***** bio->bi_private is NULL! This should never "
- "happen. Normally, I would crash here, but instead I "
- "will dump the bio contents to the console. Please "
- "report this to <http://jira.whamcloud.com/> , along "
- "with any interesting messages leading up to this point "
- "(like SCSI errors, perhaps). Because bi_private is "
- "NULL, I can't wake up the thread that initiated this "
- "IO - you will probably have to reboot this node.\n");
- CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
- "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
- "bi_private: %p\n", bio->bi_next, bio->bi_flags,
- bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
- bio->bi_private);
+ if (unlikely(iobuf == NULL)) {
+ CERROR("***** bio->bi_private is NULL! This should never "
+ "happen. Normally, I would crash here, but instead I "
+ "will dump the bio contents to the console. Please "
+ "report this to <http://jira.whamcloud.com/> , along "
+ "with any interesting messages leading up to this point "
+ "(like SCSI errors, perhaps). Because bi_private is "
+ "NULL, I can't wake up the thread that initiated this "
+ "IO - you will probably have to reboot this node.\n");
+ CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
+ "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
+ "bi_private: %p\n", bio->bi_next, bio->bi_flags,
+ bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
+ bio->bi_end_io, atomic_read(&bio->bi_cnt),
+ bio->bi_private);
return;
- }
+ }
/* the check is outside of the cycle for performance reason -bzzz */
if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
SetPageUptodate(bvl->bv_page);
LASSERT(PageLocked(bvl->bv_page));
}
- cfs_atomic_dec(&iobuf->dr_dev->od_r_in_flight);
+ atomic_dec(&iobuf->dr_dev->od_r_in_flight);
} else {
- cfs_atomic_dec(&iobuf->dr_dev->od_w_in_flight);
+ atomic_dec(&iobuf->dr_dev->od_w_in_flight);
}
- /* any real error is good enough -bzzz */
- if (error != 0 && iobuf->dr_error == 0)
- iobuf->dr_error = error;
+ /* any real error is good enough -bzzz */
+ if (error != 0 && iobuf->dr_error == 0)
+ iobuf->dr_error = error;
/*
* set dr_elapsed before dr_numreqs turns to 0, otherwise
* data in this processing and an assertion in a subsequent
* call to OSD.
*/
- if (cfs_atomic_read(&iobuf->dr_numreqs) == 1) {
+ if (atomic_read(&iobuf->dr_numreqs) == 1) {
iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
iobuf->dr_elapsed_valid = 1;
}
- if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+ if (atomic_dec_and_test(&iobuf->dr_numreqs))
wake_up(&iobuf->dr_wait);
- /* Completed bios used to be chained off iobuf->dr_bios and freed in
- * filter_clear_dreq(). It was then possible to exhaust the biovec-256
- * mempool when serious on-disk fragmentation was encountered,
- * deadlocking the OST. The bios are now released as soon as complete
- * so the pool cannot be exhausted while IOs are competing. bug 10076 */
- bio_put(bio);
+ /* Completed bios used to be chained off iobuf->dr_bios and freed in
+ * filter_clear_dreq(). It was then possible to exhaust the biovec-256
+ * mempool when serious on-disk fragmentation was encountered,
+ * deadlocking the OST. The bios are now released as soon as complete
+ * so the pool cannot be exhausted while IOs are competing. bug 10076 */
+ bio_put(bio);
}
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
- struct osd_device *osd = iobuf->dr_dev;
- struct obd_histogram *h = osd->od_brw_stats.hist;
-
- iobuf->dr_frags++;
- cfs_atomic_inc(&iobuf->dr_numreqs);
-
- if (iobuf->dr_rw == 0) {
- cfs_atomic_inc(&osd->od_r_in_flight);
- lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
- cfs_atomic_read(&osd->od_r_in_flight));
- lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
- } else if (iobuf->dr_rw == 1) {
- cfs_atomic_inc(&osd->od_w_in_flight);
- lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
- cfs_atomic_read(&osd->od_w_in_flight));
- lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
- } else {
- LBUG();
- }
+ struct osd_device *osd = iobuf->dr_dev;
+ struct obd_histogram *h = osd->od_brw_stats.hist;
+
+ iobuf->dr_frags++;
+ atomic_inc(&iobuf->dr_numreqs);
+
+ if (iobuf->dr_rw == 0) {
+ atomic_inc(&osd->od_r_in_flight);
+ lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
+ atomic_read(&osd->od_r_in_flight));
+ lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
+ } else if (iobuf->dr_rw == 1) {
+ atomic_inc(&osd->od_w_in_flight);
+ lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
+ atomic_read(&osd->od_w_in_flight));
+ lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
+ } else {
+ LBUG();
+ }
}
static void osd_submit_bio(int rw, struct bio *bio)
rc = 0;
}
- out:
- /* in order to achieve better IO throughput, we don't wait for writes
- * completion here. instead we proceed with transaction commit in
- * parallel and wait for IO completion once transaction is stopped
- * see osd_trans_stop() for more details -bzzz */
- if (iobuf->dr_rw == 0) {
+out:
+ /* in order to achieve better IO throughput, we don't wait for writes
+ * completion here. instead we proceed with transaction commit in
+ * parallel and wait for IO completion once transaction is stopped
+ * see osd_trans_stop() for more details -bzzz */
+ if (iobuf->dr_rw == 0) {
wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
- }
+ atomic_read(&iobuf->dr_numreqs) == 0);
+ }
- if (rc == 0)
- rc = iobuf->dr_error;
- RETURN(rc);
+ if (rc == 0)
+ rc = iobuf->dr_error;
+ RETURN(rc);
}
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
{
ENTRY;
- if (cfs_atomic_read(&o->od_zerocopy_alloc))
+ if (atomic_read(&o->od_zerocopy_alloc))
CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
- cfs_atomic_read(&o->od_zerocopy_alloc));
- if (cfs_atomic_read(&o->od_zerocopy_loan))
+ atomic_read(&o->od_zerocopy_alloc));
+ if (atomic_read(&o->od_zerocopy_loan))
CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
- cfs_atomic_read(&o->od_zerocopy_loan));
- if (cfs_atomic_read(&o->od_zerocopy_pin))
+ atomic_read(&o->od_zerocopy_loan));
+ if (atomic_read(&o->od_zerocopy_pin))
CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
- cfs_atomic_read(&o->od_zerocopy_pin));
+ atomic_read(&o->od_zerocopy_pin));
if (o->od_objset.os != NULL)
udmu_objset_close(&o->od_objset);
/* used to debug zerocopy logic: the fields track all
* allocated, loaned and referenced buffers in use.
* to be removed once the change is tested well. */
- cfs_atomic_t od_zerocopy_alloc;
- cfs_atomic_t od_zerocopy_loan;
- cfs_atomic_t od_zerocopy_pin;
+ atomic_t od_zerocopy_alloc;
+ atomic_t od_zerocopy_loan;
+ atomic_t od_zerocopy_pin;
arc_prune_t *arc_prune_cb;
};
/* this is anonymous page allocated for copy-write */
lnb[i].page->mapping = NULL;
__free_page(lnb[i].page);
- cfs_atomic_dec(&osd->od_zerocopy_alloc);
+ atomic_dec(&osd->od_zerocopy_alloc);
} else {
/* see comment in osd_bufs_get_read() */
ptr = (unsigned long)lnb[i].dentry;
if (ptr & 1UL) {
ptr &= ~1UL;
dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
- cfs_atomic_dec(&osd->od_zerocopy_pin);
+ atomic_dec(&osd->od_zerocopy_pin);
} else if (lnb[i].dentry != NULL) {
dmu_return_arcbuf((void *)lnb[i].dentry);
- cfs_atomic_dec(&osd->od_zerocopy_loan);
+ atomic_dec(&osd->od_zerocopy_loan);
}
}
lnb[i].page = NULL;
LASSERT(len > 0);
- cfs_atomic_inc(&osd->od_zerocopy_pin);
+ atomic_inc(&osd->od_zerocopy_pin);
bufoff = off - dbp[i]->db_offset;
tocpy = min_t(int, dbp[i]->db_size - bufoff, len);
if (unlikely(abuf == NULL))
GOTO(out_err, rc = -ENOMEM);
- cfs_atomic_inc(&osd->od_zerocopy_loan);
+ atomic_inc(&osd->od_zerocopy_loan);
/* go over pages arcbuf contains, put them as
* local niobufs for ptlrpc's bulks */
LASSERT(lnb[i].page->mapping == NULL);
lnb[i].page->mapping = (void *)obj;
- cfs_atomic_inc(&osd->od_zerocopy_alloc);
+ atomic_inc(&osd->od_zerocopy_alloc);
lprocfs_counter_add(osd->od_stats,
LPROC_OSD_COPY_IO, 1);
/* drop the reference, otherwise osd_put_bufs()
* will be releasing it - bad! */
lnb[i].dentry = NULL;
- cfs_atomic_dec(&osd->od_zerocopy_loan);
+ atomic_dec(&osd->od_zerocopy_loan);
}
if (new_size < lnb[i].lnb_file_offset + lnb[i].len)