const struct osc_page *opg,
enum cl_lock_mode mode, int pending, int unref)
{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- ldlm_policy_data_t *policy;
- ldlm_mode_t dlmmode;
- int flags;
-
- cfs_might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
+ struct cl_page *page;
+ struct osc_object *obj;
+ struct osc_thread_info *info;
+ struct ldlm_res_id *resname;
+ struct lustre_handle *lockh;
+ ldlm_policy_data_t *policy;
+ ldlm_mode_t dlmmode;
+ __u64 flags;
+
+ might_sleep();
+
+ info = osc_env_info(env);
+ resname = &info->oti_resname;
+ policy = &info->oti_policy;
+ lockh = &info->oti_handle;
+ page = opg->ops_cl.cpl_page;
+ obj = cl2osc(opg->ops_cl.cpl_obj);
flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
if (pending)
spin_lock(&obj->oo_seatbelt);
cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = cfs_current();
+ opg->ops_submitter = current;
spin_unlock(&obj->oo_seatbelt);
}
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
int result;
opg->ops_from = 0;
- opg->ops_to = CFS_PAGE_SIZE;
+ opg->ops_to = PAGE_CACHE_SIZE;
result = osc_prep_async_page(osc, opg, vmpage,
cl_offset(obj, page->cp_index));
return result;
}
+int osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long obd_upages, obd_dpages, osc_upages;
+
+	/* Can't check cli->cl_unstable_count, so there is no soft limit */
+ if (cli == NULL)
+ return 0;
+
+ obd_upages = cfs_atomic_read(&obd_unstable_pages);
+ obd_dpages = cfs_atomic_read(&obd_dirty_pages);
+
+ osc_upages = cfs_atomic_read(&cli->cl_unstable_count);
+
+	/* obd_max_dirty_pages is the max number of (dirty + unstable)
+	 * pages allowed at any given time. To simulate an unstable-page-
+	 * only limit, we subtract the current number of dirty pages
+	 * from this max. This difference is roughly the number of pages
+	 * currently available for unstable pages. The soft limit is then
+	 * half of that difference. Check osc_upages to ensure we don't
+	 * set SOFT_SYNC for OSCs without any outstanding unstable pages. */
+ return osc_upages != 0 &&
+ obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
+}
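
The arithmetic above is easiest to see with concrete numbers. The following
userspace sketch mirrors the check with plain longs; the values and the
standalone over_unstable_soft_limit() helper are illustrative assumptions,
not part of the patch:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel-side counters read above. */
static long obd_max_dirty_pages = 1024;	/* cap on dirty + unstable pages */
static long obd_dirty_pages	= 256;	/* pages currently dirty */
static long obd_unstable_pages	= 400;	/* written but not yet committed */
static long cl_unstable_count	= 10;	/* this OSC's unstable pages */

/* Same logic as osc_over_unstable_soft_limit(): the soft limit is half
 * of the slack left for unstable pages after dirty pages are deducted. */
static int over_unstable_soft_limit(void)
{
	long slack = obd_max_dirty_pages - obd_dirty_pages;	/* 768 */

	return cl_unstable_count != 0 &&
	       obd_unstable_pages >= slack / 2;			/* 400 >= 384 */
}

int main(void)
{
	printf("over soft limit: %d\n", over_unstable_soft_limit()); /* 1 */
	return 0;
}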
+
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
+ if (osc_over_unstable_soft_limit(oap->oap_cli))
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+
if (!client_is_remote(osc_export(obj)) &&
cfs_capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
static cfs_atomic_t osc_lru_waiters = CFS_ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
 * number of pages to avoid running out of LRU budget. */
-static const int lru_shrink_min = 2 << (20 - CFS_PAGE_SHIFT); /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
/* free at most this number, otherwise it will take too long to finish. */
-static const int lru_shrink_max = 32 << (20 - CFS_PAGE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
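
As a quick sanity check on the shift arithmetic above, here is a minimal
sketch assuming the common 4KB page size (i.e. PAGE_CACHE_SHIFT == 12);
the ASSUMED_PAGE_SHIFT macro is an assumption for illustration:

#include <stdio.h>

/* Assumption for illustration only: 4KB pages. */
#define ASSUMED_PAGE_SHIFT 12

int main(void)
{
	int min_pages = 2 << (20 - ASSUMED_PAGE_SHIFT);		/* 512 pages */
	int max_pages = 32 << (20 - ASSUMED_PAGE_SHIFT);	/* 8192 pages */

	/* 512 pages * 4KB = 2MB and 8192 pages * 4KB = 32MB,
	 * matching the 2M/32M comments on lru_shrink_min/max. */
	printf("min %d pages = %ld MB\n", min_pages,
	       ((long)min_pages << ASSUMED_PAGE_SHIFT) >> 20);
	printf("max %d pages = %ld MB\n", max_pages,
	       ((long)max_pages << ASSUMED_PAGE_SHIFT) >> 20);
	return 0;
}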
/* Check if we can free LRU slots from this OSC. If LRU waiters exist,
* we should free slots aggressively. In this way, slots are freed in a steady
if (wakeup) {
osc_lru_shrink(cli, osc_cache_too_much(cli));
- cfs_waitq_broadcast(&osc_lru_waitq);
+ wake_up_all(&osc_lru_waitq);
}
}
 * cl_lru_shrinkers is to avoid a recursive call in case
* we're already in the context of osc_lru_shrink(). */
if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0 &&
- !cfs_memory_pressure_get())
+ !memory_pressure_get())
osc_lru_shrink(cli, osc_cache_too_much(cli));
- cfs_waitq_signal(&osc_lru_waitq);
+ wake_up(&osc_lru_waitq);
}
} else {
LASSERT(cfs_list_empty(&opg->ops_lru));
if (rc > 0)
continue;
- cfs_cond_resched();
+ cond_resched();
/* slowest case: all caching pages are busy, so notify
 * other OSCs that we are short of LRU slots. */