cfs_list_t loi_hp_ready_item;
cfs_list_t loi_write_item;
cfs_list_t loi_read_item;
-
+ cfs_list_t loi_sync_fs_item;
unsigned long loi_kms_valid:1;
__u64 loi_kms; /* known minimum size */
struct ost_lvb loi_lvb;
CFS_INIT_LIST_HEAD(&loi->loi_hp_ready_item);
CFS_INIT_LIST_HEAD(&loi->loi_write_item);
CFS_INIT_LIST_HEAD(&loi->loi_read_item);
+ CFS_INIT_LIST_HEAD(&loi->loi_sync_fs_item);
}
struct lov_stripe_md {
typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
+/* State of one in-flight sync-fs request on a client_obd; the upcall
+ * fires once cl_loi_sync_fs_list drains (see osc_wake_sync_fs()). */
+struct osc_sync_fs_wait {
+ struct obd_info *sfw_oi; /* caller's obd_info, handed back to the upcall */
+ obd_enqueue_update_f sfw_upcall; /* completion callback */
+ int started; /* 1 while a sync-fs is pending;
+ * NOTE(review): lacks the sfw_ prefix
+ * the other members use */
+};
+
/* obd info for a particular level (lov, osc). */
struct obd_info {
/* Lock policy. It keeps an extent which is specific for a particular
cfs_list_t cl_loi_hp_ready_list;
cfs_list_t cl_loi_write_list;
cfs_list_t cl_loi_read_list;
+ cfs_list_t cl_loi_sync_fs_list;
int cl_r_in_flight;
int cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by /proc */
struct lu_client_seq *cl_seq;
cfs_atomic_t cl_resends; /* resend count */
+ struct osc_sync_fs_wait cl_sf_wait;
};
+
#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
char *ostname);
void (*o_getref)(struct obd_device *obd);
void (*o_putref)(struct obd_device *obd);
+ int (*o_sync_fs)(struct obd_device *obd, struct obd_info *oinfo,
+ int wait);
/*
* NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line
* to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c.
RETURN(rc);
}
+/* Release a sync-fs request set built by lov_prep_sync_fs_set().
+ * Tolerates a NULL @set; always returns 0. */
+int lov_fini_sync_fs_set(struct lov_request_set *set)
+{
+ int rc = 0;
+ ENTRY;
+
+ if (set == NULL)
+ RETURN(rc);
+ /* drop the set reference taken at prep time */
+ lov_put_reqset(set);
+ RETURN(rc);
+}
+
void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
int success)
{
lov_fini_statfs_set(set);
RETURN(rc);
}
+
+/* Per-target completion callback for a sync-fs request: recover the
+ * lov_request that embeds @cookie and fold @rc into the owning set.
+ * Returns @rc unchanged. */
+int cb_sync_fs_update(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct lov_request *lovreq;
+ ENTRY;
+
+ /* the obd_info is embedded in the lov_request as rq_oi */
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ lov_update_set(lovreq->rq_rqset, lovreq, rc);
+
+ RETURN(rc);
+}
+
+/* Build a lov_request_set with one request per active OST target for a
+ * sync-fs operation.  On success *request holds the new set; on failure
+ * any partially-built set is released via lov_fini_sync_fs_set().
+ * Returns 0 on success or a negative errno. */
+int lov_prep_sync_fs_set(struct obd_device *obd, struct obd_info *oinfo,
+ struct lov_request_set **request)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &obd->u.lov;
+ int rc = 0;
+ int i;
+
+ ENTRY;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ RETURN(-ENOMEM); /* was ENOMEM: errnos are negative here */
+ lov_init_set(set);
+ set->set_obd = obd;
+ set->set_oi = oinfo;
+
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ struct lov_request *req;
+
+ /* skip missing, inactive, or unconnected targets */
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active ||
+ !lov->lov_tgts[i]->ltd_exp) {
+ CDEBUG(D_INFO, "lov idx %d inactive or disabled\n", i);
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM); /* was ENOMEM: match -EIO below */
+
+ req->rq_idx = i;
+ req->rq_oi.oi_cb_up = cb_sync_fs_update;
+
+ lov_set_add_req(req, set);
+ }
+ /* no usable targets at all */
+ if (!set->set_count)
+ GOTO(out, rc = -EIO);
+ *request = set;
+ RETURN(rc);
+out:
+ lov_fini_sync_fs_set(set);
+ RETURN(rc);
+}
return cli->cl_r_in_flight + cli->cl_w_in_flight;
}
+/* Fire the pending sync-fs upcall once cl_loi_sync_fs_list has drained.
+ * Callers in this patch invoke it with cl_loi_list_lock held — presumably
+ * required to protect cl_sf_wait (TODO confirm).  Always returns 0. */
+int osc_wake_sync_fs(struct client_obd *cli)
+{
+ int rc = 0;
+ ENTRY;
+ if (cfs_list_empty(&cli->cl_loi_sync_fs_list) &&
+ cli->cl_sf_wait.started) {
+ /* notify the waiter registered by osc_sync_fs() exactly once */
+ cli->cl_sf_wait.sfw_upcall(cli->cl_sf_wait.sfw_oi, rc);
+ cli->cl_sf_wait.started = 0;
+ CDEBUG(D_CACHE, "sync_fs_loi list is empty\n");
+ }
+ RETURN(rc);
+}
+
/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
osc_release_write_grant(cli, &oap->oap_brw_page, sent);
}
+/* Return 1 if the head of @lop's urgent list carries ASYNC_SYNCFS,
+ * i.e. a pending sync-fs should force an RPC for this object;
+ * 0 otherwise.  Only the first urgent page is inspected. */
+static int lop_makes_syncfs_rpc(struct loi_oap_pages *lop)
+{
+ struct osc_async_page *oap;
+ ENTRY;
+
+ if (cfs_list_empty(&lop->lop_urgent))
+ RETURN(0);
+
+ oap = cfs_list_entry(lop->lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
+
+ if (oap->oap_async_flags & ASYNC_SYNCFS) {
+ CDEBUG(D_CACHE, "syncfs request forcing RPC\n");
+ RETURN(1);
+ }
+
+ RETURN(0);
+}
/* This maintains the lists of pending pages to read/write for a given object
* (lop). This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
} else {
- on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
- on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
- lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
- lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
+ if (lop_makes_syncfs_rpc(&loi->loi_write_lop)) {
+ on_list(&loi->loi_sync_fs_item,
+ &cli->cl_loi_sync_fs_list,
+ loi->loi_write_lop.lop_num_pending);
+ } else {
+ on_list(&loi->loi_hp_ready_item,
+ &cli->cl_loi_hp_ready_list, 0);
+ on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
+ lop_makes_rpc(cli, &loi->loi_write_lop,
+ OBD_BRW_WRITE)||
+ lop_makes_rpc(cli, &loi->loi_read_lop,
+ OBD_BRW_READ));
+ }
}
on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
}
osc_wake_cache_waiters(cli);
+ osc_wake_sync_fs(cli);
osc_check_rpcs(env, cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
if (!async)
* to be canceled, the pages covered by the lock will be sent out
* with ASYNC_HP. We have to send out them as soon as possible. */
cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
- if (oap->oap_async_flags & ASYNC_HP)
+ if (oap->oap_async_flags & ASYNC_HP)
cfs_list_move(&oap->oap_pending_item, &tmp_list);
else
cfs_list_move_tail(&oap->oap_pending_item, &tmp_list);
}
osc_wake_cache_waiters(cli);
-
+ osc_wake_sync_fs(cli);
loi_list_maint(cli, loi);
client_obd_list_unlock(&cli->cl_loi_list_lock);
if (!cfs_list_empty(&cli->cl_loi_ready_list))
RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
struct lov_oinfo, loi_ready_item));
+ if (!cfs_list_empty(&cli->cl_loi_sync_fs_list))
+ RETURN(cfs_list_entry(cli->cl_loi_sync_fs_list.next,
+ struct lov_oinfo, loi_sync_fs_item));
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
cfs_list_del_init(&loi->loi_write_item);
if (!cfs_list_empty(&loi->loi_read_item))
cfs_list_del_init(&loi->loi_read_item);
+ if (!cfs_list_empty(&loi->loi_sync_fs_item))
+ cfs_list_del_init(&loi->loi_sync_fs_item);
loi_list_maint(cli, loi);
if ((oap->oap_async_flags & async_flags) == async_flags)
RETURN(0);
+ /* XXX: This introduces a tiny insignificant race for the case if this
+ * loi already had other urgent items.
+ */
+ if (SETTING(oap->oap_async_flags, async_flags, ASYNC_SYNCFS) &&
+ cfs_list_empty(&oap->oap_rpc_item) &&
+ cfs_list_empty(&oap->oap_urgent_item)) {
+ cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+ flags |= ASYNC_SYNCFS;
+ cfs_spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= flags;
+ cfs_spin_unlock(&oap->oap_lock);
+ loi_list_maint(cli, loi);
+ RETURN(0);
+ }
+
if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
flags |= ASYNC_READY;
if (!cfs_list_empty(&oap->oap_urgent_item)) {
cfs_list_del_init(&oap->oap_urgent_item);
cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
+ oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP |
+ ASYNC_SYNCFS);
cfs_spin_unlock(&oap->oap_lock);
}
if (!cfs_list_empty(&oap->oap_pending_item)) {
return(rc);
}
+/* Begin a sync-fs for this client: register the caller's upcall in
+ * cl_sf_wait, flag every pending write page ASYNC_SYNCFS (which queues
+ * its object on cl_loi_sync_fs_list via loi_list_maint()), then kick
+ * off RPCs.  The upcall fires from osc_wake_sync_fs() once the list
+ * drains — immediately below if nothing was dirty.
+ * NOTE(review): the @wait argument is unused here — confirm whether a
+ * synchronous mode was intended. */
+static int osc_sync_fs(struct obd_device *obd, struct obd_info *oinfo,
+ int wait)
+{
+ struct client_obd *cli;
+ struct lov_oinfo *loi;
+ struct lov_oinfo *tloi;
+ struct osc_async_page *oap;
+ struct osc_async_page *toap;
+ struct loi_oap_pages *lop;
+ struct lu_env *env;
+ int refcheck;
+ int rc = 0;
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ cli = &obd->u.cli;
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ /* record the waiter before flagging pages so completions racing in
+ * after the unlock below can find it */
+ cli->cl_sf_wait.sfw_oi = oinfo;
+ cli->cl_sf_wait.sfw_upcall = oinfo->oi_cb_up;
+ cli->cl_sf_wait.started = 1;
+ /* creating cl_loi_sync_fs list */
+ cfs_list_for_each_entry_safe(loi, tloi, &cli->cl_loi_write_list,
+ loi_write_item) {
+ lop = &loi->loi_write_lop;
+ cfs_list_for_each_entry_safe(oap, toap, &lop->lop_pending,
+ oap_pending_item)
+ osc_set_async_flags_base(cli, loi, oap, ASYNC_SYNCFS);
+ }
+
+ osc_check_rpcs(env, cli);
+ /* fires the upcall right away if no sync-fs objects were queued */
+ osc_wake_sync_fs(cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ cl_env_put(env, &refcheck);
+ RETURN(rc);
+}
+
static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
{
return osc_process_config_base(obd, buf);
.o_llog_init = osc_llog_init,
.o_llog_finish = osc_llog_finish,
.o_process_config = osc_process_config,
+ .o_sync_fs = osc_sync_fs,
};
extern struct lu_kmem_descr osc_caches[];