*/
pgoff_t oti_next_index;
pgoff_t oti_fn_index; /* first non-overlapped index */
+ pgoff_t oti_ng_index; /* negative lock caching */
struct cl_sync_io oti_anchor;
struct cl_req_attr oti_req_attr;
struct lu_buf oti_ladvise_buf;
if (enqflags & CEF_NONBLOCK)
result |= LDLM_FL_BLOCK_NOWAIT;
if (enqflags & CEF_GLIMPSE)
- result |= LDLM_FL_HAS_INTENT;
+ result |= LDLM_FL_HAS_INTENT|LDLM_FL_CBPENDING;
if (enqflags & CEF_DISCARD_DATA)
result |= LDLM_FL_AST_DISCARD_DATA;
if (enqflags & CEF_PEEK)
* Return the lock even if it is being canceled.
*/
OSC_DAP_FL_CANCELING = BIT(1),
+ /**
+ * check that AST data is present; the request is to cancel the callback
+ */
+ OSC_DAP_FL_AST = BIT(2),
+ /**
+ * look at right region for the desired lock
+ */
+ OSC_DAP_FL_RIGHT = BIT(3),
};
/*
* An offset within page from which next transfer starts. This is used
* by cl_page_clip() to submit partial page transfers.
*/
- int ops_from;
+ unsigned int ops_from:PAGE_SHIFT,
/**
- * An offset within page at which next transfer ends.
+ * An offset within page at which next transfer ends (inclusive).
*
* \see osc_page::ops_from.
*/
- int ops_to;
+ ops_to:PAGE_SHIFT,
/**
* Boolean, true iff page is under transfer. Used for sanity checking.
*/
- unsigned ops_transfer_pinned:1,
+ ops_transfer_pinned:1,
/**
* in LRU?
*/
void osc_io_fsync_end(const struct lu_env *env,
const struct cl_io_slice *slice);
void osc_read_ahead_release(const struct lu_env *env, void *cbdata);
+int osc_io_lseek_start(const struct lu_env *env,
+ const struct cl_io_slice *slice);
+void osc_io_lseek_end(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/* osc_lock.c */
void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols,