unsigned long fo_read_cache:1, /* read-only cache */
fo_writethrough_cache:1, /* writethrough cache */
fo_syncjournal:1, /* sync journal on writes */
+ fo_sync_lock_cancel:2, /* sync on lock cancel */
fo_raid_degraded:1; /* RAID device degraded */
struct obd_import *fo_mdc_imp;
#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
#define OSC_DEFAULT_RESENDS 10
+/* possible values for fo_sync_lock_cancel */
+enum {
+ NEVER_SYNC_ON_CANCEL = 0,
+ BLOCKING_SYNC_ON_CANCEL = 1,
+ ALWAYS_SYNC_ON_CANCEL = 2,
+ NUM_SYNC_ON_CANCEL_STATES
+};
+
#define MDC_MAX_RIF_DEFAULT 8
#define MDC_MAX_RIF_MAX 512
struct ptlrpc_service *ost_create_service;
struct ptlrpc_service *ost_io_service;
struct semaphore ost_health_sem;
- int ost_sync_on_lock_cancel;
};
struct echo_client_obd {
#define KEY_MAX_EASIZE "max_ea_size"
#define KEY_FIEMAP "fiemap"
#define KEY_CONNECT_FLAG "connect_flags"
+#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
/* XXX unused */
#define KEY_ASYNC "async"
#define KEY_CAPA_KEY "capa_key"
filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
filter->fo_syncjournal = 1; /* Sync journals on i/o by default b=19128 */
+ filter_slc_set(filter); /* initialize sync on lock cancel */
rc = filter_prep(obd);
if (rc)
RETURN(rc);
}
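+ /* report the current sync-on-lock-cancel setting; the OST blocking AST
+ * queries it through obd_get_info() with KEY_SYNC_LOCK_CANCEL */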
+ if (KEY_IS(KEY_SYNC_LOCK_CANCEL)) {
+ *((__u32 *) val) = obd->u.filter.fo_sync_lock_cancel;
+ *vallen = sizeof(__u32);
+ RETURN(0);
+ }
+
CDEBUG(D_IOCTL, "invalid key\n");
RETURN(-EINVAL);
}
/* Quota stuff */
extern quota_interface_t *filter_quota_interface_ref;
-
+/* Sync on lock cancel is useless when every write already forces a journal
+ * flush. Conversely, when async journal commit is enabled, sync on lock
+ * cancel should also be enabled if it is not already. */
+static inline void filter_slc_set(struct filter_obd *filter)
+{
+ if (filter->fo_syncjournal == 1)
+ filter->fo_sync_lock_cancel = NEVER_SYNC_ON_CANCEL;
+ else if (filter->fo_sync_lock_cancel == NEVER_SYNC_ON_CANCEL)
+ filter->fo_sync_lock_cancel = ALWAYS_SYNC_ON_CANCEL;
+}
#endif /* _FILTER_INTERNAL_H */
return -EINVAL;
obd->u.filter.fo_syncjournal = !!val;
+ filter_slc_set(&obd->u.filter);
+
+ return count;
+}
+
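+/* state names shown in and accepted by the "sync_on_lock_cancel" proc
+ * file, indexed by the fo_sync_lock_cancel enum values */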
+static char *sync_on_cancel_states[] = {"never",
+ "blocking",
+ "always" };
+
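+/* proc read handler: report the current sync on lock cancel state by name */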
+int lprocfs_filter_rd_sync_lock_cancel(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct obd_device *obd = data;
+ int rc;
+
+ rc = snprintf(page, count, "%s\n",
+ sync_on_cancel_states[obd->u.filter.fo_sync_lock_cancel]);
+ return rc;
+}
+
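+/* proc write handler: accept a state name ("never", "blocking", "always")
+ * or, as a fallback, its numeric value parsed by lprocfs_write_helper() */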
+int lprocfs_filter_wr_sync_lock_cancel(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct obd_device *obd = data;
+ int val = -1;
+ int i;
+
+ for (i = 0; i < NUM_SYNC_ON_CANCEL_STATES; i++) {
+ if (memcmp(buffer, sync_on_cancel_states[i],
+ strlen(sync_on_cancel_states[i])) == 0) {
+ val = i;
+ break;
+ }
+ }
+ if (val == -1) {
+ int rc;
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+ }
+
+ if (val < 0 || val >= NUM_SYNC_ON_CANCEL_STATES)
+ return -EINVAL;
+
+ obd->u.filter.fo_sync_lock_cancel = val;
return count;
}
#endif
{ "sync_journal", lprocfs_filter_rd_syncjournal,
lprocfs_filter_wr_syncjournal, 0 },
+ { "sync_on_lock_cancel", lprocfs_filter_rd_sync_lock_cancel,
+ lprocfs_filter_wr_sync_lock_cancel, 0 },
{ "degraded", lprocfs_filter_rd_degraded,
lprocfs_filter_wr_degraded, 0 },
{ 0 }
#include "ost_internal.h"
#ifdef LPROCFS
-static char *sync_on_cancel_states[] = {"never",
- "blocking",
- "always" };
-
-int lprocfs_ost_rd_ost_sync_on_lock_cancel(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct obd_device *obd = data;
- int rc;
-
- rc = snprintf(page, count, "%s\n",
- sync_on_cancel_states[obd->u.ost.ost_sync_on_lock_cancel]);
- return rc;
-}
-
-int lprocfs_ost_wr_ost_sync_on_lock_cancel(struct file *file,
- const char *buffer,
- unsigned long count, void *data)
-{
- struct obd_device *obd = data;
- int val = -1;
- int i;
-
- for (i = 0 ; i < NUM_SYNC_ON_CANCEL_STATES; i++) {
- if (memcmp(buffer, sync_on_cancel_states[i],
- strlen(sync_on_cancel_states[i])) == 0) {
- val = i;
- break;
- }
- }
- if (val == -1) {
- int rc;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
- }
-
- if (val < 0 || val > 2)
- return -EINVAL;
-
- obd->u.ost.ost_sync_on_lock_cancel = val;
- return count;
-}
-
static struct lprocfs_vars lprocfs_ost_obd_vars[] = {
{ "uuid", lprocfs_rd_uuid, 0, 0 },
- { "sync_on_lock_cancel", lprocfs_ost_rd_ost_sync_on_lock_cancel,
- lprocfs_ost_wr_ost_sync_on_lock_cancel, 0 },
{ 0 }
};
struct ldlm_lock_desc *desc,
void *data, int flag)
{
- struct obd_device *obd = lock->l_export->exp_obd;
- if (flag == LDLM_CB_CANCELING &&
+ __u32 sync_lock_cancel = 0;
+ __u32 len = sizeof(sync_lock_cancel);
+ int rc = 0;
+ ENTRY;
+
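+ /* the sync-on-lock-cancel policy now lives in the obdfilter, so ask the
+ * lock's export for it instead of using the removed ost_sync_on_lock_cancel */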
+ rc = obd_get_info(lock->l_export, sizeof(KEY_SYNC_LOCK_CANCEL),
+ KEY_SYNC_LOCK_CANCEL, &len, &sync_lock_cancel, NULL);
+
+ if (!rc && flag == LDLM_CB_CANCELING &&
(lock->l_granted_mode & (LCK_PW|LCK_GROUP)) &&
- (obd->u.ost.ost_sync_on_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
- (obd->u.ost.ost_sync_on_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
+ (sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
+ (sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
lock->l_flags & LDLM_FL_CBPENDING))) {
struct obd_info *oinfo;
- int rc;
OBD_ALLOC_PTR(oinfo);
if (!oinfo)
sema_init(&ost->ost_health_sem, 1);
- /* Always sync on lock cancel */
- ost->ost_sync_on_lock_cancel = ALWAYS_SYNC_ON_CANCEL;
-
if (oss_num_threads) {
/* If oss_num_threads is set, it is the min and the max. */
if (oss_num_threads > OSS_THREADS_MAX)
memset(lvars, 0, sizeof(*lvars));
}
#endif
-
-enum {
- NEVER_SYNC_ON_CANCEL = 0,
- BLOCKING_SYNC_ON_CANCEL = 1,
- ALWAYS_SYNC_ON_CANCEL = 2,
- NUM_SYNC_ON_CANCEL_STATES
-};
-
#endif /* OST_INTERNAL_H */