removed cwd "./" (refer to Bugzilla 14399).
* File join has been disabled in this release, refer to Bugzilla 16929.
+Severity : enhancement
+Bugzilla : 18688
+Description: Allow tuning service threads via /proc
+Details : For each service a new
+ /proc/fs/lustre/{service}/*/thread_{min,max,started} entry is
+ created that can be used to set min/max thread counts, and get the
+ current number of running threads.
+
Severity : normal
Bugzilla : 18382
Description: don't return an error if there are partially created objects for a file.
Bugzilla : 19293
Description: move AT tunable parameters for more consistent usage
Details : add AT tunables under /proc/sys/lustre, add to conf_param parsing
-
+
Severity : enhancement
Bugzilla : 17974
Description: add lazystatfs mount option to allow statfs(2) to skip down OSTs
* considered full when less than ?_MAXREQSIZE is left in them.
*/
-#define LDLM_THREADS_AUTO_MIN \
- min((int)(num_online_cpus() * num_online_cpus() * 2), 8)
-#define LDLM_THREADS_AUTO_MAX (LDLM_THREADS_AUTO_MIN * 16)
+#define LDLM_THREADS_AUTO_MIN (2)
+#define LDLM_THREADS_AUTO_MAX min(num_online_cpus()*num_online_cpus()*32, 128)
#define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
#define LDLM_NBUFS (64 * num_online_cpus())
#define LDLM_BUFSIZE (8 * 1024)
* except in the open case where there are a large number of OSTs in a LOV.
*/
#define MDS_MAXREQSIZE (5 * 1024)
-#define MDS_MAXREPSIZE max(9 * 1024, 280 + LOV_MAX_STRIPE_COUNT * 56)
+#define MDS_MAXREPSIZE max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
/* FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
#define FLD_MAXREQSIZE (160)
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
timeout_cb_t cb, void *data,
struct list_head *obd_list);
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
return count;
}
+static int
+ptlrpc_lprocfs_rd_threads_min(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ return snprintf(page, count, "%d\n", svc->srv_threads_min);
+}
+
+static int
+ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ int val;
+ int rc = lprocfs_write_helper(buffer, count, &val);
+
+ if (rc < 0)
+ return rc;
+
+ if (val < 2)
+ return -ERANGE;
+
+ if (val > svc->srv_threads_max)
+ return -ERANGE;
+
+ spin_lock(&svc->srv_lock);
+ svc->srv_threads_min = val;
+ spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+
+static int
+ptlrpc_lprocfs_rd_threads_started(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ return snprintf(page, count, "%d\n", svc->srv_threads_started);
+}
+
+static int
+ptlrpc_lprocfs_rd_threads_max(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ return snprintf(page, count, "%d\n", svc->srv_threads_max);
+}
+
+static int
+ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ int val;
+ int rc = lprocfs_write_helper(buffer, count, &val);
+
+ if (rc < 0)
+ return rc;
+
+ if (val < 2)
+ return -ERANGE;
+
+ if (val < svc->srv_threads_min)
+ return -ERANGE;
+
+ spin_lock(&svc->srv_lock);
+ svc->srv_threads_max = val;
+ spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+
struct ptlrpc_srh_iterator {
__u64 srhi_seq;
struct ptlrpc_request *srhi_req;
struct ptlrpc_service *svc)
{
struct lprocfs_vars lproc_vars[] = {
+ {.name = "high_priority_ratio",
+ .read_fptr = ptlrpc_lprocfs_rd_hp_ratio,
+ .write_fptr = ptlrpc_lprocfs_wr_hp_ratio,
+ .data = svc},
{.name = "req_buffer_history_len",
- .write_fptr = NULL,
.read_fptr = ptlrpc_lprocfs_read_req_history_len,
.data = svc},
{.name = "req_buffer_history_max",
.write_fptr = ptlrpc_lprocfs_write_req_history_max,
.read_fptr = ptlrpc_lprocfs_read_req_history_max,
.data = svc},
+ {.name = "threads_min",
+ .read_fptr = ptlrpc_lprocfs_rd_threads_min,
+ .write_fptr = ptlrpc_lprocfs_wr_threads_min,
+ .data = svc},
+ {.name = "threads_max",
+ .read_fptr = ptlrpc_lprocfs_rd_threads_max,
+ .write_fptr = ptlrpc_lprocfs_wr_threads_max,
+ .data = svc},
+ {.name = "threads_started",
+ .read_fptr = ptlrpc_lprocfs_rd_threads_started,
+ .data = svc},
{.name = "timeouts",
.read_fptr = ptlrpc_lprocfs_rd_timeouts,
.data = svc},
- {.name = "high_priority_ratio",
- .read_fptr = ptlrpc_lprocfs_rd_hp_ratio,
- .write_fptr = ptlrpc_lprocfs_wr_hp_ratio,
- .data = svc},
{NULL}
};
static struct file_operations req_history_fops = {
LLOG_LCM_FL_EXIT = 1 << 1
};
-static void llcd_print(struct llog_canceld_ctxt *llcd,
- const char *func, int line)
+static void llcd_print(struct llog_canceld_ctxt *llcd,
+ const char *func, int line)
{
CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line);
CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size);
atomic_dec(&lcm->lcm_count);
spin_unlock(&lcm->lcm_lock);
- CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
+ CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
llcd, lcm, atomic_read(&lcm->lcm_count));
}
LASSERT(atomic_read(&llcd_count) > 0);
atomic_dec(&llcd_count);
- size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
+ size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
llcd->llcd_size;
OBD_SLAB_FREE(llcd, llcd_cache, size);
}
* Checks if passed cookie fits into llcd free space buffer. Returns
* 1 if yes and 0 otherwise.
*/
-static inline int
+static inline int
llcd_fit(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
return (llcd->llcd_size - llcd->llcd_cookiebytes >= sizeof(*cookies));
/**
* Copy passed @cookies to @llcd.
*/
-static inline void
+static inline void
llcd_copy(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
LASSERT(llcd_fit(llcd, cookies));
- memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
+ memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
cookies, sizeof(*cookies));
llcd->llcd_cookiebytes += sizeof(*cookies);
}
if (!llcd)
return NULL;
- CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n",
+ CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n",
llcd, ctxt);
ctxt->loc_llcd = NULL;
struct llog_canceld_ctxt *llcd;
struct list_head *tmp;
- CERROR("Busy llcds found (%d) on lcm %p\n",
+ CERROR("Busy llcds found (%d) on lcm %p\n",
atomic_read(&lcm->lcm_count) == 0, lcm);
spin_lock(&lcm->lcm_lock);
llcd_print(llcd, __FUNCTION__, __LINE__);
}
spin_unlock(&lcm->lcm_lock);
-
+
/*
* No point to go further with busy llcds at this point
* as this is clear bug. It might mean we got hanging
int rc = 0;
ENTRY;
- /*
- * Flush any remaining llcd.
+ /*
+ * Flush any remaining llcd.
*/
mutex_down(&ctxt->loc_sem);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
llcd_put(ctxt);
mutex_up(&ctxt->loc_sem);
} else {
- /*
+ /*
* This is either llog_sync() from generic llog code or sync
* on client disconnect. In either way let's do it and send
- * llcds to the target with waiting for completion.
+ * llcds to the target with waiting for completion.
*/
CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
mutex_up(&ctxt->loc_sem);