*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_OSC
+#include <lustre_osc.h>
-#include "osc_cl_internal.h"
+#include "osc_internal.h"
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
+ struct osc_io *oio = osc_env_io(env);
int result;
opg->ops_from = 0;
- opg->ops_to = PAGE_CACHE_SIZE;
+ opg->ops_to = PAGE_SIZE;
+
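+ /* Initialise the LRU list head up front so ops_lru is always
+  * in a valid (empty) state, even on the early-return error
+  * path below. */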
+ INIT_LIST_HEAD(&opg->ops_lru);
result = osc_prep_async_page(osc, opg, page->cp_vmpage,
cl_offset(obj, index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, index,
- &osc_page_ops);
- }
- INIT_LIST_HEAD(&opg->ops_lru);
+ if (result != 0)
+ return result;
+
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj, index,
+ &osc_page_ops);
+
/* reserve an LRU space for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0) {
+ if (page->cp_type == CPT_CACHEABLE) {
result = osc_lru_alloc(env, osc_cli(osc), opg);
if (result == 0) {
- spin_lock(&osc->oo_tree_lock);
- result = radix_tree_insert(&osc->oo_tree, index, opg);
- if (result == 0)
- ++osc->oo_npages;
- spin_unlock(&osc->oo_tree_lock);
- LASSERT(result == 0);
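+ /* Preallocate radix tree nodes with GFP_NOFS before taking
+  * oo_tree_lock, so radix_tree_insert() does not have to
+  * allocate while the spinlock is held. */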
+ result = radix_tree_preload(GFP_NOFS);
+ if (result == 0) {
+ spin_lock(&osc->oo_tree_lock);
+ result = radix_tree_insert(&osc->oo_tree,
+ index, opg);
+ if (result == 0)
+ ++osc->oo_npages;
+ spin_unlock(&osc->oo_tree_lock);
+
+ radix_tree_preload_end();
+ }
}
}
else if (pages >= budget / 2)
return lru_shrink_min(cli);
} else {
- int duration = cfs_time_current_sec() - cli->cl_lru_last_used;
+ time64_t duration = ktime_get_real_seconds();
+ long timediff;
/* knock out pages by duration of no IO activity */
- duration >>= 6; /* approximately 1 minute */
- if (duration > 0 && pages >= budget / duration)
+ duration -= cli->cl_lru_last_used;
+ /*
+ * The difference shouldn't be more than 70 years
+ * so we can safely cast to a long. The shift by 6
+ * rounds the value to roughly one-minute units.
+ */
+ timediff = (long)(duration >> 6);
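+ /* The longer the LRU has been idle, the lower the
+  * threshold (budget / timediff) for shrinking. */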
+ if (timediff > 0 && pages >= budget / timediff)
return lru_shrink_min(cli);
}
return 0;
list_splice_tail(&lru, &cli->cl_lru_list);
atomic_long_sub(npages, &cli->cl_lru_busy);
atomic_long_add(npages, &cli->cl_lru_in_list);
- cli->cl_lru_last_used = cfs_time_current_sec();
+ cli->cl_lru_last_used = ktime_get_real_seconds();
spin_unlock(&cli->cl_lru_list_lock);
if (waitqueue_active(&osc_lru_waitq))
}
/**
- * Delete page from LRUlist for redirty.
+ * Delete page from LRU list for redirty.
*/
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
{
/* If page is being transferred for the first time,
* ops_lru should be empty */
- if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+ if (opg->ops_in_lru) {
spin_lock(&cli->cl_lru_list_lock);
- __osc_lru_del(cli, opg);
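+ /* Re-check under cl_lru_list_lock: the page may already
+  * have been taken off the LRU list by a concurrent thread. */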
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ atomic_long_inc(&cli->cl_lru_busy);
+ }
spin_unlock(&cli->cl_lru_list_lock);
- atomic_long_inc(&cli->cl_lru_busy);
}
}
* are likely from the same page zone.
*/
static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa,
int factor)
{
- int page_count = desc->bd_iov_count;
+ int page_count;
void *zone = NULL;
int count = 0;
int i;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
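+ /* A bulk descriptor may be absent; in that case use the page
+  * array kept in the request's async args instead. */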
+ if (desc != NULL) {
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+ page_count = desc->bd_iov_count;
+ } else {
+ page_count = aa->aa_page_count;
+ }
for (i = 0; i < page_count; i++) {
- void *pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+ void *pz;
+ if (desc)
+ pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+ else
+ pz = page_zone(aa->aa_ppga[i]->pg);
if (likely(pz == zone)) {
++count;
mod_zone_page_state(zone, NR_UNSTABLE_NFS, factor * count);
}
-static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa)
{
- unstable_page_accounting(desc, 1);
+ unstable_page_accounting(desc, aa, 1);
}
-static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa)
{
- unstable_page_accounting(desc, -1);
+ unstable_page_accounting(desc, aa, -1);
}
/**
void osc_dec_unstable_pages(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- int page_count = desc->bd_iov_count;
+ int page_count;
long unstable_count;
+ if (desc)
+ page_count = desc->bd_iov_count;
+ else
+ page_count = aa->aa_page_count;
+
LASSERT(page_count >= 0);
- dec_unstable_page_accounting(desc);
+
+ dec_unstable_page_accounting(desc, aa);
unstable_count = atomic_long_sub_return(page_count,
&cli->cl_unstable_count);
void osc_inc_unstable_pages(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- long page_count = desc->bd_iov_count;
+ long page_count;
/* No unstable page tracking */
if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
return;
- add_unstable_page_accounting(desc);
+ if (desc)
+ page_count = desc->bd_iov_count;
+ else
+ page_count = aa->aa_page_count;
+
+ add_unstable_page_accounting(desc, aa);
atomic_long_add(page_count, &cli->cl_unstable_count);
atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);