*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* lustre/osp/osp_object.c
if (req != NULL)
ptlrpc_req_finished(req);
- osp_update_request_destroy(update);
+ osp_update_request_destroy(env, update);
return rc;
}
struct osp_object *obj = dt2osp_obj(dt);
struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
struct osp_xattr_entry *oxe;
- __u16 namelen = strlen(name);
+ __u16 namelen;
int rc = 0;
LASSERT(buf != NULL);
LASSERT(name != NULL);
+ namelen = strlen(name);
+
/* If only for xattr size, return directly. */
if (unlikely(buf->lb_len == 0))
return 0;
ptlrpc_req_finished(req);
if (update != NULL && !IS_ERR(update))
- osp_update_request_destroy(update);
+ osp_update_request_destroy(env, update);
if (oxe != NULL)
osp_oac_xattr_put(oxe);
return 0;
}
+/**
+ * Invalidate the cached attributes and extended attributes of \a obj.
+ *
+ * Under \a obj->opo_lock, walk the cached xattr list, mark each entry
+ * not ready, unlink it and drop the list's reference on it, then clear
+ * la_valid so the cached attributes are no longer trusted.
+ *
+ * NOTE(review): osp_oac_xattr_put() is called while opo_lock is held —
+ * assumes the final put never sleeps; confirm against its definition.
+ *
+ * \param[in] obj	the OSP object whose caches are invalidated
+ */
+void osp_obj_invalidate_cache(struct osp_object *obj)
+{
+	struct osp_xattr_entry *oxe;
+	struct osp_xattr_entry *tmp;
+
+	spin_lock(&obj->opo_lock);
+	list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
+		oxe->oxe_ready = 0;
+		list_del_init(&oxe->oxe_list);
+		osp_oac_xattr_put(oxe);
+	}
+	obj->opo_attr.la_valid = 0;
+	spin_unlock(&obj->opo_lock);
+}
+
/**
* Implement OSP layer dt_object_operations::do_invalidate() interface.
*
int osp_invalidate(const struct lu_env *env, struct dt_object *dt)
{
struct osp_object *obj = dt2osp_obj(dt);
- struct osp_xattr_entry *oxe;
- struct osp_xattr_entry *tmp;
ENTRY;
+ osp_obj_invalidate_cache(obj);
+
spin_lock(&obj->opo_lock);
- list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
- oxe->oxe_ready = 0;
- list_del_init(&oxe->oxe_list);
- osp_oac_xattr_put(oxe);
- }
- obj->opo_attr.la_valid = 0;
obj->opo_stale = 1;
spin_unlock(&obj->opo_lock);
struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
struct osp_device *osp = lu2osp_dev(dev);
struct page **pages;
- struct lu_device *top_device;
struct ptlrpc_request *req = NULL;
struct ptlrpc_bulk_desc *desc;
struct idx_info *ii;
/* 1MB bulk */
npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
- npages /= PAGE_CACHE_SIZE;
+ npages /= PAGE_SIZE;
OBD_ALLOC(pages, npages * sizeof(*pages));
if (pages == NULL)
it->ooi_pages = pages;
it->ooi_total_npages = npages;
for (i = 0; i < npages; i++) {
- pages[i] = alloc_page(GFP_IOFS);
+ pages[i] = alloc_page(GFP_NOFS);
if (pages[i] == NULL)
RETURN(-ENOMEM);
}
RETURN(rc);
}
- /* Let's allow this request during recovery, otherwise
- * if the remote target is also in recovery status,
- * it might cause deadlock */
- top_device = dev->ld_site->ls_top_dev;
- if (top_device->ld_obd->obd_recovering)
- req->rq_allow_replay = 1;
-
+ osp_set_req_replay(osp, req);
req->rq_request_portal = OUT_PORTAL;
ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
memset(ii, 0, sizeof(*ii));
for (i = 0; i < npages; i++)
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
GOTO(out, rc = -EPROTO);
npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
- (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT);
+ (PAGE_SHIFT - LU_PAGE_SHIFT);
if (npages > it->ooi_total_npages) {
CERROR("%s: returned more pages than expected, %u > %u\n",
osp->opd_obd->obd_name, npages, it->ooi_total_npages);
spin_lock_init(&po->opo_lock);
o->lo_header->loh_attr |= LOHA_REMOTE;
INIT_LIST_HEAD(&po->opo_xattr_list);
+ INIT_LIST_HEAD(&po->opo_invalidate_cb_list);
if (is_ost_obj(o)) {
po->opo_obj.do_ops = &osp_obj_ops;