4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * cl_object implementation for VVP layer.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_LLITE
44 #include <libcfs/libcfs.h>
47 #include "llite_internal.h"
48 #include "vvp_internal.h"
50 /*****************************************************************************
/*
 * Sanity check for a VVP-layer object: the backing inode must be a
 * regular file, or still uninitialized (i_mode == 0).
 * NOTE(review): this listing is truncated — the function braces and the
 * second operand of the && in the return expression are not visible;
 * the invariant presumably also constrains @lli — confirm in the tree.
 */
56 int vvp_object_invariant(const struct cl_object *obj)
58 struct inode *inode = vvp_object_inode(obj);
59 struct ll_inode_info *lli = ll_i2info(inode);
61 return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
/*
 * ->loo_object_print() debug hook for the VVP layer.
 *
 * Emits, through printer @p: whether the pending-page list is non-empty
 * ("+"/"-"), the transient-page and mmap counters, and — when the inode
 * pointer is set — the inode identity (ino/generation/mode/nlink/
 * refcount), the top-level clob pointer and the FID.
 * NOTE(review): listing is truncated — braces, the inode NULL check and
 * the return statement are not visible here.
 */
65 static int vvp_object_print(const struct lu_env *env, void *cookie,
66 lu_printer_t p, const struct lu_object *o)
68 struct vvp_object *obj = lu2vvp(o);
69 struct inode *inode = obj->vob_inode;
70 struct ll_inode_info *lli;
72 (*p)(env, cookie, "(%s %d %d) inode: %p ",
73 list_empty(&obj->vob_pending_list) ? "-" : "+",
74 atomic_read(&obj->vob_transient_pages),
75 atomic_read(&obj->vob_mmap_cnt),
/* Inode details are only meaningful once vob_inode has been attached. */
78 lli = ll_i2info(inode);
79 (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
80 inode->i_ino, inode->i_generation, inode->i_mode,
81 inode->i_nlink, atomic_read(&inode->i_count),
82 lli->lli_clob, PFID(&lli->lli_fid));
/*
 * ->coo_attr_get() for the VVP layer: seed @attr from the VFS inode.
 *
 * Copies size, a/m/ctime, block count and owner ids out of the inode.
 * KMS is unknown at this layer; lower layers (lov) merge or override
 * most of these fields afterwards — see the in-body comment below.
 */
87 static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
90 struct inode *inode = vvp_object_inode(obj);
93 * lov overwrites most of these fields in
94 * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
95 * attributes are newer.
98 attr->cat_size = i_size_read(inode);
99 attr->cat_mtime = LTIME_S(inode->i_mtime);
100 attr->cat_atime = LTIME_S(inode->i_atime);
101 attr->cat_ctime = LTIME_S(inode->i_ctime);
102 attr->cat_blocks = inode->i_blocks;
/* Translate kernel kuid/kgid back to plain ids in the initial ns. */
103 attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
104 attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
105 /* KMS is not known by this layer */
106 return 0; /* layers below have to fill in the rest */
/*
 * ->coo_attr_set() for the VVP layer: push selected attributes from
 * @attr back into the VFS inode, as chosen by the @valid CAT_* mask.
 *
 * The size-update and mark_inode_dirty branches are deliberately
 * disabled with "0 &&" (dead code kept to document what a full
 * implementation would do); only uid/gid and timestamps are applied.
 * NOTE(review): listing is truncated — braces, the CAT_UID/CAT_GID
 * mask tests guarding the first two assignments, and the return
 * statement are not visible here.
 */
109 static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
110 const struct cl_attr *attr, unsigned valid)
112 struct inode *inode = vvp_object_inode(obj);
115 inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
117 inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
118 if (valid & CAT_ATIME)
119 LTIME_S(inode->i_atime) = attr->cat_atime;
120 if (valid & CAT_MTIME)
121 LTIME_S(inode->i_mtime) = attr->cat_mtime;
122 if (valid & CAT_CTIME)
123 LTIME_S(inode->i_ctime) = attr->cat_ctime;
/* Intentionally compiled out: size is managed elsewhere. */
124 if (0 && valid & CAT_SIZE)
125 i_size_write(inode, attr->cat_size);
126 /* not currently necessary */
127 if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
128 mark_inode_dirty(inode);
/*
 * ->coo_conf_set() for the VVP layer.
 *
 * On OBJECT_CONF_INVALIDATE (layout lock being lost): reset the cached
 * layout generation to LL_LAYOUT_GEN_NONE and unmap every mmapped page
 * of the inode, so processes touching mapped pages fault back into
 * Lustre and observe the new layout (see in-body comment).
 * NOTE(review): listing is truncated — braces and the return statement
 * are not visible here.
 */
132 static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
133 const struct cl_object_conf *conf)
135 struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
137 if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
138 CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
139 PFID(&lli->lli_fid));
141 ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
143 /* Clean up page mmap for this inode.
144 * The reason for us to do this is that if the page has
145 * already been installed into memory space, the process
146 * can access it without interacting with lustre, so this
147 * page may be stale due to layout change, and the process
148 * will never be notified.
149 * This operation is expensive but mmap processes have to pay
150 * a price themselves. */
151 unmap_mapping_range(conf->coc_inode->i_mapping,
152 0, OBD_OBJECT_EOF, 0);
/*
 * ->coo_prune() for the VVP layer: evict all cached pages of the object.
 *
 * First writes back the whole range [0, OBD_OBJECT_EOF) via
 * cl_sync_file_range(CL_FSYNC_LOCAL) (logging a D_VFSTRACE message with
 * the object FID on failure), then drops the page cache with
 * truncate_inode_pages().
 * NOTE(review): listing is truncated — braces, the rc < 0 test guarding
 * the CDEBUG, and the return statement are not visible here.
 */
157 static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
159 struct inode *inode = vvp_object_inode(obj);
163 rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
165 CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
166 PFID(lu_object_fid(&obj->co_lu)), rc);
170 truncate_inode_pages(inode->i_mapping, 0);
/*
 * ->coo_glimpse() for the VVP layer: export inode timestamps into @lvb.
 *
 * Also applies the LU-417 workaround: if the file has a non-zero size
 * but zero blocks (dirty data not yet flushed), report the dirty page
 * count as the block count so remote "cp"/"tar" do not treat the file
 * as fully sparse.
 * NOTE(review): listing is truncated — braces and the return statement
 * are not visible here.
 */
174 static int vvp_object_glimpse(const struct lu_env *env,
175 const struct cl_object *obj, struct ost_lvb *lvb)
177 struct inode *inode = vvp_object_inode(obj);
180 lvb->lvb_mtime = LTIME_S(inode->i_mtime);
181 lvb->lvb_atime = LTIME_S(inode->i_atime);
182 lvb->lvb_ctime = LTIME_S(inode->i_ctime);
185 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
186 * "cp" or "tar" on remote node may think it's a completely sparse file
189 if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
190 lvb->lvb_blocks = dirty_cnt(inode);
/*
 * cl_object operation vector for the VVP layer: wires the handlers
 * defined above (and page/lock/io init routines from sibling files)
 * into the cl_object framework.
 */
195 static const struct cl_object_operations vvp_ops = {
196 .coo_page_init = vvp_page_init,
197 .coo_lock_init = vvp_lock_init,
198 .coo_io_init = vvp_io_init,
199 .coo_attr_get = vvp_attr_get,
200 .coo_attr_set = vvp_attr_set,
201 .coo_conf_set = vvp_conf_set,
202 .coo_prune = vvp_prune,
203 .coo_glimpse = vvp_object_glimpse
/*
 * Second-stage initialization of a vvp_object: record the VFS inode
 * from @conf, zero the transient-page counter, and declare the size of
 * the per-page private area (struct vvp_page) to the cl framework.
 * NOTE(review): listing is truncated — braces and the return statement
 * are not visible here.
 */
206 static int vvp_object_init0(const struct lu_env *env,
207 struct vvp_object *vob,
208 const struct cl_object_conf *conf)
210 vob->vob_inode = conf->coc_inode;
211 atomic_set(&vob->vob_transient_pages, 0);
212 cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
/*
 * ->loo_object_init(): build the layer below and finish VVP setup.
 *
 * Allocates the next-layer lu_object from the device under this one,
 * and on success initializes the pending-page list, links the new
 * object into the stack and runs vvp_object_init0().
 * NOTE(review): listing is truncated — braces, the NULL check on
 * `below`, the failure branch and the return statement are not visible
 * here.
 */
216 static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
217 const struct lu_object_conf *conf)
219 struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
220 struct vvp_object *vob = lu2vvp(obj);
221 struct lu_object *below;
222 struct lu_device *under;
/* Ask the next device in the stack to allocate its slice. */
225 under = &dev->vdv_next->cd_lu_dev;
226 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
228 const struct cl_object_conf *cconf;
230 cconf = lu2cl_conf(conf);
231 INIT_LIST_HEAD(&vob->vob_pending_list);
232 lu_object_add(obj, below);
233 result = vvp_object_init0(env, vob, cconf);
/*
 * ->loo_object_free(): tear down the object header and return the
 * vvp_object to its slab cache.
 * NOTE(review): listing is truncated — braces (and any intermediate
 * finalization between lu2vvp() and the header fini) are not visible.
 */
240 static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
242 struct vvp_object *vob = lu2vvp(obj);
245 lu_object_header_fini(obj->lo_header);
246 OBD_SLAB_FREE_PTR(vob, vvp_object_kmem);
/*
 * lu_object operation vector for VVP objects: lifecycle (init/free)
 * and the debug print hook defined above.
 */
249 static const struct lu_object_operations vvp_lu_obj_ops = {
250 .loo_object_init = vvp_object_init,
251 .loo_object_free = vvp_object_free,
252 .loo_object_print = vvp_object_print,
/*
 * Map a VFS inode to its VVP slice: take the top-level cl_object cached
 * in ll_inode_info (asserted non-NULL) and locate the layer belonging
 * to vvp_device_type in the lu_object stack.
 * NOTE(review): listing is truncated — braces and the conversion/return
 * of `lu` to a vvp_object are not visible here.
 */
255 struct vvp_object *cl_inode2vvp(struct inode *inode)
257 struct ll_inode_info *lli = ll_i2info(inode);
258 struct cl_object *obj = lli->lli_clob;
259 struct lu_object *lu;
261 LASSERT(obj != NULL);
262 lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
268 struct lu_object *vvp_object_alloc(const struct lu_env *env,
269 const struct lu_object_header *unused,
270 struct lu_device *dev)
272 struct vvp_object *vob;
273 struct lu_object *obj;
275 OBD_SLAB_ALLOC_PTR_GFP(vob, vvp_object_kmem, GFP_NOFS);
277 struct cl_object_header *hdr;
279 obj = &vob->vob_cl.co_lu;
280 hdr = &vob->vob_header;
281 cl_object_header_init(hdr);
282 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
284 lu_object_init(obj, &hdr->coh_lu, dev);
285 lu_object_add_top(&hdr->coh_lu, obj);
287 vob->vob_cl.co_ops = &vvp_ops;
288 obj->lo_ops = &vvp_lu_obj_ops;