/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * cl_object implementation for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/user_namespace.h>
#include <linux/uidgid.h>

#include <libcfs/libcfs.h>

#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
 *
 * Object operations.
 *
 *****************************************************************************/
53 int vvp_object_invariant(const struct cl_object *obj)
55 struct inode *inode = vvp_object_inode(obj);
56 struct ll_inode_info *lli = ll_i2info(inode);
58 return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
62 static int vvp_object_print(const struct lu_env *env, void *cookie,
63 lu_printer_t p, const struct lu_object *o)
65 struct vvp_object *obj = lu2vvp(o);
66 struct inode *inode = obj->vob_inode;
67 struct ll_inode_info *lli;
69 (*p)(env, cookie, "(%d) inode: %p ",
70 atomic_read(&obj->vob_mmap_cnt),
73 lli = ll_i2info(inode);
74 (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
75 inode->i_ino, inode->i_generation, inode->i_mode,
76 inode->i_nlink, atomic_read(&inode->i_count),
77 lli->lli_clob, PFID(&lli->lli_fid));
82 static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
85 struct inode *inode = vvp_object_inode(obj);
88 * lov overwrites most of these fields in
89 * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
90 * attributes are newer.
93 attr->cat_size = i_size_read(inode);
94 attr->cat_mtime = inode->i_mtime.tv_sec;
95 attr->cat_atime = inode->i_atime.tv_sec;
96 attr->cat_ctime = inode->i_ctime.tv_sec;
97 attr->cat_blocks = inode->i_blocks;
98 attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
99 attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
100 attr->cat_projid = ll_i2info(inode)->lli_projid;
101 /* KMS is not known by this layer */
102 return 0; /* layers below have to fill in the rest */
105 static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
106 const struct cl_attr *attr, unsigned valid)
108 struct inode *inode = vvp_object_inode(obj);
111 inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
113 inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
114 if (valid & CAT_ATIME)
115 inode->i_atime.tv_sec = attr->cat_atime;
116 if (valid & CAT_MTIME)
117 inode->i_mtime.tv_sec = attr->cat_mtime;
118 if (valid & CAT_CTIME)
119 inode->i_ctime.tv_sec = attr->cat_ctime;
120 if (0 && valid & CAT_SIZE)
121 i_size_write(inode, attr->cat_size);
122 if (valid & CAT_PROJID)
123 ll_i2info(inode)->lli_projid = attr->cat_projid;
124 /* not currently necessary */
125 if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE|CAT_PROJID))
126 mark_inode_dirty(inode);
130 static void vvp_dirty_for_sync(const struct lu_env *env, struct cl_object *obj)
132 struct inode *inode = vvp_object_inode(obj);
134 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
137 static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
138 const struct cl_object_conf *conf)
140 struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
142 if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
143 CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
144 PFID(&lli->lli_fid));
146 ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
148 /* Clean up page mmap for this inode.
149 * The reason for us to do this is that if the page has
150 * already been installed into memory space, the process
151 * can access it without interacting with lustre, so this
152 * page may be stale due to layout change, and the process
153 * will never be notified.
154 * This operation is expensive but mmap processes have to pay
155 * a price themselves. */
156 unmap_mapping_range(conf->coc_inode->i_mapping,
157 0, OBD_OBJECT_EOF, 0);
158 pcc_layout_invalidate(conf->coc_inode);
163 static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
165 struct inode *inode = vvp_object_inode(obj);
169 rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
171 CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
172 PFID(lu_object_fid(&obj->co_lu)), rc);
176 if (ll_get_inode_lock_owner(inode) != current)
177 /* ask LOV get inode lock then lo_type_guard */
180 LASSERTF(inode_is_locked(inode), DFID ":inode %p lli_flags %#lx\n",
181 PFID(lu_object_fid(&obj->co_lu)), inode,
182 ll_i2info(inode)->lli_flags);
184 ll_truncate_inode_pages_final(inode);
185 mapping_clear_exiting(inode->i_mapping);
190 static int vvp_object_glimpse(const struct lu_env *env,
191 const struct cl_object *obj, struct ost_lvb *lvb)
193 struct inode *inode = vvp_object_inode(obj);
196 lvb->lvb_mtime = inode->i_mtime.tv_sec;
197 lvb->lvb_atime = inode->i_atime.tv_sec;
198 lvb->lvb_ctime = inode->i_ctime.tv_sec;
201 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
202 * "cp" or "tar" on remote node may think it's a completely sparse file
205 if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
206 lvb->lvb_blocks = dirty_cnt(inode);
211 static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
212 struct cl_req_attr *attr)
216 struct ll_inode_info *lli;
217 u64 valid_flags = OBD_MD_FLTYPE | OBD_MD_FLUID | OBD_MD_FLGID |
221 inode = vvp_object_inode(obj);
222 lli = ll_i2info(inode);
224 if (attr->cra_type == CRT_WRITE) {
225 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
226 obdo_set_o_projid(oa, ll_i2info(inode)->lli_projid);
227 } else if (attr->cra_type == CRT_READ) {
228 valid_flags |= OBD_MD_FLATIME;
230 obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
231 obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
232 if (CFS_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
235 attr->cra_uid = lli->lli_uid;
236 attr->cra_gid = lli->lli_gid;
238 memcpy(attr->cra_jobid, &lli->lli_jobid, sizeof(attr->cra_jobid));
241 static int vvp_inode_ops(const struct lu_env *env, struct cl_object *obj,
242 enum coo_inode_opc opc, void *data)
244 struct inode *inode = vvp_object_inode(obj);
245 struct ll_inode_info *lli = ll_i2info(inode);
250 case COIO_INODE_LOCK:
251 if (ll_get_inode_lock_owner(inode) != current)
252 ll_inode_lock(inode);
256 case COIO_INODE_UNLOCK:
257 if (ll_get_inode_lock_owner(inode) == current)
258 ll_inode_unlock(inode);
263 if (lli->lli_size_lock_owner != current)
264 ll_inode_size_lock(inode);
268 case COIO_SIZE_UNLOCK:
269 if (lli->lli_size_lock_owner == current)
270 ll_inode_size_unlock(inode);
282 static const struct cl_object_operations vvp_ops = {
283 .coo_page_init = vvp_page_init,
284 .coo_io_init = vvp_io_init,
285 .coo_attr_get = vvp_attr_get,
286 .coo_attr_update = vvp_attr_update,
287 .coo_dirty_for_sync = vvp_dirty_for_sync,
288 .coo_conf_set = vvp_conf_set,
289 .coo_prune = vvp_prune,
290 .coo_glimpse = vvp_object_glimpse,
291 .coo_req_attr_set = vvp_req_attr_set,
292 .coo_inode_ops = vvp_inode_ops,
295 static int vvp_object_init0(const struct lu_env *env,
296 struct vvp_object *vob,
297 const struct cl_object_conf *conf)
299 vob->vob_inode = conf->coc_inode;
300 cl_object_page_init(&vob->vob_cl, sizeof(struct cl_page_slice));
304 static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
305 const struct lu_object_conf *conf)
307 struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
308 struct vvp_object *vob = lu2vvp(obj);
309 struct lu_object *below;
310 struct lu_device *under;
313 under = &dev->vdv_next->cd_lu_dev;
314 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
316 const struct cl_object_conf *cconf;
318 cconf = lu2cl_conf(conf);
319 lu_object_add(obj, below);
320 result = vvp_object_init0(env, vob, cconf);
327 static void vvp_object_free_rcu(struct rcu_head *head)
329 struct vvp_object *vob = container_of(head, struct vvp_object,
330 vob_header.coh_lu.loh_rcu);
332 kmem_cache_free(vvp_object_kmem, vob);
335 static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
337 struct vvp_object *vob = lu2vvp(obj);
340 lu_object_header_fini(obj->lo_header);
341 OBD_FREE_PRE(vob, sizeof(*vob), "slab-freed");
342 call_rcu(&vob->vob_header.coh_lu.loh_rcu, vvp_object_free_rcu);
345 static const struct lu_object_operations vvp_lu_obj_ops = {
346 .loo_object_init = vvp_object_init,
347 .loo_object_free = vvp_object_free,
348 .loo_object_print = vvp_object_print,
351 struct vvp_object *cl_inode2vvp(struct inode *inode)
353 struct ll_inode_info *lli = ll_i2info(inode);
354 struct cl_object *obj = lli->lli_clob;
355 struct lu_object *lu;
357 LASSERT(obj != NULL);
358 lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
364 struct lu_object *vvp_object_alloc(const struct lu_env *env,
365 const struct lu_object_header *unused,
366 struct lu_device *dev)
368 struct vvp_object *vob;
369 struct lu_object *obj;
371 OBD_SLAB_ALLOC_PTR_GFP(vob, vvp_object_kmem, GFP_NOFS);
373 struct cl_object_header *hdr;
375 obj = &vob->vob_cl.co_lu;
376 hdr = &vob->vob_header;
377 cl_object_header_init(hdr);
378 hdr->coh_page_bufsize = round_up(sizeof(struct cl_page), 8);
380 lu_object_init(obj, &hdr->coh_lu, dev);
381 lu_object_add_top(&hdr->coh_lu, obj);
383 vob->vob_cl.co_ops = &vvp_ops;
384 obj->lo_ops = &vvp_lu_obj_ops;