1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
7 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LLITE
26 #include <linux/lustre_dlm.h>
27 #include <linux/lustre_lite.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #include <linux/lustre_acl.h>
31 #include <linux/lustre_sec.h>
32 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
33 #include <linux/lustre_compat25.h>
35 #include <linux/obd_lov.h>
36 #include <linux/lustre_audit.h>
37 #include <linux/lustre_net.h>
38 #include "llite_internal.h"
40 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
41 __u64 lov_merge_blocks(struct lov_stripe_md *lsm);
42 __u64 lov_merge_mtime(struct lov_stripe_md *lsm, __u64 current_time);
/*
 * ll_validate_size(): check whether this client's cached size/blocks/mtime
 * for @inode can be trusted without an RPC.  It does a non-acquiring
 * (LDLM_FL_TEST_LOCK) match for a PR|PW extent lock over the whole object
 * [0, OBD_OBJECT_EOF]; only if such coverage exists are *size/*blocks/*mtime
 * filled in from the lov-merged per-stripe values, under lli_size_sem.
 *
 * NOTE(review): this dump has lines elided (signature tail, flags/rc
 * declarations, early returns) — comments below cover only visible code.
 */
44 int ll_validate_size(struct inode *inode, __u64 *size, __u64 *blocks,
47 ldlm_policy_data_t extent = { .l_extent = { 0, OBD_OBJECT_EOF } };
48 struct obd_export *exp = ll_i2sbi(inode)->ll_dt_exp;
49 struct ll_inode_info *lli = ll_i2info(inode);
50 struct lustre_handle match_lockh = {0};
/* no stripe metadata yet — nothing cached to validate */
54 if (lli->lli_smd == NULL)
57 LASSERT(size != NULL && blocks != NULL);
/* TEST_LOCK: query-only match, no reference is taken on a found lock */
59 flags = LDLM_FL_TEST_LOCK | LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
60 rc = obd_match(exp, lli->lli_smd, LDLM_EXTENT, &extent,
61 LCK_PR | LCK_PW, &flags, inode, &match_lockh);
63 /* we have no all needed locks,
64 * so we don't know actual size */
68 /* we know actual size! */
/* lli_size_sem serializes size/kms updates against truncate and glimpse */
69 down(&lli->lli_size_sem);
70 *size = lov_merge_size(lli->lli_smd, 0);
71 *blocks = lov_merge_blocks(lli->lli_smd);
72 *mtime = lov_merge_mtime(lli->lli_smd, LTIME_S(inode->i_mtime));
74 up(&lli->lli_size_sem);
/*
 * ll_md_och_close(): send the MDS close RPC for open-client-handle @och and
 * free it.  @dirty/@epoch describe whether data was modified through this
 * handle; if so, size/blocks/mtime are attached to the close request when
 * ll_validate_size() says the cached values are trustworthy.
 * On forced umount (obd_no_recov) md_close() is skipped entirely since the
 * import is already deactivated.  @och is always released at the end
 * (cookie poisoned with DEAD_HANDLE_MAGIC so a racing closer can detect it).
 *
 * NOTE(review): elided lines hide the error paths (GOTO labels, returns)
 * and the branch structure around the CERROR/"last writer" sections.
 */
80 int ll_md_och_close(struct obd_export *md_exp, struct inode *inode,
81 struct obd_client_handle *och, int dirty,
84 struct ptlrpc_request *req = NULL;
85 struct mdc_op_data *op_data;
86 struct obd_device *obd;
91 obd = class_exp2obd(md_exp);
93 CERROR("Invalid MDC connection handle "LPX64"\n",
94 md_exp->exp_handle.h_cookie);
98 /* here we check if this is forced umount. If so this is called on
99 * canceling "open lock" and we do not call md_close() in this case , as
100 * it will not successful, as import is already deactivated. */
101 if (obd->obd_no_recov)
104 /* prepare @op_data for close request */
105 OBD_ALLOC(op_data, sizeof(*op_data));
109 memset(op_data, 0, sizeof(*op_data));
111 valid = (OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
112 OBD_MD_FLTYPE | OBD_MD_FLMODE | OBD_MD_FLEPOCH |
115 ll_inode2mdc_data(op_data, inode, valid);
/* deliberately disabled branch — kept as documentation of intent */
117 if (0 /* ll_is_inode_dirty(inode) */) {
118 op_data->flags = MDS_BFLAG_UNCOMMITTED_WRITES;
119 op_data->valid |= OBD_MD_FLFLAGS;
123 /* we modified data through this handle */
124 op_data->io_epoch = epoch;
125 op_data->flags |= MDS_BFLAG_DIRTY_EPOCH;
126 op_data->valid |= OBD_MD_FLFLAGS | OBD_MD_FLEPOCH;
/* only claim size/blocks/mtime if we hold full extent-lock coverage */
127 if (ll_validate_size(inode, &op_data->size, &op_data->blocks,
129 op_data->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLMTIME;
132 rc = md_close(md_exp, op_data, och, &req);
133 OBD_FREE(op_data, sizeof(*op_data));
136 /* we are the last writer, so the MDS has instructed us to get
137 * the file size and any write cookies, then close again. */
139 //ll_queue_done_writing(inode);
142 CERROR("inode %lu mdc close failed: rc = %d\n",
143 (unsigned long)inode->i_ino, rc);
146 ptlrpc_req_finished(req);
/* drop replay data and poison the handle before freeing it */
149 mdc_clear_open_replay_data(md_exp, och);
150 och->och_fh.cookie = DEAD_HANDLE_MAGIC;
151 OBD_FREE(och, sizeof *och);
155 /* must be called under lli_och_sem */
/*
 * ll_drop_needless_capa(): walk the inode's capability list under lli_lock
 * and discard capabilities whose matching MDS open handle is gone:
 * CAPA_READ capas when no read handle remains, CAPA_WRITE capas when no
 * write handle remains.  The actual drop call sits on lines elided from
 * this dump (only the DEBUG_CAPA traces are visible, commented out).
 */
156 void ll_drop_needless_capa(struct inode *inode)
158 struct ll_inode_info *lli = ll_i2info(inode);
159 struct obd_capa *ocapa, *tmp;
161 /* drop capa's we don't need */
162 spin_lock(&lli->lli_lock);
/* _safe variant: entries may be unlinked while iterating */
163 list_for_each_entry_safe(ocapa, tmp, &lli->lli_capas, u.client.lli_list) {
164 struct lustre_capa *capa = &ocapa->c_capa;
165 if (capa->lc_op == CAPA_READ) {
166 /* we need CAPA_READ only for read-only handles */
167 if (lli->lli_mds_read_och == NULL) {
168 //DEBUG_CAPA(D_ERROR, capa, "drop read capa");
171 } else if (ocapa->c_capa.lc_op == CAPA_WRITE) {
172 if (lli->lli_mds_write_och == NULL) {
173 //DEBUG_CAPA(D_ERROR, capa, "drop write capa");
178 spin_unlock(&lli->lli_lock);
/*
 * ll_md_real_close(): actually close the per-open-mode MDS handle for
 * @inode once the last local user (tracked by lli_open_fd_*_count) is gone.
 * @flags selects which handle (FMODE_WRITE / FMODE_EXEC / default read).
 * If other users remain, or the inode still has dirty pages, the close is
 * deferred (ll_queue_done_writing).  The write-mode path also captures and
 * resets the inode's io_epoch so it can be reported to the MDS.
 *
 * NOTE(review): elided lines hide the returns after each early-out `up()`
 * and the statement that snapshots *och_p into `och` before the final call.
 */
181 int ll_md_real_close(struct obd_export *md_exp,
182 struct inode *inode, int flags)
184 struct ll_inode_info *lli = ll_i2info(inode);
185 int freeing = inode->i_state & I_FREEING;
186 struct obd_client_handle **och_p;
187 struct obd_client_handle *och;
188 __u64 *och_usecount, epoch = 0;
189 int rc = 0, dirty = 0;
/* pick the handle slot and use count matching the open mode */
192 if (flags & FMODE_WRITE) {
193 och_p = &lli->lli_mds_write_och;
194 och_usecount = &lli->lli_open_fd_write_count;
195 } else if (flags & FMODE_EXEC) {
196 och_p = &lli->lli_mds_exec_och;
197 och_usecount = &lli->lli_open_fd_exec_count;
199 och_p = &lli->lli_mds_read_och;
200 och_usecount = &lli->lli_open_fd_read_count;
203 down(&lli->lli_och_sem);
204 if (*och_usecount) { /* There are still users of this handle, so
206 up(&lli->lli_och_sem);
209 if (ll_is_inode_dirty(inode)) {
210 /* the inode still has dirty pages, let's close later */
211 CDEBUG(D_INODE, "inode %lu/%u still has dirty pages\n",
212 inode->i_ino, inode->i_generation);
213 LASSERT(freeing == 0);
214 ll_queue_done_writing(inode);
215 up(&lli->lli_och_sem);
219 if (LLI_DIRTY_HANDLE(inode) && (flags & FMODE_WRITE)) {
220 clear_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
/* `0 &&` disables the deferred-close fast path; comment explains intent */
222 } else if (0 && !(flags & FMODE_SYNC) && !freeing) {
223 /* in order to speed up creation rate we pass
224 * closing to dedicated thread so we don't need
225 * to wait for close reply here -bzzz */
226 ll_queue_done_writing(inode);
227 up(&lli->lli_och_sem);
233 if (flags & FMODE_WRITE) {
234 epoch = lli->lli_io_epoch;
235 lli->lli_io_epoch = 0;
238 ll_drop_needless_capa(inode);
240 up(&lli->lli_och_sem);
243 * there might be a race and somebody have freed this och
244 * already. Another way to have this twice called is if file closing
245 * will fail due to netwok problems and on umount lock will be canceled
246 * and this will be called from block_ast callack.
/* DEAD_HANDLE_MAGIC marks handles already closed by ll_md_och_close() */
248 if (och && och->och_fh.cookie != DEAD_HANDLE_MAGIC)
249 rc = ll_md_och_close(md_exp, inode, och, dirty, epoch);
/*
 * ll_md_close(): per-struct-file close path.  Releases any group lock held
 * through this fd, decrements the open-mode use count under lli_och_sem,
 * and — if the client still holds an MDS_INODELOCK_OPEN ibits lock on the
 * inode (checked via ldlm_lock_match) — skips/continues the real MDS close
 * accordingly, finally freeing the ll_file_data.
 *
 * NOTE(review): elided lines hide `lockmode` setup, the branch shape around
 * ldlm_lock_match/ll_md_real_close, and the function's return.
 */
254 int ll_md_close(struct obd_export *md_exp, struct inode *inode,
257 struct ll_file_data *fd = file->private_data;
258 struct ll_inode_info *lli = ll_i2info(inode);
262 /* clear group lock, if present */
263 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
264 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
265 fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
266 rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
270 /* Let's see if we have good enough OPEN lock on the file and if
271 we can skip talking to MDS */
272 if (file->f_dentry->d_inode) {
274 struct obd_device *obddev;
275 struct lustre_handle lockh;
276 int flags = LDLM_FL_BLOCK_GRANTED;
277 struct ldlm_res_id file_res_id = {.name = {id_fid(&lli->lli_id),
278 id_group(&lli->lli_id)}};
279 ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
281 down(&lli->lli_och_sem);
/* drop this fd's reference on the matching open-mode use count */
282 if (fd->fd_omode & FMODE_WRITE) {
284 LASSERT(lli->lli_open_fd_write_count);
285 lli->lli_open_fd_write_count--;
286 } else if (fd->fd_omode & FMODE_EXEC) {
288 LASSERT(lli->lli_open_fd_exec_count);
289 lli->lli_open_fd_exec_count--;
292 LASSERT(lli->lli_open_fd_read_count);
293 lli->lli_open_fd_read_count--;
295 up(&lli->lli_och_sem);
297 obddev = md_get_real_obd(md_exp, &lli->lli_id);
298 if (!ldlm_lock_match(obddev->obd_namespace, flags, &file_res_id,
299 LDLM_IBITS, &policy, lockmode, &lockh))
301 rc = ll_md_real_close(md_exp, file->f_dentry->d_inode,
/* release the reference taken by a successful ldlm_lock_match() */
304 ldlm_lock_decref(&lockh, lockmode);
308 file->private_data = NULL;
309 OBD_SLAB_FREE(fd, ll_file_data_slab, sizeof(*fd));
313 /* While this returns an error code, fput() the caller does not, so we need
314 * to make every effort to clean up all of our state here. Also, applications
315 * rarely check close errors and even if an error is returned they will not
316 * re-try the close call.
/*
 * ll_file_release(): VFS ->release hook.  Bumps the RELEASE stat counter
 * and delegates all real work to ll_md_close().  The root dentry ("/") is
 * special-cased and skipped.  Elided lines hide the early return for the
 * root case and the final RETURN(rc).
 */
318 int ll_file_release(struct inode *inode, struct file *file)
320 struct ll_file_data *fd;
321 struct ll_sb_info *sbi = ll_i2sbi(inode);
325 CDEBUG(D_VFSTRACE, "VFS Op:inode="DLID4"(%p)\n",
326 OLID4(&ll_i2info(inode)->lli_id), inode);
328 /* don't do anything for / */
329 if (inode->i_sb->s_root == file->f_dentry)
332 lprocfs_counter_incr(sbi->ll_stats, LPROC_LL_RELEASE);
333 fd = (struct ll_file_data *)file->private_data;
336 rc = ll_md_close(sbi->ll_md_exp, inode, file);
/*
 * ll_intent_file_open(): re-issue an IT_OPEN intent to the MDS for @file,
 * used when the original lookup intent carried no open disposition.
 * Packs mdc_op_data for the (parent, name) pair, enqueues an LDLM_IBITS
 * lock with md_enqueue(), and copies any granted lock handle into the
 * intent for the caller.  @lmm/@lmmsize optionally carry striping metadata.
 *
 * NOTE(review): elided lines hide the -ENOMEM check after OBD_ALLOC, the
 * ll_prepare_mdc_data() argument tail, and the function's return path.
 */
340 static int ll_intent_file_open(struct file *file, void *lmm,
341 int lmmsize, struct lookup_intent *itp)
343 struct ll_sb_info *sbi = ll_i2sbi(file->f_dentry->d_inode);
344 const char *name = (char *)file->f_dentry->d_name.name;
345 struct dentry *parent = file->f_dentry->d_parent;
346 const int len = file->f_dentry->d_name.len;
347 struct lustre_handle lockh;
348 struct mdc_op_data *op_data;
354 OBD_ALLOC(op_data, sizeof(*op_data));
358 ll_prepare_mdc_data(op_data, parent->d_inode, NULL,
361 rc = md_enqueue(sbi->ll_md_exp, LDLM_IBITS, itp, LCK_PR, op_data,
362 &lockh, lmm, lmmsize, ldlm_completion_ast,
363 ll_mdc_blocking_ast, NULL);
364 OBD_FREE(op_data, sizeof(*op_data));
/* stash the granted lock handle in the intent for later lock_data setup */
366 if (LUSTRE_IT(itp)->it_lock_mode)
367 memcpy(&LUSTRE_IT(itp)->it_lock_handle,
368 &lockh, sizeof(lockh));
371 CERROR("lock enqueue: err: %d\n", rc);
/*
 * ll_och_fill(): populate an obd_client_handle from the mds_body in the
 * intent's open reply.  For write opens it records the MDS-assigned I/O
 * epoch in lli (warning if an unfinished epoch is being replaced), then
 * registers the open for replay with mdc_set_open_replay_data().
 *
 * NOTE(review): the reply body is asserted non-NULL and pre-swabbed —
 * callers must only invoke this after a successful intent open.
 */
376 int ll_och_fill(struct inode *inode, struct lookup_intent *it,
377 struct obd_client_handle *och)
379 struct ptlrpc_request *req = LUSTRE_IT(it)->it_data;
380 struct ll_inode_info *lli = ll_i2info(inode);
381 struct mds_body *body;
386 body = lustre_msg_buf (req->rq_repmsg, 1, sizeof (*body));
387 LASSERT (body != NULL); /* reply already checked out */
388 LASSERT_REPSWABBED (req, 1); /* and swabbed down */
390 memcpy(&och->och_fh, &body->handle, sizeof(body->handle));
391 och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
392 if (it->it_flags & FMODE_WRITE) {
/* a pre-existing different epoch suggests a missed close — log it */
393 if (lli->lli_io_epoch && lli->lli_io_epoch != body->io_epoch)
394 CDEBUG(D_ERROR, "new epoch?! "LPD64" != "LPD64"\n",
395 lli->lli_io_epoch, body->io_epoch);
396 lli->lli_io_epoch = body->io_epoch;
398 mdc_set_open_replay_data(ll_i2mdexp(inode), och,
399 LUSTRE_IT(it)->it_data);
/*
 * ll_local_open(): finish an open locally — fill @och from the intent
 * reply (when @och is non-NULL, per visible call sites), allocate the
 * per-file ll_file_data from its slab, hook it into file->private_data,
 * initialize readahead state and record the open mode.
 *
 * NOTE(review): elided lines hide the och==NULL branch shape and the
 * -ENOMEM handling after OBD_SLAB_ALLOC; see the comment at old line 428.
 */
404 int ll_local_open(struct file *file, struct lookup_intent *it,
405 struct obd_client_handle *och)
407 struct ll_file_data *fd;
408 struct inode *inode = file->f_dentry->d_inode;
413 rc = ll_och_fill(inode, it, och);
/* a pre-existing private_data would mean a double open on this file */
418 LASSERTF(file->private_data == NULL, "file %.*s/%.*s ino %lu/%u (%o)\n",
419 file->f_dentry->d_name.len, file->f_dentry->d_name.name,
420 file->f_dentry->d_parent->d_name.len,
421 file->f_dentry->d_parent->d_name.name,
422 file->f_dentry->d_inode->i_ino,
423 file->f_dentry->d_inode->i_generation,
424 file->f_dentry->d_inode->i_mode);
426 OBD_SLAB_ALLOC(fd, ll_file_data_slab, SLAB_KERNEL, sizeof *fd);
428 /* We can't handle this well without reorganizing ll_file_open and
429 * ll_md_close(), so don't even try right now. */
432 file->private_data = fd;
433 ll_readahead_init(inode, &fd->fd_ras);
434 fd->fd_omode = it->it_flags;
439 /* Open a file, and (for the very first open) create objects on the OSTs at
440 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
441 * creation or open until ll_lov_setstripe() ioctl is called. We grab
442 * lli_open_sem to ensure no other process will create objects, send the
443 * stripe MD to the MDS, or try to destroy the objects if that fails.
445 * If we already have the stripe MD locally then we don't request it in
446 * mdc_open(), by passing a lmm_size = 0.
448 * It is up to the application to ensure no other processes open this file
449 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
450 * used. We might be able to avoid races of that sort by getting lli_open_sem
451 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
452 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
/*
 * ll_file_open(): VFS ->open hook.  Main flow visible here:
 *  1. skip root dentry; translate f_flags into an open intent (oit) when
 *     the dentry carried none (e.g. dentry_open() from GNS);
 *  2. select the per-mode MDS handle slot (write/exec/read) in lli;
 *  3. under lli_och_sem, either reuse an existing open handle (closing any
 *     surplus open the intent produced via ll_md_och_close), or allocate a
 *     new handle, re-drive the open with ll_intent_file_open() if needed,
 *     and complete it with ll_local_open()/ll_set_capa();
 *  4. outside the semaphore, handle O_LOV_DELAY_CREATE, audit, and release
 *     the intent/request; error path frees *och_p (out_och_free label).
 *
 * NOTE(review): many interior lines are elided (early RETURNs, GOTO
 * targets, `it` initialization, use-count increments), so the exact
 * branch structure between the visible statements is not fully shown.
 */
454 int ll_file_open(struct inode *inode, struct file *file)
456 struct ll_inode_info *lli = ll_i2info(inode);
457 struct lookup_intent *it, oit = { .it_op = IT_OPEN,
458 .it_flags = file->f_flags };
459 struct lov_stripe_md *lsm;
460 struct ptlrpc_request *req;
462 struct obd_client_handle **och_p = NULL;
463 __u64 *och_usecount = NULL;
466 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n",
467 inode->i_ino, inode->i_generation, inode, file->f_flags);
469 /* don't do anything for / */
470 if (inode->i_sb->s_root == file->f_dentry)
/* +1 maps O_RDONLY/O_WRONLY/O_RDWR (0/1/2) onto FMODE_READ|FMODE_WRITE bits */
473 if ((file->f_flags+1) & O_ACCMODE)
475 if (file->f_flags & O_TRUNC)
480 /* sometimes LUSTRE_IT(it) may not be allocated like opening file by
481 * dentry_open() from GNS stuff. */
482 if (!it || !LUSTRE_IT(it)) {
484 rc = ll_intent_alloc(it);
489 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_OPEN);
491 /* mdc_intent_lock() didn't get a request ref if there was an open
492 * error, so don't do cleanup on the * request here (bug 3430) */
493 if (LUSTRE_IT(it)->it_disposition) {
494 rc = it_open_error(DISP_OPEN_OPEN, it);
499 /* Let's see if we have file open on MDS already. */
500 if (it->it_flags & FMODE_WRITE) {
501 och_p = &lli->lli_mds_write_och;
502 och_usecount = &lli->lli_open_fd_write_count;
503 } else if (it->it_flags & FMODE_EXEC) {
504 och_p = &lli->lli_mds_exec_och;
505 och_usecount = &lli->lli_open_fd_exec_count;
507 och_p = &lli->lli_mds_read_och;
508 och_usecount = &lli->lli_open_fd_read_count;
511 rc = ll_crypto_decrypt_key(inode, it);
515 down(&lli->lli_och_sem);
516 if (*och_p) { /* Open handle is present */
517 if (it_disposition(it, DISP_LOOKUP_POS) && /* Positive lookup */
518 it_disposition(it, DISP_OPEN_OPEN)) { /* & OPEN happened */
519 struct obd_client_handle *och;
520 /* Well, there's extra open request that we do not need,
521 let's close it somehow*/
522 OBD_ALLOC(och, sizeof (struct obd_client_handle));
524 up(&lli->lli_och_sem);
528 rc = ll_och_fill(inode, it, och);
530 up(&lli->lli_och_sem);
534 /* ll_md_och_close() will free och */
535 ll_md_och_close(ll_i2mdexp(inode), inode, och, 0, 0);
/* och == NULL: reuse the already-present handle, no fill needed */
539 rc = ll_local_open(file, it, NULL);
543 rc = ll_set_capa(inode, it, *och_p);
545 up(&lli->lli_och_sem);
549 LASSERT(*och_usecount == 0);
550 OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
552 GOTO(out, rc = -ENOMEM);
/* no usable intent/disposition: re-drive the open against the MDS */
555 if (!it || !LUSTRE_IT(it) || !LUSTRE_IT(it)->it_disposition) {
557 * we are going to replace intent here, and that may
558 * possibly change access mode (FMODE_EXEC can only be
559 * set in intent), but I hope it never happens (I was
560 * not able to trigger it yet at least) -- green
563 /* FIXME: FMODE_EXEC is not covered by O_ACCMODE! */
564 LASSERT(!(it->it_flags & FMODE_EXEC));
565 LASSERTF((it->it_flags & O_ACCMODE) ==
566 (oit.it_flags & O_ACCMODE), "Changing intent "
567 "flags %x to incompatible %x\n", it->it_flags,
570 rc = ll_intent_file_open(file, NULL, 0, it);
573 rc = it_open_error(DISP_OPEN_OPEN, it);
575 GOTO(out_och_free, rc);
577 mdc_set_lock_data(NULL, &LUSTRE_IT(it)->it_lock_handle,
578 file->f_dentry->d_inode);
580 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_OPEN);
581 rc = ll_local_open(file, it, *och_p);
582 LASSERTF(rc == 0, "rc = %d\n", rc);
584 rc = ll_set_capa(inode, it, *och_p);
586 up(&lli->lli_och_sem);
590 up(&lli->lli_och_sem);
593 * must do this outside lli_och_sem lock to prevent deadlock where
594 * different kind of OPEN lock for this same inode gets cancelled by
597 if (!S_ISREG(inode->i_mode))
602 if (file->f_flags & O_LOV_DELAY_CREATE ||
603 !(file->f_mode & FMODE_WRITE)) {
604 CDEBUG(D_INODE, "object creation was delayed\n");
608 file->f_flags &= ~O_LOV_DELAY_CREATE;
611 /* audit stuff if there was no RPC */
612 if (LUSTRE_IT(it)->it_data == 0)
613 ll_audit_log(inode, AUDIT_OPEN, rc);
615 req = LUSTRE_IT(it)->it_data;
616 ll_intent_drop_lock(it);
617 ll_intent_release(it);
620 ptlrpc_req_finished(req);
622 ll_open_complete(inode);
/* error path: release the handle slot allocated above */
626 OBD_FREE(*och_p, sizeof (struct obd_client_handle));
627 *och_p = NULL; /* OBD_FREE writes some magic there */
630 up(&lli->lli_och_sem);
636 /* Fills the obdo with the attributes for the inode defined by lsm */
/*
 * ll_lsm_getattr(): async getattr across all stripes of @lsm.  Seeds @oa
 * with the object id/group and the valid-flags mask, fires
 * obd_getattr_async() into a fresh request set, waits for the set, then
 * trims oa->o_valid to the attributes actually merged from the OSTs.
 *
 * NOTE(review): elided lines hide the @oa parameter declaration, error
 * checks after ptlrpc_prep_set()/obd_getattr_async(), and RETURN(rc).
 */
637 int ll_lsm_getattr(struct obd_export *exp, struct lov_stripe_md *lsm,
640 struct ptlrpc_request_set *set;
644 LASSERT(lsm != NULL);
646 memset(oa, 0, sizeof *oa);
647 oa->o_id = lsm->lsm_object_id;
648 oa->o_gr = lsm->lsm_object_gr;
649 oa->o_mode = S_IFREG;
650 oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
651 OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
652 OBD_MD_FLCTIME | OBD_MD_FLGROUP;
654 set = ptlrpc_prep_set();
658 rc = obd_getattr_async(exp, oa, lsm, set);
660 rc = ptlrpc_set_wait(set);
661 ptlrpc_set_destroy(set);
/* keep only attributes the OSTs actually supplied */
666 oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
667 OBD_MD_FLCTIME | OBD_MD_FLSIZE);
/*
 * ll_remove_suid(): strip setuid (and setgid when group-exec) bits from
 * @inode on write, unless the caller has CAP_FSETID — standard security
 * behavior mirroring the kernel's remove_suid().
 */
671 static inline void ll_remove_suid(struct inode *inode)
675 /* set S_IGID if S_IXGRP is set, and always set S_ISUID */
676 mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
678 /* was any of the uid bits set? */
679 mode &= inode->i_mode;
680 if (mode && !capable(CAP_FSETID)) {
681 inode->i_mode &= ~mode;
682 // XXX careful here - we cannot change the size
/*
 * ll_lock_to_stripe_offset(): map an OST extent @lock back to the stripe
 * index within @inode's lov_stripe_md.  Single-stripe files short-circuit
 * to stripe 0; otherwise obd_get_info("lock_to_stripe") asks the LOV.
 * The "check:" path (label elided) cross-validates the stripe's object
 * id/group against the lock's resource name; a mismatch returns
 * -ELDLM_NO_LOCK_DATA so the caller ignores the lock.
 */
686 static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
688 struct ll_inode_info *lli = ll_i2info(inode);
689 struct lov_stripe_md *lsm = lli->lli_smd;
690 struct obd_export *exp = ll_i2dtexp(inode);
693 struct ldlm_lock *lock;
694 struct lov_stripe_md *lsm;
695 } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
696 __u32 stripe, vallen = sizeof(stripe);
700 if (lsm->lsm_stripe_count == 1)
701 GOTO(check, stripe = 0);
703 /* get our offset in the lov */
704 rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe);
706 CERROR("obd_get_info: rc = %d\n", rc);
709 LASSERT(stripe < lsm->lsm_stripe_count);
/* resource name layout: name[0]=object id, name[2]=object group */
712 if (lsm->lsm_oinfo[stripe].loi_id != lock->l_resource->lr_name.name[0]||
713 lsm->lsm_oinfo[stripe].loi_gr != lock->l_resource->lr_name.name[2]){
714 LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64
715 " inode=%lu/%u (%p)\n",
716 lsm->lsm_oinfo[stripe].loi_id,
717 lsm->lsm_oinfo[stripe].loi_gr,
718 inode->i_ino, inode->i_generation, inode);
719 return -ELDLM_NO_LOCK_DATA;
725 /* Flush the page cache for an extent as its canceled. When we're on an LOV,
726 * we get a lock cancellation for each stripe, so we have to map the obd's
727 * region back onto the stripes in the file that it held.
729 * No one can dirty the extent until we've finished our work and they can
730 * enqueue another lock. The DLM protects us from ll_file_read/write here,
731 * but other kernel actors could have pages locked.
733 * Called with the DLM lock held. */
/*
 * ll_pgcache_remove_extent(): on cancellation of a per-stripe extent lock,
 * translate the stripe-local [start,end] byte range into file page indices
 * (stride arithmetic over stripe_size/stripe_count), tear down mmap()ed
 * pages that intersect it, then walk the pages: write back dirty ones
 * (unless LDLM_FL_DISCARD_DATA), and truncate pages not covered by any
 * other granted DLM lock (re-checked via ldlm_lock_match per page).
 *
 * NOTE(review): elided lines hide several loop-control statements
 * (skip-ahead within stripes, page-lock/unlock, continue/break points),
 * so the two visible loops are not complete as shown.
 */
734 void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
735 struct ldlm_lock *lock, __u32 stripe)
737 ldlm_policy_data_t tmpex;
738 unsigned long start, end, count, skip, i, j;
740 int rc, rc2, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
741 struct lustre_handle lockh;
744 memcpy(&tmpex, &lock->l_policy_data, sizeof(tmpex));
745 CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
746 inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
749 /* our locks are page granular thanks to osc_enqueue, we invalidate the
751 LASSERT((tmpex.l_extent.start & ~PAGE_CACHE_MASK) == 0);
752 LASSERT(((tmpex.l_extent.end + 1) & ~PAGE_CACHE_MASK) == 0);
756 start = tmpex.l_extent.start >> PAGE_CACHE_SHIFT;
757 end = tmpex.l_extent.end >> PAGE_CACHE_SHIFT;
/* multi-stripe: convert stripe-relative pages to file-relative pages */
758 if (lsm->lsm_stripe_count > 1) {
759 count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
760 skip = (lsm->lsm_stripe_count - 1) * count;
761 start += start/count * skip + stripe * count;
763 end += end/count * skip + stripe * count;
765 if (end < tmpex.l_extent.end >> PAGE_CACHE_SHIFT)
768 i = inode->i_size ? (inode->i_size - 1) >> PAGE_CACHE_SHIFT : 0;
772 CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
773 "count: %lu skip: %lu end: %lu%s\n", start, start % count,
774 count, skip, end, discard ? " (DISCARDING)" : "");
776 /* walk through the vmas on the inode and tear down mmaped pages that
777 * intersect with the lock. this stops immediately if there are no
778 * mmap()ed regions of the file. This is not efficient at all and
779 * should be short lived. We'll associate mmap()ed pages with the lock
780 * and will be able to find them directly */
782 for (i = start; i <= end; i += (j + skip)) {
783 j = min(count - (i % count), end - i + 1);
785 LASSERT(inode->i_mapping);
786 if (ll_teardown_mmaps(inode->i_mapping,
787 (__u64)i << PAGE_CACHE_SHIFT,
788 ((__u64)(i+j) << PAGE_CACHE_SHIFT) - 1) )
792 /* this is the simplistic implementation of page eviction at
793 * cancelation. It is careful to get races with other page
794 * lockers handled correctly. fixes from bug 20 will make it
795 * more efficient by associating locks with pages and with
796 * batching writeback under the lock explicitly. */
797 for (i = start, j = start % count; i <= end;
798 j++, i++, tmpex.l_extent.start += PAGE_CACHE_SIZE) {
800 CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
806 LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
807 LPU64" >= "LPU64" start %lu i %lu end %lu\n",
808 tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
811 if (!mapping_has_pages(inode->i_mapping)) {
812 CDEBUG(D_INODE|D_PAGE, "nothing left\n");
818 page = find_get_page(inode->i_mapping, i);
821 LL_CDEBUG_PAGE(D_PAGE, page, "lock page idx %lu ext "LPU64"\n",
822 i, tmpex.l_extent.start);
825 /* page->mapping to check with racing against teardown */
826 if (!discard && clear_page_dirty_for_io(page)) {
827 rc = ll_call_writepage(inode, page);
829 CERROR("writepage of page %p failed: %d\n",
831 /* either waiting for io to complete or reacquiring
832 * the lock that the failed writepage released */
836 tmpex.l_extent.end = tmpex.l_extent.start + PAGE_CACHE_SIZE - 1;
837 /* check to see if another DLM lock covers this page */
838 rc2 = ldlm_lock_match(lock->l_resource->lr_namespace,
839 LDLM_FL_BLOCK_GRANTED|LDLM_FL_CBPENDING |
841 &lock->l_resource->lr_name, LDLM_EXTENT,
842 &tmpex, LCK_PR | LCK_PW, &lockh);
843 if (rc2 == 0 && page->mapping != NULL) {
844 // checking again to account for writeback's lock_page()
845 LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
846 ll_ra_accounting(page, inode->i_mapping);
847 ll_truncate_complete_page(page);
/* drop the reference taken by find_get_page() */
850 page_cache_release(page);
852 LASSERTF(tmpex.l_extent.start <=
853 (lock->l_policy_data.l_extent.end == ~0ULL ? ~0ULL :
854 lock->l_policy_data.l_extent.end + 1),
855 "loop too long "LPU64" > "LPU64" start %lu i %lu end %lu\n",
856 tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
/*
 * ll_extent_lock_callback(): DLM blocking/cancel AST for OSC extent locks.
 * LDLM_CB_BLOCKING: cancel our lock so the contender can proceed.
 * LDLM_CB_CANCELING: flush/evict covered page-cache pages for the stripe
 * (ll_pgcache_remove_extent) and shrink the stripe's known-minimum-size
 * (kms) under lli_size_sem + the lock's resource lock.
 *
 * NOTE(review): elided lines hide the switch default, iput of the inode
 * reference from ll_inode_from_lock(), and the returns.
 */
861 static int ll_extent_lock_callback(struct ldlm_lock *lock,
862 struct ldlm_lock_desc *new, void *data,
865 struct lustre_handle lockh = { 0 };
/* small non-NULL values indicate a corrupted data cookie, not an inode */
869 if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
870 LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
875 case LDLM_CB_BLOCKING:
876 ldlm_lock2handle(lock, &lockh);
877 rc = ldlm_cli_cancel(&lockh);
879 CERROR("ldlm_cli_cancel failed: %d\n", rc);
881 case LDLM_CB_CANCELING: {
883 struct ll_inode_info *lli;
884 struct lov_stripe_md *lsm;
888 /* This lock wasn't granted, don't try to evict pages */
889 if (lock->l_req_mode != lock->l_granted_mode)
892 inode = ll_inode_from_lock(lock);
895 lli = ll_i2info(inode);
898 if (lli->lli_smd == NULL)
902 stripe = ll_lock_to_stripe_offset(inode, lock);
904 CERROR("ll_lock_to_stripe_offset failed: %d\n", stripe);
908 ll_pgcache_remove_extent(inode, lsm, lock, stripe);
910 down(&lli->lli_size_sem);
911 lock_res_and_lock(lock);
912 kms = ldlm_extent_shift_kms(lock,
913 lsm->lsm_oinfo[stripe].loi_kms);
915 if (lsm->lsm_oinfo[stripe].loi_kms != kms)
916 LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
917 lsm->lsm_oinfo[stripe].loi_kms, kms);
918 lsm->lsm_oinfo[stripe].loi_kms = kms;
919 unlock_res_and_lock(lock);
920 up(&lli->lli_size_sem);
921 //ll_try_done_writing(inode);
/*
 * ll_async_completion_ast(): completion AST for client-side async extent
 * enqueues.  Blocked states are not expected (LBUG).  On grant: map the
 * lock to its stripe, fold the server-returned LVB size into the stripe's
 * rss/kms under the resource lock, wake waiters, and drop the PR reference
 * taken at enqueue time.
 *
 * NOTE(review): elided lines hide stripe/lvb/kms declarations, the error
 * returns, and the iput pairing ll_inode_from_lock(); the lvb-less branch
 * (l_lvb_len == 0) is also not visible.
 */
934 int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
936 /* XXX ALLOCATE - 160 bytes */
937 struct inode *inode = ll_inode_from_lock(lock);
938 struct ll_inode_info *lli = ll_i2info(inode);
939 struct lustre_handle lockh = { 0 };
944 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
945 LDLM_FL_BLOCK_CONV)) {
946 LBUG(); /* not expecting any blocked async locks yet */
947 LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
949 ldlm_lock_dump(D_OTHER, lock, 0);
950 ldlm_reprocess_all(lock->l_resource);
954 LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
956 stripe = ll_lock_to_stripe_offset(inode, lock);
958 CERROR("ll_lock_to_stripe_offset failed: %d\n", stripe);
962 if (lock->l_lvb_len) {
963 struct lov_stripe_md *lsm = lli->lli_smd;
965 lvb = lock->l_lvb_data;
966 lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
969 lock_res_and_lock(lock);
/* kms can only grow toward the server-reported size, never past it here */
970 kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
971 kms = ldlm_extent_shift_kms(NULL, kms);
972 if (lsm->lsm_oinfo[stripe].loi_kms != kms)
973 LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
974 lsm->lsm_oinfo[stripe].loi_kms, kms);
975 lsm->lsm_oinfo[stripe].loi_kms = kms;
976 unlock_res_and_lock(lock);
982 wake_up(&lock->l_waitq);
984 ldlm_lock2handle(lock, &lockh);
985 ldlm_lock_decref(&lockh, LCK_PR);
/*
 * ll_glimpse_callback(): server-initiated glimpse AST — another client
 * wants our view of the file size.  Maps the lock to its stripe, packs an
 * ost_lvb reply carrying the stripe's kms plus the inode's m/a/ctime.
 * -ELDLM_NO_LOCK_DATA cases (no inode, no lsm, stripe-mapping failure)
 * are normal races and get an empty reply rather than ptlrpc_error().
 *
 * NOTE(review): the out/iput label bodies and the final return are on
 * elided lines.
 */
990 static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
992 struct ptlrpc_request *req = reqp;
993 struct inode *inode = ll_inode_from_lock(lock);
994 struct ll_inode_info *lli;
996 struct lov_stripe_md *lsm;
997 int rc, size = sizeof(*lvb), stripe;
1001 GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
1002 lli = ll_i2info(inode);
1004 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
1008 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
1010 /* First, find out which stripe index this lock corresponds to. */
1011 stripe = ll_lock_to_stripe_offset(inode, lock);
1013 CERROR("ll_lock_to_stripe_offset failed: %d\n", stripe);
1014 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
1017 rc = lustre_pack_reply(req, 1, &size, NULL);
1019 CERROR("lustre_pack_reply: %d\n", rc);
1023 lvb = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*lvb));
/* report kms, not i_size: kms includes locally cached writes */
1024 lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe].loi_kms;
1025 lvb->lvb_mtime = LTIME_S(inode->i_mtime);
1026 lvb->lvb_atime = LTIME_S(inode->i_atime);
1027 lvb->lvb_ctime = LTIME_S(inode->i_ctime);
1029 LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64,
1030 inode->i_size, stripe, lvb->lvb_size);
1036 /* These errors are normal races, so we don't want to fill the console
1037 * with messages by calling ptlrpc_error() */
1038 if (rc == -ELDLM_NO_LOCK_DATA)
1039 lustre_pack_reply(req, 0, NULL, NULL);
1041 req->rq_status = rc;
1045 /* NB: lov_merge_size will prefer locally cached writes if they extend the
1046 * file (because it prefers KMS over RSS when larger) */
/*
 * ll_glimpse_size(): refresh i_size/i_blocks/mtime by taking a PR extent
 * lock over the whole object with LDLM_FL_HAS_INTENT — servers answer via
 * the glimpse callback instead of granting a real lock when contended.
 * Merged values are installed under lli_size_sem, then the lock reference
 * is cancelled.  Positive rc from obd_enqueue is mapped to -EIO.
 */
1047 int ll_glimpse_size(struct inode *inode)
1049 struct ll_inode_info *lli = ll_i2info(inode);
1050 struct ll_sb_info *sbi = ll_i2sbi(inode);
1051 ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
1052 struct lustre_handle lockh = { 0 };
1053 int rc, flags = LDLM_FL_HAS_INTENT;
1056 CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
1058 rc = obd_enqueue(sbi->ll_dt_exp, lli->lli_smd, LDLM_EXTENT, &policy,
1059 LCK_PR, &flags, ll_extent_lock_callback,
1060 ldlm_completion_ast, ll_glimpse_callback, inode,
1061 sizeof(struct ost_lvb), lustre_swab_ost_lvb, &lockh);
1066 CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
1067 RETURN(rc > 0 ? -EIO : rc);
1070 down(&lli->lli_size_sem);
1071 inode->i_size = lov_merge_size(lli->lli_smd, 0);
1072 inode->i_blocks = lov_merge_blocks(lli->lli_smd);
1073 LTIME_S(inode->i_mtime) = lov_merge_mtime(lli->lli_smd,
1074 LTIME_S(inode->i_mtime));
1075 up(&lli->lli_size_sem);
1077 CDEBUG(D_DLMTRACE, "glimpse: size: "LPU64", blocks: "LPU64"\n",
1078 (__u64)inode->i_size, (__u64)inode->i_blocks);
1080 obd_cancel(sbi->ll_dt_exp, lli->lli_smd, LCK_PR, &lockh);
/*
 * ll_stime_record(): accumulate the elapsed time since @start into the
 * lprocfs service-time statistic @stime, under the superblock's ll_lock.
 */
1084 void ll_stime_record(struct ll_sb_info *sbi, struct timeval *start,
1085 struct obd_service_time *stime)
1087 struct timeval stop;
1088 do_gettimeofday(&stop);
1090 spin_lock(&sbi->ll_lock);
1091 lprocfs_stime_record(stime, &stop, start);
1092 spin_unlock(&sbi->ll_lock);
/*
 * ll_extent_lock(): take a DLM extent lock on @lsm for the range in
 * @policy with mode @mode, recording elapsed time into @stime.  Skipped
 * entirely when the fd or superblock opts out of locking
 * (LL_FILE_IGNORE_LOCK / LL_SBI_NOLCK).  A full-object lock additionally
 * refreshes i_size from kms — see the in-code comment about the
 * DLM -> i_sem ordering that makes this safe against truncate.
 *
 * NOTE(review): elided lines hide rc declaration/checks, the no-lock early
 * return value, and the conditional around the mtime merge at the end.
 */
1095 int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
1096 struct lov_stripe_md *lsm, int mode,
1097 ldlm_policy_data_t *policy, struct lustre_handle *lockh,
1098 int ast_flags, struct obd_service_time *stime)
1100 struct ll_inode_info *lli = ll_i2info(inode);
1101 struct ll_sb_info *sbi = ll_i2sbi(inode);
1102 struct timeval start;
1106 LASSERT(lockh->cookie == 0);
1108 /* XXX phil: can we do this? won't it screw the file size up? */
1109 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
1110 (sbi->ll_flags & LL_SBI_NOLCK))
1113 CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
1114 inode->i_ino, policy->l_extent.start, policy->l_extent.end);
1116 do_gettimeofday(&start);
1117 rc = obd_enqueue(sbi->ll_dt_exp, lsm, LDLM_EXTENT, policy, mode,
1118 &ast_flags, ll_extent_lock_callback,
1119 ldlm_completion_ast, ll_glimpse_callback, inode,
1120 sizeof(struct ost_lvb), lustre_swab_ost_lvb, lockh);
1124 ll_stime_record(sbi, &start, stime);
1126 if (policy->l_extent.start == 0 &&
1127 policy->l_extent.end == OBD_OBJECT_EOF) {
1128 /* vmtruncate()->ll_truncate() first sets the i_size and then
1129 * the kms under both a DLM lock and the i_sem. If we don't
1130 * get the i_sem here we can match the DLM lock and reset
1131 * i_size from the kms before the truncating path has updated
1132 * the kms. generic_file_write can then trust the stale i_size
1133 * when doing appending writes and effectively cancel the
1134 * result of the truncate. Getting the i_sem after the enqueue
1135 * maintains the DLM -> i_sem acquiry order. */
1136 down(&lli->lli_size_sem);
/* '1' selects the kms-preferring merge — include cached local writes */
1137 inode->i_size = lov_merge_size(lsm, 1);
1138 up(&lli->lli_size_sem);
1142 LTIME_S(inode->i_mtime) =
1143 lov_merge_mtime(lsm, LTIME_S(inode->i_mtime));
/*
 * ll_extent_unlock(): release an extent lock taken by ll_extent_lock().
 * Mirrors its no-lock opt-out (LL_FILE_IGNORE_LOCK / LL_SBI_NOLCK) so
 * lock/unlock pairs stay balanced, then cancels via obd_cancel().
 */
1149 int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
1150 struct lov_stripe_md *lsm, int mode,
1151 struct lustre_handle *lockh)
1153 struct ll_sb_info *sbi = ll_i2sbi(inode);
1157 /* XXX phil: can we do this? won't it screw the file size up? */
1158 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
1159 (sbi->ll_flags & LL_SBI_NOLCK))
1162 rc = obd_cancel(sbi->ll_dt_exp, lsm, mode, lockh);
/*
 * ll_file_read(): VFS read path.  Takes a read extent lock over the
 * requested range via the lock tree, then checks the merged kms under
 * lli_size_sem: if the read extends past kms a glimpse is needed to learn
 * the authoritative size, otherwise i_size is set from kms directly.
 * Kernel readahead is disabled (Lustre does its own) and the actual copy
 * is delegated to generic_file_read(), with audit logging of the result.
 *
 * NOTE(review): elided lines hide the count==0 early return, the lock-tree
 * error handling, and the 2.4-kernel half of the readahead #if.
 */
1167 static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
1170 struct inode *inode = file->f_dentry->d_inode;
1171 struct ll_inode_info *lli = ll_i2info(inode);
1172 struct lov_stripe_md *lsm = lli->lli_smd;
1173 struct ll_lock_tree tree;
1174 struct ll_lock_tree_node *node;
1179 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
1180 inode->i_ino, inode->i_generation, inode, count, *ppos);
1182 /* "If nbyte is 0, read() will return 0 and have no other results."
1183 * -- Single Unix Spec */
1187 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_READ_BYTES,
1193 node = ll_node_from_inode(inode, *ppos, *ppos + count - 1,
1196 tree.lt_fd = file->private_data;
1198 rc = ll_tree_lock(&tree, node, inode, buf, count,
1199 file->f_flags & O_NONBLOCK ? LDLM_FL_BLOCK_NOWAIT :0);
1203 down(&lli->lli_size_sem);
1204 kms = lov_merge_size(lsm, 1);
1205 if (*ppos + count - 1 > kms) {
1206 /* A glimpse is necessary to determine whether we return a short
1207 * read or some zeroes at the end of the buffer */
1208 up(&lli->lli_size_sem);
1209 retval = ll_glimpse_size(inode);
/* read fits inside kms: our cached view of the size is authoritative */
1213 inode->i_size = kms;
1214 up(&lli->lli_size_sem);
1217 CDEBUG(D_INFO, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
1218 inode->i_ino, count, *ppos, inode->i_size);
1220 /* turn off the kernel's read-ahead */
1221 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1224 file->f_ra.ra_pages = 0;
1226 retval = generic_file_read(file, buf, count, ppos);
1227 ll_audit_log(inode, AUDIT_READ, retval);
1230 ll_tree_unlock(&tree, inode);
/*
1235 * Write to a file (through the page cache).
 *
 * Takes a PW lock over the target extent (the whole file, 0..EOF, for
 * O_APPEND since the final offset is unknown until i_sem is held),
 * enforces the per-file maximum size with SIGXFSZ/-EFBIG per POSIX,
 * then delegates the copy to generic_file_write().  The tree lock is
 * released afterwards and write statistics are accounted.
 */
1237 static ssize_t ll_file_write(struct file *file, const char *buf,
1238 size_t count, loff_t *ppos)
1240 struct inode *inode = file->f_dentry->d_inode;
1241 loff_t maxbytes = ll_file_maxbytes(inode);
1242 struct ll_lock_tree tree;
1243 struct ll_lock_tree_node *node;
1248 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
1249 inode->i_ino, inode->i_generation, inode, count, *ppos);
1251 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
1253 /* POSIX, but surprised the VFS doesn't check this already */
1257 /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
1258 * called on the file, don't fail the below assertion (bug 2388). */
1259 if (file->f_flags & O_LOV_DELAY_CREATE &&
1260 ll_i2info(inode)->lli_smd == NULL)
1263 LASSERT(ll_i2info(inode)->lli_smd != NULL);
/* Append writes must lock to EOF; ordinary writes lock just the
 * extent being written. */
1265 if (file->f_flags & O_APPEND)
1266 node = ll_node_from_inode(inode, 0, OBD_OBJECT_EOF, LCK_PW);
1268 node = ll_node_from_inode(inode, *ppos, *ppos + count - 1,
1272 RETURN(PTR_ERR(node));
1274 tree.lt_fd = file->private_data;
1276 rc = ll_tree_lock(&tree, node, inode, buf, count,
1277 file->f_flags & O_NONBLOCK ? LDLM_FL_BLOCK_NOWAIT :0);
1281 /* this is ok, g_f_w will overwrite this under i_sem if it races
1282 * with a local truncate, it just makes our maxbyte checking easier */
1283 if (file->f_flags & O_APPEND)
1284 *ppos = inode->i_size;
1286 if (*ppos >= maxbytes) {
1287 if (count || *ppos > maxbytes) {
1288 send_sig(SIGXFSZ, current, 0);
1289 GOTO(out, retval = -EFBIG);
/* Partial write is allowed up to maxbytes; clamp count. */
1292 if (*ppos + count > maxbytes)
1293 count = maxbytes - *ppos;
1295 CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
1296 inode->i_ino, count, *ppos);
1298 /* mark open handle dirty */
1299 set_bit(LLI_F_DIRTY_HANDLE, &(ll_i2info(inode)->lli_flags));
1301 /* generic_file_write handles O_APPEND after getting i_sem */
1302 retval = generic_file_write(file, buf, count, ppos);
1306 /* ll_audit_log(inode, AUDIT_WRITE, retval); */
1308 ll_tree_unlock(&tree, inode);
1309 /* serialize with mmap/munmap/mremap */
1310 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
1311 retval > 0 ? retval : 0);
/*
 * ll_lov_setstripe_ea_info() - apply user-supplied striping (lov_user_md)
 * to @inode by replaying an intent-open against the MDS with the EA
 * attached, then closing the temporary file handle again.
 *
 * Serialized against other opens by lli_open_sem; bails out early if a
 * stripe already exists.  A scratch struct file from get_empty_filp() is
 * populated with this file's dentry/vfsmnt so the normal open/close paths
 * (ll_intent_file_open / ll_local_open / ll_file_release / ll_md_och_close)
 * can be reused.  NOTE(review): several error/cleanup lines are elided in
 * this sampled listing.
 */
1315 static int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
1316 int flags, struct lov_user_md *lum,
1319 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
1320 struct ll_inode_info *lli = ll_i2info(inode);
1321 struct ptlrpc_request *req = NULL;
1322 struct obd_client_handle *och;
1323 struct lov_stripe_md *lsm;
1324 struct lustre_md md;
/* f_flags+1 turns the O_RDONLY/O_WRONLY/O_RDWR encoding into a
 * testable bitmask (classic f_mode trick). */
1330 if ((file->f_flags+1) & O_ACCMODE)
1332 if (file->f_flags & O_TRUNC)
1335 down(&lli->lli_open_sem);
1338 up(&lli->lli_open_sem);
1339 CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
1344 f = get_empty_filp();
1348 f->f_dentry = file->f_dentry;
1349 f->f_vfsmnt = file->f_vfsmnt;
1352 rc = ll_intent_alloc(&oit);
1356 rc = ll_intent_file_open(f, lum, lum_size, &oit);
1359 if (it_disposition(&oit, DISP_LOOKUP_NEG))
1362 req = LUSTRE_IT(&oit)->it_data;
1363 rc = LUSTRE_IT(&oit)->it_status;
/* Refresh the local inode from the MDS reply so lli_smd reflects
 * the newly created striping. */
1368 rc = mdc_req2lustre_md(ll_i2mdexp(inode), req, 1,
1369 ll_i2dtexp(inode), &md);
1372 ll_update_inode(f->f_dentry->d_inode, &md);
1374 OBD_ALLOC(och, sizeof(struct obd_client_handle));
1376 GOTO(out, rc = -ENOMEM);
1378 /* actually ll_local_open() cannot fail! */
1379 rc = ll_local_open(f, &oit, och);
/* Drop the intent lock before closing; we only needed the open. */
1383 if (LUSTRE_IT(&oit)->it_lock_mode) {
1384 ldlm_lock_decref_and_cancel((struct lustre_handle *)
1385 &LUSTRE_IT(&oit)->it_lock_handle,
1386 LUSTRE_IT(&oit)->it_lock_mode);
1387 LUSTRE_IT(&oit)->it_lock_mode = 0;
1390 ll_intent_release(&oit);
1392 /* ll_file_release() will decrease the count, but won't free anything
1393 * because we have at least one more reference coming from actual
1395 down(&lli->lli_och_sem);
1396 lli->lli_open_fd_write_count++;
1397 up(&lli->lli_och_sem);
1398 rc = ll_file_release(f->f_dentry->d_inode, f);
1400 /* Now also destroy our supplemental och */
1401 ll_md_och_close(ll_i2mdexp(inode), f->f_dentry->d_inode, och, 0, 0);
1404 ll_intent_release(&oit);
1407 up(&lli->lli_open_sem);
1409 ptlrpc_req_finished(req);
/*
 * ll_lov_setea() - LL_IOC_LOV_SETEA handler: set a striping EA that
 * already names objects (MDS_OPEN_HAS_OBJS), admin-only.
 *
 * Copies a lov_user_md plus one lov_user_ost_data from user space into a
 * kernel buffer and hands it to ll_lov_setstripe_ea_info(); the buffer is
 * freed on the way out.
 */
1413 static int ll_lov_setea(struct inode *inode, struct file *file,
1416 int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
1417 int lum_size = sizeof(struct lov_user_md) +
1418 sizeof(struct lov_user_ost_data);
1419 struct lov_user_md *lump;
/* Specifying explicit object ids is privileged. */
1423 if (!capable(CAP_SYS_ADMIN))
1426 OBD_ALLOC(lump, lum_size);
1430 rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
1432 GOTO(out_free_lump, rc = -EFAULT);
1434 rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
1437 OBD_FREE(lump, lum_size);
/*
 * ll_lov_setstripe() - LL_IOC_LOV_SETSTRIPE handler: copy the user's
 * lov_user_md onto the stack, create the striping, then write the
 * resulting stripe information back to the user's buffer via the
 * LL_IOC_LOV_GETSTRIPE obd_iocontrol path.
 */
1441 static int ll_lov_setstripe(struct inode *inode, struct file *file,
1444 struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
1445 int rc, flags = FMODE_WRITE;
1448 /* Bug 1152: copy properly when this is no longer true */
1449 LASSERT(sizeof(lum) == sizeof(*lump));
1450 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
1451 rc = copy_from_user(&lum, lump, sizeof(lum));
1455 rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
/* Tell the getstripe path the user buffer holds no objects yet. */
1457 put_user(0, &lump->lmm_stripe_count);
1458 rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
1459 0, ll_i2info(inode)->lli_smd, lump);
/*
 * ll_lov_getstripe() - LL_IOC_LOV_GETSTRIPE handler: pack the inode's
 * current striping (lli_smd) into the user's buffer at @arg via
 * obd_iocontrol on the data export.
 */
1464 static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
1466 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1471 return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, lsm,
/*
 * ll_get_grouplock() - LL_IOC_GROUP_LOCK handler: take a whole-file
 * LCK_GROUP extent lock with group id @arg on behalf of this open file.
 *
 * Fails if this fd already holds a group lock.  On success the fd is
 * flagged GROUP_LOCKED|IGNORE_LOCK (ordinary extent locking is bypassed
 * while the group lock is held) and the lock handle is stashed in
 * fd->fd_cwlockh for the matching ll_put_grouplock().
 */
1475 static int ll_get_grouplock(struct inode *inode, struct file *file,
1478 struct ll_file_data *fd = file->private_data;
1479 ldlm_policy_data_t policy = { .l_extent = { .start = 0,
1480 .end = OBD_OBJECT_EOF}};
1481 struct lustre_handle lockh = { 0 };
1482 struct ll_inode_info *lli = ll_i2info(inode);
1483 struct lov_stripe_md *lsm = lli->lli_smd;
1487 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
1491 policy.l_extent.gid = arg;
1492 if (file->f_flags & O_NONBLOCK)
1493 flags = LDLM_FL_BLOCK_NOWAIT;
1495 rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags,
1496 &ll_i2sbi(inode)->ll_grouplock_stime);
1500 fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
1502 memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
/*
 * ll_put_grouplock() - LL_IOC_GROUP_UNLOCK handler: release the group
 * lock taken by ll_get_grouplock().
 *
 * Rejects the call if no group lock is held on this fd or if the caller's
 * gid does not match the one the lock was taken with; otherwise clears
 * the fd flags, cancels the lock via ll_extent_unlock() and zeroes the
 * saved handle.
 */
1507 static int ll_put_grouplock(struct inode *inode, struct file *file,
1510 struct ll_file_data *fd = file->private_data;
1511 struct ll_inode_info *lli = ll_i2info(inode);
1512 struct lov_stripe_md *lsm = lli->lli_smd;
1516 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1517 /* Ugh, it's already unlocked. */
1521 if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
1524 fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
1526 rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
1531 memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
/*
 * ll_file_ioctl() - ioctl dispatcher for Lustre regular files.
 *
 * Handles the LL_IOC_* family (fd flags, striping get/set, group locks,
 * fid lookup, crypto key type, audit, cred flush), passes the EXT3 flag
 * ioctls to ll_iocontrol(), answers GETVERSION locally, and forwards
 * anything unrecognized to the data export via obd_iocontrol().
 * tty ioctls ('T' type) are rejected up front so Lustre files don't
 * swallow terminal requests.
 */
1536 int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
1539 struct ll_file_data *fd = file->private_data;
1540 struct ll_sb_info *sbi = ll_i2sbi(inode);
1544 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
1545 inode->i_generation, inode, cmd);
1547 if (_IOC_TYPE(cmd) == 'T') /* tty ioctls */
1550 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_IOCTL);
1552 case LL_IOC_GETFLAGS:
1553 /* Get the current value of the file flags */
1554 return put_user(fd->fd_flags, (int *)arg);
1555 case LL_IOC_SETFLAGS:
1556 case LL_IOC_CLRFLAGS:
1557 /* Set or clear specific file flags */
1558 /* XXX This probably needs checks to ensure the flags are
1559 * not abused, and to handle any flag side effects.
1561 if (get_user(flags, (int *) arg))
1564 if (cmd == LL_IOC_SETFLAGS)
1565 fd->fd_flags |= flags;
1567 fd->fd_flags &= ~flags;
1569 case LL_IOC_LOV_SETSTRIPE:
1570 RETURN(ll_lov_setstripe(inode, file, arg));
1571 case LL_IOC_LOV_SETEA:
1572 RETURN(ll_lov_setea(inode, file, arg));
/* Resolve this inode's lustre_id and copy it out to the caller. */
1573 case IOC_MDC_SHOWFID: {
1574 struct lustre_id *idp = (struct lustre_id *)arg;
1575 struct lustre_id id;
1579 filename = getname((const char *)arg);
1580 if (IS_ERR(filename))
1581 RETURN(PTR_ERR(filename));
1583 ll_inode2id(&id, inode);
1585 rc = ll_get_fid(sbi->ll_md_exp, &id, filename, &id);
1587 GOTO(out_filename, rc);
1589 rc = copy_to_user(idp, &id, sizeof(*idp));
1591 GOTO(out_filename, rc = -EFAULT);
/* Record the GKS (crypto key service) type on the superblock. */
1598 case LL_IOC_KEY_TYPE: {
1599 struct obd_ioctl_data *data;
1602 int typelen, rc, len = 0;
1604 rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
1609 type = data->ioc_inlbuf1;
1610 typelen = data->ioc_inllen1;
1613 CDEBUG(D_INFO, "LL_IOC_KEY_TYPE missing filename\n");
1614 GOTO(out, rc = -EINVAL);
1616 ll_set_sb_gksinfo(inode->i_sb, type);
1619 obd_ioctl_freedata(buf, len);
1623 case LL_IOC_LOV_GETSTRIPE:
1624 RETURN(ll_lov_getstripe(inode, arg));
1625 case EXT3_IOC_GETFLAGS:
1626 case EXT3_IOC_SETFLAGS:
1627 RETURN( ll_iocontrol(inode, file, cmd, arg) );
1628 case LL_IOC_GROUP_LOCK:
1629 RETURN(ll_get_grouplock(inode, file, arg));
1630 case LL_IOC_GROUP_UNLOCK:
1631 RETURN(ll_put_grouplock(inode, file, arg));
1632 case EXT3_IOC_GETVERSION_OLD:
1633 case EXT3_IOC_GETVERSION:
1634 return put_user(inode->i_generation, (int *) arg);
1635 /* We need to special case any other ioctls we want to handle,
1636 * to send them to the MDS/OST as appropriate and to properly
1637 * network encode the arg field.
1638 case EXT2_IOC_GETVERSION_OLD:
1639 case EXT2_IOC_GETVERSION_NEW:
1640 case EXT2_IOC_SETVERSION_OLD:
1641 case EXT2_IOC_SETVERSION_NEW:
1642 case EXT3_IOC_SETVERSION_OLD:
1643 case EXT3_IOC_SETVERSION:
1645 case LL_IOC_FLUSH_CRED:
1646 RETURN(ll_flush_cred(inode));
1648 RETURN(ll_set_audit(inode, arg));
/* Default: punt unknown commands to the data stack. */
1650 RETURN( obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
/*
 * ll_file_seek() - llseek for Lustre files.
 *
 * SEEK_END (origin == 2) needs a cluster-valid i_size, so a PR extent
 * lock over the whole file is taken before i_size is read (under
 * lli_size_sem) and released at the end.  SEEK_CUR/SEEK_SET just adjust
 * the offset; the result is accepted only within [0, ll_file_maxbytes].
 */
1655 loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
1657 struct inode *inode = file->f_dentry->d_inode;
1658 struct ll_file_data *fd = file->private_data;
1659 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1660 struct lustre_handle lockh = {0};
1663 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),to=%llu\n", inode->i_ino,
1664 inode->i_generation, inode,
1665 offset + ((origin==2) ? inode->i_size : file->f_pos));
1667 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_LLSEEK);
1668 if (origin == 2) { /* SEEK_END */
1669 ldlm_policy_data_t policy = { .l_extent = {0, OBD_OBJECT_EOF }};
1670 struct ll_inode_info *lli = ll_i2info(inode);
1671 int nonblock = 0, rc;
1673 if (file->f_flags & O_NONBLOCK)
1674 nonblock = LDLM_FL_BLOCK_NOWAIT;
1676 rc = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
1677 nonblock, &ll_i2sbi(inode)->ll_seek_stime);
/* i_size is valid here because we hold the PR lock. */
1681 down(&lli->lli_size_sem);
1682 offset += inode->i_size;
1683 up(&lli->lli_size_sem);
1684 } else if (origin == 1) { /* SEEK_CUR */
1685 offset += file->f_pos;
1689 if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
1690 if (offset != file->f_pos) {
1691 file->f_pos = offset;
1692 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1694 file->f_version = ++event;
/* Drop the PR lock taken for SEEK_END (elided branch guards this). */
1701 ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
/*
 * ll_fsync() - flush a file's data and metadata to stable storage.
 *
 * Ordering: (1) wait for locally submitted pages to complete
 * (filemap_fdatawait plus the lli_dirty_wait workaround for missing
 * PG_writeback support), (2) sync the metadata via md_sync() to the MDS,
 * (3) if the file has striping, build an obdo and obd_sync() the data
 * objects on the OSTs.
 */
1705 int ll_fsync(struct file *file, struct dentry *dentry, int data)
1707 struct inode *inode = dentry->d_inode;
1708 struct ll_inode_info *lli = ll_i2info(inode);
1709 struct address_space *mapping = inode->i_mapping;
1710 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1711 struct lustre_id id;
1712 struct ptlrpc_request *req;
1715 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1716 inode->i_generation, inode);
1718 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_FSYNC);
1720 /* fsync's caller has already called _fdata{sync,write}, we want
1721 * that IO to finish before calling the osc and mdc sync methods */
1722 rc = filemap_fdatawait(inode->i_mapping);
1724 /* 2.6 implements filemap_fdatawait() using PG_writeback which we
1725 * don't support now. so, wait until all submited llaps are gone */
1726 wait_event(lli->lli_dirty_wait, !ll_is_inode_dirty(mapping->host));
1728 ll_inode2id(&id, inode);
1729 err = md_sync(ll_i2sbi(inode)->ll_md_exp, &id, &req);
1733 ptlrpc_req_finished(req);
/* Data side: only if the file actually has objects (lsm). */
1736 struct obdo *oa = obdo_alloc();
1739 RETURN(rc ? rc : -ENOMEM);
1741 oa->o_id = lsm->lsm_object_id;
1742 oa->o_gr = lsm->lsm_object_gr;
1743 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1745 obdo_from_inode(oa, inode, (OBD_MD_FLTYPE | OBD_MD_FLATIME |
1746 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1749 err = obd_sync(ll_i2sbi(inode)->ll_dt_exp, oa, lsm,
/*
 * ll_file_flock() - fcntl() POSIX record locking, implemented as LDLM
 * flock (LDLM_FLOCK) enqueues against the metadata namespace.
 *
 * The struct file_lock type/command pair is translated into an LDLM mode
 * and flags (F_GETLK -> LDLM_FL_TEST_LOCK, non-blocking commands ->
 * LDLM_FL_BLOCK_NOWAIT, unlock -> LCK_NL; elided case labels set these).
 * The lock's extent/pid/nid are packed into an ldlm flock policy and
 * sent with ldlm_cli_enqueue().  For test locks the granted lock's
 * range/pid/mode are copied back into @fl; for real locks the local
 * references are dropped immediately (the server keeps the lock state).
 */
1759 int ll_file_flock(struct file *file, int cmd, struct file_lock *fl)
1761 struct inode *inode = file->f_dentry->d_inode;
1762 struct ll_inode_info *li = ll_i2info(inode);
1763 struct ll_sb_info *sbi = ll_i2sbi(inode);
1764 struct obd_device *obd = md_get_real_obd(sbi->ll_md_exp, &li->lli_id);
1765 struct ldlm_res_id res_id = { .name = {id_fid(&li->lli_id),
1766 id_group(&li->lli_id), LDLM_FLOCK} };
1767 struct lustre_handle lockh = {0};
1768 struct ptlrpc_connection *conn;
1769 ptl_process_id_t ptlpid;
1770 ldlm_policy_data_t flock;
1771 ldlm_mode_t mode = 0;
1776 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu cmd=%d file_lock=%p\n",
1777 inode->i_ino, cmd, fl);
/* Only POSIX (fcntl) locks are supported here; BSD flock is not. */
1779 if (!(fl->fl_flags & FL_POSIX))
1782 switch (fl->fl_type) {
1788 /* An unlock request may or may not have any relation to
1789 * existing locks so we may not be able to pass a lock handle
1790 * via a normal ldlm_lock_cancel() request. The request may even
1791 * unlock a byte range in the middle of an existing lock. In
1792 * order to process an unlock request we need all of the same
1793 * information that is given with a normal read or write record
1794 * lock request. To avoid creating another ldlm unlock (cancel)
1795 * message we'll treat a LCK_NL flock request as an unlock. */
1802 CERROR("unknown fcntl lock type: %d\n", fl->fl_type);
1811 flags |= LDLM_FL_BLOCK_NOWAIT;
1822 flags = LDLM_FL_TEST_LOCK;
1823 /* Save the old mode so that if the mode in the lock changes we
1824 * can decrement the appropriate reader or writer refcount. */
1828 CERROR("unknown fcntl lock command: %d\n", cmd);
1832 /* Since we're called on every close to remove any oustanding Posix
1833 * flocks owned by the process it's worth a little effort to avoid
1834 * the RPCs if there are no flocks on this file from this node. */
1835 if (mode == LCK_NL && fl->fl_start == 0 && fl->fl_end >= OFFSET_MAX) {
1836 struct ldlm_resource *res;
1838 res = ldlm_resource_get(obd->obd_namespace, NULL,
1839 res_id, LDLM_FLOCK, 0);
1843 ldlm_resource_putref(res);
/* Our portals NID identifies this node's lock owner remotely. */
1846 conn = class_exp2cliimp(obd->obd_self_export)->imp_connection;
1847 if (!conn || !conn->c_peer.peer_ni)
1850 rc = PtlGetId(conn->c_peer.peer_ni->pni_ni_h, &ptlpid);
1854 flock.l_flock.start = fl->fl_start;
1855 flock.l_flock.end = fl->fl_end;
1856 /* XXX - ptlpid.pid is currently coming back a constant; i.e. 12345. */
1857 flock.l_flock.pid = fl->fl_pid;
1858 flock.l_flock.nid = ptlpid.nid;
1859 flock.l_flock.blocking_pid = 0;
1860 flock.l_flock.blocking_nid = 0;
1862 CDEBUG(D_DLMTRACE, "inode=%lu, pid="LPU64", flags=%#x, mode=%u, "
1863 "start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
1864 flags, mode, flock.l_flock.start, flock.l_flock.end);
1866 rc = ldlm_cli_enqueue(obd->obd_self_export, NULL, obd->obd_namespace,
1867 res_id, LDLM_FLOCK, &flock, mode, &flags,
1868 NULL, ldlm_flock_completion_ast, NULL,
1869 md_get_real_obd(sbi->ll_md_exp, &sbi->ll_rootid),
1870 NULL, 0, NULL, &lockh);
/* F_GETLK: report the conflicting lock (if any) back to the caller. */
1872 if (flags & LDLM_FL_TEST_LOCK) {
1873 struct ldlm_lock *lock = ldlm_handle2lock(&lockh);
1875 fl->fl_start = lock->l_policy_data.l_flock.start;
1876 fl->fl_end = lock->l_policy_data.l_flock.end;
1877 fl->fl_pid = lock->l_policy_data.l_flock.pid;
1879 switch (lock->l_granted_mode) {
1881 fl->fl_type = F_RDLCK;
1884 fl->fl_type = F_WRLCK;
1887 fl->fl_type = F_UNLCK;
1890 CERROR("unexpected lock type: %d returned from server."
1891 "\n", lock->l_granted_mode);
1896 /* offset the addref() done by ldlm_handle2lock() above. */
1897 LDLM_LOCK_PUT(lock);
1898 ldlm_lock_decref(&lockh, mode);
1899 ldlm_cli_cancel(&lockh);
1901 /* the LDLM_CBPENDING flag was set in the lock by the
1902 * completion AST so the ldlm_lock_decref() call above
1903 * scheduled a blocking AST which will do the final
1904 * lock put on the lock. */
/*
 * ll_inode_revalidate_it() - refresh a dentry's inode attributes from
 * the MDS via an IT_GETATTR intent lock, then validate the file size.
 *
 * After md_intent_lock()/revalidate_it_finish() refresh the metadata,
 * the size is either left unvalidated (no objects allocated yet,
 * !LLI_HAVE_FLSIZE) or refreshed with a glimpse RPC which prefers
 * locally cached writes that extend the file.
 */
1910 int ll_inode_revalidate_it(struct dentry *dentry)
1912 struct lookup_intent oit = { .it_op = IT_GETATTR };
1913 struct inode *inode = dentry->d_inode;
1914 struct ptlrpc_request *req = NULL;
1915 struct ll_inode_info *lli;
1916 struct lov_stripe_md *lsm;
1917 struct ll_sb_info *sbi;
1918 struct lustre_id id;
/* Diagnostic for an unexpected NULL-inode path (guard elided). */
1923 CERROR("REPORT THIS LINE TO PETER\n");
1927 sbi = ll_i2sbi(inode);
1929 ll_inode2id(&id, inode);
1930 lli = ll_i2info(inode);
1931 LASSERT(id_fid(&id) != 0);
1933 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), name=%s(%p)\n",
1934 inode->i_ino, inode->i_generation, inode, dentry->d_name.name,
1937 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
1938 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_REVALIDATE);
1941 rc = ll_intent_alloc(&oit);
1945 rc = ll_crypto_init_it_key(inode, &oit);
1949 rc = md_intent_lock(sbi->ll_md_exp, &id, NULL, 0, NULL, 0, &id,
1950 &oit, 0, &req, ll_mdc_blocking_ast);
1954 rc = revalidate_it_finish(req, 1, &oit, dentry);
1959 ll_lookup_finish_locks(&oit, dentry);
/* No RPC was needed: the attrs were cached; still log the stat. */
1962 if (!req && (oit.it_op & IT_GETATTR))
1963 ll_audit_log(inode, AUDIT_STAT, 0);
1966 if (!LLI_HAVE_FLSIZE(inode)) {
1967 /* if object not yet allocated, don't validate size */
1970 /* ll_glimpse_size() will prefer locally cached
1971 * writes if they extend the file */
1972 rc = ll_glimpse_size(inode);
1978 ll_intent_release(&oit);
1980 ptlrpc_req_finished(req);
1985 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/*
 * ll_getattr() - 2.6-kernel getattr: revalidate the inode from the MDS
 * first, then fill struct kstat from the (now fresh) inode fields.
 * size/blocks are copied under lli_size_sem to stay consistent with the
 * size-update paths; st_dev is synthesized from the inode's id group.
 */
1986 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
1989 struct inode *inode = de->d_inode;
1990 struct ll_inode_info *lli = ll_i2info(inode);
1992 res = ll_inode_revalidate_it(de);
1993 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_GETATTR);
1998 stat->ino = inode->i_ino;
1999 stat->mode = inode->i_mode;
2000 stat->nlink = inode->i_nlink;
2001 stat->uid = inode->i_uid;
2002 stat->gid = inode->i_gid;
2003 stat->atime = inode->i_atime;
2004 stat->mtime = inode->i_mtime;
2005 stat->ctime = inode->i_ctime;
2006 stat->blksize = inode->i_blksize;
2008 down(&lli->lli_size_sem);
2009 stat->size = inode->i_size;
2010 stat->blocks = inode->i_blocks;
2011 up(&lli->lli_size_sem);
2013 stat->rdev = kdev_t_to_nr(inode->i_rdev);
2014 stat->dev = id_group(&ll_i2info(inode)->lli_id);
/*
 * ll_setxattr_internal() - send an extended-attribute set (or remove,
 * with value == NULL — see ll_removexattr) to the MDS as a setattr RPC.
 *
 * ACL access EAs are refused on remote-client mounts and, when set
 * locally, have a crypto MAC computed first (ll_crypto_get_mac).  The
 * attribute name/value plus optional crypto key are packed into
 * md_setattr(); the op_data is heap-allocated and freed after the RPC.
 */
2020 int ll_setxattr_internal(struct inode *inode, const char *name,
2021 const void *value, size_t size, int flags,
2024 struct ll_sb_info *sbi = ll_i2sbi(inode);
2025 struct ptlrpc_request *request = NULL;
2026 struct mdc_op_data *op_data;
2029 int rc = 0, key_size = 0;
2032 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu\n", inode->i_ino);
2033 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_SETXATTR);
2035 if (sbi->ll_remote && !strcmp(name, XATTR_NAME_ACL_ACCESS))
2036 RETURN(-EOPNOTSUPP);
2038 memset(&attr, 0x0, sizeof(attr));
2039 attr.ia_valid |= valid;
2040 attr.ia_attr_flags = flags;
2042 if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) {
2043 rc = ll_crypto_get_mac(inode, &attr, (void *)value, size,
2046 CERROR("can not get right mac, rc=%d\n", rc);
2051 OBD_ALLOC(op_data, sizeof(*op_data));
2055 ll_inode2mdc_data(op_data, inode, (OBD_MD_FLID | OBD_MD_MEA));
2057 rc = md_setattr(sbi->ll_md_exp, op_data, &attr,
2058 (void *)name, strnlen(name, XATTR_NAME_MAX) + 1,
2059 (void *)value, size, key, key_size, &request);
2060 OBD_FREE(op_data, sizeof(*op_data));
2063 CDEBUG(D_SEC, "md_setattr fails: rc = %d\n", rc);
/* key is only allocated by the crypto path above; free if present. */
2067 if (key && key_size)
2068 OBD_FREE(key, key_size);
2069 ptlrpc_req_finished(request);
/*
 * ll_setxattr() - VFS setxattr entry point: forward to
 * ll_setxattr_internal(), and when a POSIX access ACL was successfully
 * stored, parse and validate it locally and install it in
 * lli->lli_posix_acl (under lli_lock, releasing any previous ACL) so
 * subsequent permission checks use the fresh copy.
 */
2073 int ll_setxattr(struct dentry *dentry, const char *name, const void *value,
2074 size_t size, int flags)
2077 struct posix_acl *acl;
2078 struct ll_inode_info *lli;
2081 rc = ll_setxattr_internal(dentry->d_inode, name, value, size,
2084 /* update inode's acl info */
2085 if (rc == 0 && strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) {
2087 acl = posix_acl_from_xattr(value, size);
2089 CERROR("convert from xattr to acl error: %ld",
2093 error = posix_acl_valid(acl);
2095 CERROR("acl valid error: %d", error);
2096 posix_acl_release(acl);
2104 lli = ll_i2info(dentry->d_inode);
2105 spin_lock(&lli->lli_lock);
2106 if (lli->lli_posix_acl != NULL)
2107 posix_acl_release(lli->lli_posix_acl);
2108 lli->lli_posix_acl = acl;
2109 spin_unlock(&lli->lli_lock);
/*
 * ll_removexattr() - VFS removexattr: implemented as a setxattr with a
 * NULL value and zero size (the elided trailing argument selects the
 * remove semantics on the MDS side).
 */
2116 int ll_removexattr(struct dentry *dentry, const char *name)
2118 return ll_setxattr_internal(dentry->d_inode, name, NULL, 0, 0,
/*
 * ll_getxattr_internal() - fetch an extended attribute (or, with
 * name == NULL and OBD_MD_FLXATTRLIST, the attribute name list) from the
 * MDS via md_getattr().
 *
 * The reply carries a mds_body in buffer 0 (eadatasize = payload length)
 * and the EA bytes in buffer 1, which are copied into @value when the
 * caller supplied a buffer.  Returns the EA size on success (GOTO with
 * rc = ea_size); -ENODATA/-EOPNOTSUPP from the MDS are passed through
 * without the error printk.
 */
2122 int ll_getxattr_internal(struct inode *inode, const char *name,
2123 void *value, size_t size, __u64 valid)
2125 struct ptlrpc_request *request = NULL;
2126 struct ll_sb_info *sbi = ll_i2sbi(inode);
2127 struct lustre_id id;
2128 struct mds_body *body;
2133 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_GETXATTR);
2135 if (sbi->ll_remote && !strcmp(name, XATTR_NAME_ACL_ACCESS))
2136 RETURN(-EOPNOTSUPP);
2138 ll_inode2id(&id, inode);
2139 rc = md_getattr(sbi->ll_md_exp, &id, valid, name, NULL, 0,
2140 size, NULL, &request);
2142 if (rc != -ENODATA && rc != -EOPNOTSUPP)
2143 CERROR("rc = %d\n", rc);
2147 body = lustre_msg_buf(request->rq_repmsg, 0, sizeof(*body));
2148 LASSERT(body != NULL);
2149 LASSERT_REPSWABBED(request, 0);
2151 ea_size = body->eadatasize;
2153 GOTO(out, rc = ea_size);
2155 LASSERT(ea_size <= request->rq_repmsg->buflens[1]);
2156 ea_data = lustre_msg_buf(request->rq_repmsg, 1, ea_size);
2157 LASSERT(ea_data != NULL);
2158 LASSERT_REPSWABBED(request, 1);
2161 memcpy(value, ea_data, ea_size);
2165 ptlrpc_req_finished(request);
/*
 * ll_getxattr() - VFS getxattr: thin wrapper that requests a single
 * named EA (OBD_MD_FLXATTR) through ll_getxattr_internal().
 */
2169 int ll_getxattr(struct dentry *dentry, const char *name, void *value,
2172 return ll_getxattr_internal(dentry->d_inode, name,
2173 value, size, OBD_MD_FLXATTR);
/*
 * ll_listxattr() - VFS listxattr: same RPC path as getxattr but with a
 * NULL name and OBD_MD_FLXATTRLIST, returning the name list into @list.
 */
2176 int ll_listxattr(struct dentry *dentry, char *list, size_t size)
2178 return ll_getxattr_internal(dentry->d_inode, NULL, list, size,
2179 OBD_MD_FLXATTRLIST);
/*
2183 * Here we hold DLM lock across permission check, to get better
2184 * conformance especially for remote acl. will introduce certain
2185 * overhead, maybe need a profile.
 *
 * lustre_check_acl(): take an IT_GETATTR intent lock on the inode (via a
 * stack-local dentry), then check @mask either remotely
 * (ll_remote_acl_permission on remote-client mounts) or locally against
 * the cached POSIX ACL (duplicated under lli_lock so it can be used
 * outside the spinlock).  Locks and the intent are released on exit.
 */
2188 lustre_check_acl(struct inode *inode, int mask)
2190 struct lookup_intent it = { .it_op = IT_GETATTR };
2191 struct dentry de = { .d_inode = inode };
2192 struct ll_sb_info *sbi;
2193 struct lustre_id id;
2194 struct ptlrpc_request *req = NULL;
2195 struct ll_inode_info *lli = ll_i2info(inode);
2196 struct posix_acl *acl;
2200 sbi = ll_i2sbi(inode);
2201 ll_inode2id(&id, inode);
2203 if (ll_intent_alloc(&it))
2206 rc = md_intent_lock(sbi->ll_md_exp, &id, NULL, 0, NULL, 0, &id,
2207 &it, 0, &req, ll_mdc_blocking_ast);
2209 ll_intent_free(&it);
2213 rc = revalidate_it_finish(req, 1, &it, &de);
2215 ll_intent_release(&it);
2219 if (sbi->ll_remote) {
2220 rc = ll_remote_acl_permission(inode, mask);
2222 spin_lock(&lli->lli_lock);
2223 acl = posix_acl_dup(ll_i2info(inode)->lli_posix_acl);
2224 spin_unlock(&lli->lli_lock);
2229 rc = posix_acl_permission(inode, acl, mask);
2230 posix_acl_release(acl);
2234 ll_lookup_finish_locks(&it, &de);
2235 ll_intent_free(&it);
2239 ptlrpc_req_finished(req);
/*
 * ll_inode_permission() - VFS permission hook.  On remote-client mounts
 * the locally cached mode bits are not authoritative, so the check goes
 * straight to lustre_check_acl(); otherwise the standard
 * generic_permission() is used with lustre_check_acl as the ACL callback.
 */
2244 int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
2246 struct ll_sb_info *sbi = ll_i2sbi(inode);
2248 /* for remote client, permission bits in inode doesn't
2249 * play a role anymore.
2252 return lustre_check_acl(inode, mask);
2254 return generic_permission(inode, mask, lustre_check_acl);
/*
 * File operations table for Lustre regular files; wired into inodes by
 * the llite inode-setup code.  sendfile is only available on 2.6+
 * kernels, hence the version guard.
 */
2257 struct file_operations ll_file_operations = {
2258 .read = ll_file_read,
2259 .write = ll_file_write,
2260 .ioctl = ll_file_ioctl,
2261 .open = ll_file_open,
2262 .release = ll_file_release,
2263 .mmap = ll_file_mmap,
2264 .llseek = ll_file_seek,
2265 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
2266 .sendfile = generic_file_sendfile,
2269 .lock = ll_file_flock
2272 struct inode_operations ll_file_inode_operations = {
2273 .setattr = ll_setattr,
2274 .truncate = ll_truncate,
2275 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
2276 .getattr = ll_getattr,
2278 .revalidate_it = ll_inode_revalidate_it,
2280 .setxattr = ll_setxattr,
2281 .getxattr = ll_getxattr,
2282 .listxattr = ll_listxattr,
2283 .removexattr = ll_removexattr,
2284 .permission = ll_inode_permission,