+ LASSERTF(it->it_magic == INTENT_MAGIC, "bad intent magic: %x\n",
+ it->it_magic);
+ }
+#endif
+
+ if (!it || it->it_op == IT_GETXATTR)
+ it = *itp = deft;
+
+ if (it->d.fs_data)
+ return;
+
+ if (ll_intent_alloc(it)) {
+ CERROR("Failed to allocate memory for lustre specific intent "
+ "data\n");
+ /* XXX: we cannot return status just yet */
+ LBUG();
+ }
+}
+
+/*
+ * Allocate the lustre-private data attached to a lookup intent and
+ * install the release callback.
+ *
+ * Returns 0 on success (including the benign case where the intent
+ * already carries private data, which is only logged), or -ENOMEM if
+ * the slab allocation fails.
+ */
+int ll_intent_alloc(struct lookup_intent *it)
+{
+	struct lustre_intent_data *lid;
+
+	/* Double allocation is a caller bug; warn but do not fail. */
+	if (it->d.fs_data != NULL) {
+		CERROR("Intent alloc on already allocated intent\n");
+		return 0;
+	}
+
+	OBD_SLAB_ALLOC(lid, ll_intent_slab, SLAB_KERNEL, sizeof(*lid));
+	if (lid == NULL) {
+		CERROR("Failed to allocate memory for lustre specific intent "
+		       "data\n");
+		return -ENOMEM;
+	}
+
+	it->d.fs_data = lid;
+	it->it_op_release = ll_intent_release;
+	return 0;
+}
+
+/*
+ * Release the lustre-private intent data back to the slab, if any is
+ * attached, and clear the pointer so the intent cannot be freed twice.
+ */
+void ll_intent_free(struct lookup_intent *it)
+{
+	if (it->d.fs_data == NULL)
+		return;
+
+	OBD_SLAB_FREE(it->d.fs_data, ll_intent_slab,
+		      sizeof(struct lustre_intent_data));
+	it->d.fs_data = NULL;
+}
+
+/*
+ * Return 1 iff the dentry names one of the special directory entries
+ * "." or "..", 0 otherwise.
+ *
+ * BUGFIX: the previous version let `case 2` fall through to `case 1`
+ * when the second character was not '.', so any two-character name
+ * beginning with '.' (e.g. ".a") was wrongly treated as special.
+ */
+static inline int
+ll_special_name(struct dentry *de)
+{
+	if (de->d_name.name[0] != '.')
+		return 0;
+
+	switch (de->d_name.len) {
+	case 1:
+		return 1;				/* "." */
+	case 2:
+		return de->d_name.name[1] == '.';	/* ".." only */
+	default:
+		return 0;
+	}
+}
+
+int ll_revalidate_it(struct dentry *de, int flags, struct nameidata *nd,
+ struct lookup_intent *it)
+{
+ struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
+ struct ptlrpc_request *req = NULL;
+ int gns_it, gns_flags, rc = 0;
+ struct obd_export *exp;
+ struct it_cb_data icbd;
+ struct lustre_id pid;
+ struct lustre_id cid;
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%s (%p), intent=%s\n", de->d_name.name,
+ de, LL_IT2STR(it));
+
+ /* Cached negative dentries are unsafe for now - look them up again */
+ if (de->d_inode == NULL)
+ RETURN(0);
+
+ /*
+ * root of the tree is always valid, attributes would be fixed in
+ * ll_inode_revalidate_it()
+ */
+ if (de->d_sb->s_root == de)
+ RETURN(1);
+
+ CDEBUG(D_INODE, "revalidate 0x%p: %*s -> %lu/%lu\n",
+ de, de->d_name.len, de->d_name.name,
+ (unsigned long) de->d_inode->i_ino,
+ (unsigned long) de->d_inode->i_generation);
+
+ exp = ll_i2mdexp(de->d_inode);
+ ll_inode2id(&pid, de->d_parent->d_inode);
+ ll_inode2id(&cid, de->d_inode);
+ LASSERT(id_fid(&cid) != 0);
+
+ icbd.icbd_parent = de->d_parent->d_inode;
+ icbd.icbd_childp = &de;
+
+ /*
+ * never execute intents for mount points. Attributes will be fixed up
+ * in ll_inode_revalidate_it().
+ */
+ if (d_mountpoint(de))
+ RETURN(1);
+
+ if (nd != NULL)
+ nd->mnt->mnt_last_used = jiffies;
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
+ gns_it = nd ? nd->intent.open.it_op : IT_OPEN;
+ gns_flags = nd ? nd->flags : LOOKUP_CONTINUE;
+
+ if (it && it->it_op == IT_GETATTR)
+ it = NULL; /* will use it_lookup */
+ else if (it && (it->it_op == IT_OPEN) && de->d_inode) {
+ /* open lock stuff */
+ struct inode *inode = de->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle **och_p;
+ __u64 *och_usecount;
+ struct obd_device *obddev;
+ struct lustre_handle lockh;
+ int flags = LDLM_FL_BLOCK_GRANTED;
+ ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN}};
+ struct ldlm_res_id file_res_id = {.name = {id_fid(&lli->lli_id),
+ id_group(&lli->lli_id)}};
+ int lockmode;
+
+ if (it->it_flags & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ lockmode = LCK_CW;
+ } else if (it->it_flags & FMODE_EXEC) {
+ och_p = &lli->lli_mds_exec_och;
+ och_usecount = &lli->lli_open_fd_exec_count;
+ lockmode = LCK_PR;
+ } else {
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ lockmode = LCK_CR;
+ }
+
+ /* Check for the proper lock */
+ obddev = md_get_real_obd(exp, &lli->lli_id);
+ if (!ldlm_lock_match(obddev->obd_namespace, flags, &file_res_id,
+ LDLM_IBITS, &policy, lockmode, &lockh))
+ goto do_lock;
+ down(&lli->lli_och_sem);
+ if (*och_p) { /* Everything is open already, do nothing */
+ /*(*och_usecount)++; Do not let them steal our open
+ handle from under us */
+ /* XXX The code above was my original idea, but in case
+ we have the handle, but we cannot use it due to later
+ checks (e.g. O_CREAT|O_EXCL flags set), nobody
+ would decrement counter increased here. So we just
+ hope the lock won't be invalidated in between. But
+ if it would be, we'll reopen the open request to
+ MDS later during file open path */
+ up(&lli->lli_och_sem);
+ if (ll_intent_alloc(it))
+ LBUG();
+ memcpy(&LUSTRE_IT(it)->it_lock_handle, &lockh,
+ sizeof(lockh));
+ LUSTRE_IT(it)->it_lock_mode = lockmode;
+
+ /*
+ * we do not check here for possible GNS dentry as if
+ * file is opened on it, it is mounted already and we do
+ * not need do anything. --umka
+ */
+ RETURN(1);
+ } else {
+ /* Hm, interesting. Lock is present, but no open
+ handle? */
+ up(&lli->lli_och_sem);
+ ldlm_lock_decref(&lockh, lockmode);
+ }