1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Lite routines to issue a secondary close after writeback
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #include <linux/module.h>
26 #define DEBUG_SUBSYSTEM S_LLITE
28 //#include <lustre_mdc.h>
29 #include <lustre_lite.h>
30 #include "llite_internal.h"
32 /* record that a write is in flight */
/*
 * llap_write_pending() - note that @inode has a write in flight.
 * Marks the inode Size-on-MDS dirty (LLIF_SOM_DIRTY) and, if @llap is
 * non-NULL and not already queued, links it onto the inode's
 * lli_pending_write_llaps list.  All updates are made under
 * lli->lli_lock.
 * NOTE(review): this is a sampled listing; interior lines (opening
 * brace, ENTRY/EXIT macros) are not visible here.
 */
33 void llap_write_pending(struct inode *inode, struct ll_async_page *llap)
35 struct ll_inode_info *lli = ll_i2info(inode);
38 spin_lock(&lli->lli_lock);
/* Inode now carries data the MDS does not yet know about. */
39 lli->lli_flags |= LLIF_SOM_DIRTY;
/* Only queue the page once; list_empty() means "not yet queued". */
40 if (llap && list_empty(&llap->llap_pending_write))
41 list_add(&llap->llap_pending_write,
42 &lli->lli_pending_write_llaps);
43 spin_unlock(&lli->lli_lock);
47 /* record that a write has completed */
/*
 * llap_write_complete() - note that the write covering @llap finished.
 * Removes @llap from the inode's pending-write list under
 * lli->lli_lock.
 * NOTE(review): the function's return statement falls outside this
 * sampled view; presumably it reports whether the page was actually on
 * the list — TODO confirm against the complete file.
 */
48 int llap_write_complete(struct inode *inode, struct ll_async_page *llap)
50 struct ll_inode_info *lli = ll_i2info(inode);
54 spin_lock(&lli->lli_lock);
55 if (llap && !list_empty(&llap->llap_pending_write)) {
56 list_del_init(&llap->llap_pending_write);
59 spin_unlock(&lli->lli_lock);
63 /* Queue DONE_WRITING if
64 * - done writing is allowed;
65 * - inode has no dirty pages; */
/*
 * ll_queue_done_writing() - OR @flags into lli_flags and, when
 * DONE_WRITING is now allowed and no pages remain on the
 * pending-write list, hand the inode to the close thread
 * (lcq_head) and wake it.  Runs under lli->lli_lock; the close
 * queue itself is protected by lcq->lcq_lock.
 * NOTE(review): sampled listing — braces/ENTRY/EXIT lines are not
 * all visible here.
 */
66 void ll_queue_done_writing(struct inode *inode, unsigned long flags)
68 struct ll_inode_info *lli = ll_i2info(inode);
70 spin_lock(&lli->lli_lock);
71 lli->lli_flags |= flags;
73 if ((lli->lli_flags & LLIF_DONE_WRITING) &&
74 list_empty(&lli->lli_pending_write_llaps)) {
75 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
/* SOM attributes should not still be MDS-valid at this point;
 * warn rather than assert (can legitimately happen after recovery). */
77 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
78 CWARN("ino %lu/%u(flags %lu) som valid it just after "
80 inode->i_ino, inode->i_generation,
82 /* DONE_WRITING is allowed and inode has no dirty page. */
83 spin_lock(&lcq->lcq_lock);
85 LASSERT(list_empty(&lli->lli_close_list));
86 CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
87 inode->i_ino, inode->i_generation);
88 list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
90 /* Avoid a concurrent insertion into the close thread queue:
91 * an inode is already in the close thread, open(), write(),
92 * close() happen, epoch is closed as the inode is marked as
93 * LLIF_EPOCH_PENDING. When pages are written inode should not
94 * be inserted into the queue again, clear this flag to avoid
96 lli->lli_flags &= ~LLIF_DONE_WRITING;
98 wake_up(&lcq->lcq_waitq);
99 spin_unlock(&lcq->lcq_lock);
101 spin_unlock(&lli->lli_lock);
104 /* Close epoch and send Size-on-MDS attribute update if possible.
105 * Call this under @lli->lli_lock spinlock. */
/*
 * ll_epoch_close() - decide whether the current IO epoch can be closed
 * and whether a Size-on-MDS (SOM) attribute update should accompany it.
 *
 * @inode:   inode whose epoch is being closed
 * @op_data: MD op data; MF_EPOCH_CLOSE / MF_SOM_CHANGE flags and SOM
 *           attribute-valid bits are set here for the caller to send
 * @och:     in/out open handle pointer; may be parked in
 *           lli_pending_och when DONE_WRITE must be deferred
 * @flags:   LLIF_DONE_WRITING when called from the DONE_WRITE path
 *
 * NOTE(review): sampled listing — several early-return/unlock paths and
 * the GOTO/RETURN lines between the branches below are not visible, so
 * the exact fall-through structure cannot be confirmed from this view.
 */
106 void ll_epoch_close(struct inode *inode, struct md_op_data *op_data,
107 struct obd_client_handle **och, unsigned long flags)
109 struct ll_inode_info *lli = ll_i2info(inode);
112 spin_lock(&lli->lli_lock);
/* Dirty pages remain: epoch cannot close yet. */
113 if (!(list_empty(&lli->lli_pending_write_llaps))) {
114 if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
115 LASSERT(*och != NULL);
116 LASSERT(lli->lli_pending_och == NULL);
117 /* Inode is dirty and there is no pending write done
118 * request yet, DONE_WRITE is to be sent later. */
119 lli->lli_flags |= LLIF_EPOCH_PENDING;
120 lli->lli_pending_och = *och;
121 spin_unlock(&lli->lli_lock);
/* Hold a reference while the deferred DONE_WRITE is outstanding. */
123 inode = igrab(inode);
127 if (flags & LLIF_DONE_WRITING) {
128 /* Some pages are still dirty, it is early to send
129 * DONE_WRITE. Wait until all pages will be flushed
130 * and try DONE_WRITE again later. */
131 LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
132 lli->lli_flags |= LLIF_DONE_WRITING;
133 spin_unlock(&lli->lli_lock);
135 inode = igrab(inode);
140 CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID"\n",
141 ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
142 op_data->op_flags |= MF_EPOCH_CLOSE;
/* DONE_WRITE path: take over the parked open handle. */
144 if (flags & LLIF_DONE_WRITING) {
145 LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
146 LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
147 *och = lli->lli_pending_och;
148 lli->lli_pending_och = NULL;
149 lli->lli_flags &= ~LLIF_EPOCH_PENDING;
151 /* Pack Size-on-MDS inode attributes only if they have changed */
152 if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
153 spin_unlock(&lli->lli_lock);
157 /* There is a pending DONE_WRITE -- close epoch with no
158 * attribute change. */
159 if (lli->lli_flags & LLIF_EPOCH_PENDING) {
160 spin_unlock(&lli->lli_lock);
165 LASSERT(list_empty(&lli->lli_pending_write_llaps));
166 lli->lli_flags &= ~LLIF_SOM_DIRTY;
167 spin_unlock(&lli->lli_lock);
168 op_data->op_flags |= MF_SOM_CHANGE;
170 /* Check if Size-on-MDS attributes are valid. */
171 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
172 CWARN("ino %lu/%u(flags %lu) som valid it just after "
174 inode->i_ino, inode->i_generation, lli->lli_flags);
176 if (!ll_local_size(inode)) {
177 /* Send Size-on-MDS Attributes if valid. Atime is sent along
178 * with all the attributes. */
179 op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
180 ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
/*
 * ll_sizeonmds_update() - fetch current size/time attributes from the
 * OSTs (ll_inode_getattr) and push them to the MDS via a setattr RPC
 * carrying MF_SOM_CHANGE, bound to epoch @ioepoch and open handle @fh.
 *
 * @inode:   inode whose Size-on-MDS attributes are updated
 * @mod:     open replay data passed through to ll_md_setattr()
 * @fh:      open file handle copied into op_data->op_handle
 * @ioepoch: IO epoch the update belongs to
 *
 * NOTE(review): sampled listing — the declarations of `oa` and `rc`,
 * the error/cleanup gotos, and the final return are outside this view;
 * return convention (0 / negative errno) presumed — TODO confirm.
 */
187 int ll_sizeonmds_update(struct inode *inode, struct md_open_data *mod,
188 struct lustre_handle *fh, __u64 ioepoch)
190 struct ll_inode_info *lli = ll_i2info(inode);
191 struct md_op_data *op_data;
196 /* LASSERT(!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)); */
197 /* After recovery that can be valid. */
198 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
199 CWARN("ino %lu/%u(flags %lu) som valid it just after "
200 "recovery\n", inode->i_ino, inode->i_generation,
204 OBD_ALLOC_PTR(op_data);
205 if (!oa || !op_data) {
206 CERROR("can't allocate memory for Size-on-MDS update.\n");
/* Gather size/blocks/times from the OSTs into oa. */
209 rc = ll_inode_getattr(inode, oa);
212 CDEBUG(D_INODE, "objid "LPX64" is already destroyed\n",
213 lli->lli_smd->lsm_object_id);
215 CERROR("inode_getattr failed (%d): unable to send a "
216 "Size-on-MDS attribute update for inode %lu/%u\n",
217 rc, inode->i_ino, inode->i_generation);
220 CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n", PFID(&lli->lli_fid));
/* Translate the obdo attributes into the MD op and tag the epoch. */
222 md_from_obdo(op_data, oa, oa->o_valid);
223 memcpy(&op_data->op_handle, fh, sizeof(*fh));
225 op_data->op_ioepoch = ioepoch;
226 op_data->op_flags |= MF_SOM_CHANGE;
228 rc = ll_md_setattr(inode, op_data, &mod);
234 ll_finish_md_op_data(op_data);
238 /* Send a DONE_WRITING rpc, pack Size-on-MDS attributes into it, if possible */
/*
 * ll_done_writing() - close the epoch (ll_epoch_close) and send the
 * DONE_WRITING RPC for @inode; if the MDS replies that it needs the
 * size (presumably rc == -EAGAIN — the branch condition is outside
 * this sampled view, TODO confirm), follow up with a Size-on-MDS
 * setattr via ll_sizeonmds_update().  Requires the MDS connection to
 * support OBD_CONNECT_SOM.
 * NOTE(review): sampled listing — `rc` declaration, the early-return
 * when no och is available, and cleanup gotos are not visible here.
 */
239 static void ll_done_writing(struct inode *inode)
241 struct obd_client_handle *och = NULL;
242 struct md_op_data *op_data;
246 LASSERT(ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM);
248 OBD_ALLOC_PTR(op_data);
249 if (op_data == NULL) {
250 CERROR("can't allocate op_data\n");
255 ll_epoch_close(inode, op_data, &och, LLIF_DONE_WRITING);
256 /* If there is no @och, we do not do D_W yet. */
260 ll_pack_inode2opdata(inode, op_data, &och->och_fh);
262 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, och->och_mod);
264 /* MDS has instructed us to obtain Size-on-MDS attribute from
265 * OSTs and send setattr to back to MDS. */
266 rc = ll_sizeonmds_update(inode, och->och_mod,
267 &och->och_fh, op_data->op_ioepoch);
269 CERROR("inode %lu mdc done_writing failed: rc = %d\n",
273 ll_finish_md_op_data(op_data);
275 md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
/*
 * ll_close_next_lli() - pop the next inode from the close queue.
 * Returns the dequeued ll_inode_info, NULL when the queue is empty,
 * or ERR_PTR(-EALREADY) when the queue is empty and the thread has
 * been asked to stop (lcq_stop).  Queue access is under lcq_lock.
 * NOTE(review): sampled listing — the final `return lli`-style line
 * is outside this view.
 */
281 static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
283 struct ll_inode_info *lli = NULL;
285 spin_lock(&lcq->lcq_lock);
287 if (!list_empty(&lcq->lcq_head)) {
288 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
290 list_del_init(&lli->lli_close_list);
291 } else if (atomic_read(&lcq->lcq_stop))
292 lli = ERR_PTR(-EALREADY);
294 spin_unlock(&lcq->lcq_lock);
/*
 * ll_close_thread() - kernel thread that services the close queue:
 * waits for inodes queued by ll_queue_done_writing() and runs
 * ll_done_writing() on each.  Signals lcq_comp at startup (so the
 * starter can proceed) and again on exit (for shutdown handshake).
 * NOTE(review): sampled listing — the daemonize call, the main loop
 * construct, the IS_ERR(lli) exit check, and iput() of the inode are
 * not visible in this view.
 */
298 static int ll_close_thread(void *arg)
300 struct ll_close_queue *lcq = arg;
304 char name[CFS_CURPROC_COMM_MAX];
305 snprintf(name, sizeof(name) - 1, "ll_close");
/* Tell ll_close_thread_start() that we are up. */
309 complete(&lcq->lcq_comp);
312 struct l_wait_info lwi = { 0 };
313 struct ll_inode_info *lli;
/* Sleep until an inode is queued or shutdown is requested. */
316 l_wait_event_exclusive(lcq->lcq_waitq,
317 (lli = ll_close_next_lli(lcq)) != NULL,
322 inode = ll_info2i(lli);
323 CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
324 inode->i_ino, inode->i_generation);
325 ll_done_writing(inode);
329 CDEBUG(D_INFO, "ll_close exiting\n");
330 complete(&lcq->lcq_comp);
/*
 * ll_close_thread_start() - allocate and initialize a close queue,
 * spawn the ll_close_thread, wait for it to signal readiness via
 * lcq_comp, and return the queue through @lcq_ret.
 * NOTE(review): sampled listing — the -ENOMEM return for a failed
 * OBD_ALLOC, the pid<0 error return, `*lcq_ret = lcq;` and the final
 * return are outside this view.
 */
336 struct ll_close_queue *lcq;
339 OBD_ALLOC(lcq, sizeof(*lcq));
343 spin_lock_init(&lcq->lcq_lock);
344 INIT_LIST_HEAD(&lcq->lcq_head);
345 init_waitqueue_head(&lcq->lcq_waitq);
346 init_completion(&lcq->lcq_comp);
348 pid = kernel_thread(ll_close_thread, lcq, 0);
/* Thread creation failed: release the queue. */
350 OBD_FREE(lcq, sizeof(*lcq));
/* Wait until the thread has started and signalled lcq_comp. */
354 wait_for_completion(&lcq->lcq_comp);
/*
 * ll_close_thread_shutdown() - stop the close thread and free @lcq.
 * Re-arms lcq_comp, raises the lcq_stop flag, wakes the thread so it
 * notices the flag (ll_close_next_lli returns ERR_PTR(-EALREADY)),
 * waits for its exit completion, then frees the queue.
 */
359 void ll_close_thread_shutdown(struct ll_close_queue *lcq)
361 init_completion(&lcq->lcq_comp);
362 atomic_inc(&lcq->lcq_stop);
363 wake_up(&lcq->lcq_waitq);
364 wait_for_completion(&lcq->lcq_comp);
365 OBD_FREE(lcq, sizeof(*lcq));