4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/llite/llite_close.c
38 * Lustre Lite routines to issue a secondary close after writeback
41 #include <linux/module.h>
43 #define DEBUG_SUBSYSTEM S_LLITE
45 #include <lustre_lite.h>
46 #include "llite_internal.h"
48 /** Records that a write is in flight for @page on the inode backing @club.
 * Sets LLIF_SOM_DIRTY and, when @page is given and not already linked, adds
 * it to the object's pending-write list; both updates happen under lli_lock.
 * NOTE(review): this is an elided listing — braces and some lines of the
 * original are not visible here; code kept byte-identical. */
49 void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
51 struct ll_inode_info *lli = ll_i2info(club->cob_inode);
/* lli_lock serializes lli_flags and the pending list. */
54 spin_lock(&lli->lli_lock);
55 lli->lli_flags |= LLIF_SOM_DIRTY;
/* Link the page at most once: cpg_pending_linkage is empty iff unlinked. */
56 if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
57 cfs_list_add(&page->cpg_pending_linkage,
58 &club->cob_pending_list);
59 spin_unlock(&lli->lli_lock);
63 /** records that a write has completed */
/* Unlinks @page from the object's pending-write list (if linked) under
 * lli_lock, then re-evaluates whether DONE_WRITING can be queued.
 * NOTE(review): in this elided listing the lines between the unlink and the
 * ll_queue_done_writing() call are missing — presumably the call is guarded
 * (e.g. only when the pending list drained); verify against full source. */
64 void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
66 struct ll_inode_info *lli = ll_i2info(club->cob_inode);
70 spin_lock(&lli->lli_lock);
/* cpg_pending_linkage is non-empty only while the page is pending. */
71 if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
72 cfs_list_del_init(&page->cpg_pending_linkage);
75 spin_unlock(&lli->lli_lock);
/* No extra flags: just re-check the DONE_WRITING conditions. */
77 ll_queue_done_writing(club->cob_inode, 0);
81 /** Queues DONE_WRITING if
82  * - done writing is allowed;
83  * - inode has no dirty pages;
 * @flags is OR-ed into lli_flags first (e.g. LLIF_DONE_WRITING), so a caller
 * can both mark and queue in one step. Takes lli_lock, then lcq_lock. */
84 void ll_queue_done_writing(struct inode *inode, unsigned long flags)
86 struct ll_inode_info *lli = ll_i2info(inode);
87 struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
90 spin_lock(&lli->lli_lock);
91 lli->lli_flags |= flags;
/* Queue only when DONE_WRITING is permitted AND no writes are pending. */
93 if ((lli->lli_flags & LLIF_DONE_WRITING) &&
94 cfs_list_empty(&club->cob_pending_list)) {
95 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
/* Holding the MDS size lock here is unexpected right after recovery. */
97 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
98 CWARN("ino %lu/%u(flags %u) som valid it just after "
100 inode->i_ino, inode->i_generation,
102 /* DONE_WRITING is allowed and inode has no dirty page. */
103 spin_lock(&lcq->lcq_lock);
105 LASSERT(cfs_list_empty(&lli->lli_close_list));
106 CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
107 inode->i_ino, inode->i_generation);
108 cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
110 /* Avoid a concurrent insertion into the close thread queue:
111  * an inode is already in the close thread, open(), write(),
112  * close() happen, epoch is closed as the inode is marked as
113  * LLIF_EPOCH_PENDING. When pages are written inode should not
114  * be inserted into the queue again, clear this flag to avoid it. */
116 lli->lli_flags &= ~LLIF_DONE_WRITING;
/* Wake the close thread to pick the inode off lcq_head. */
118 wake_up(&lcq->lcq_waitq);
119 spin_unlock(&lcq->lcq_lock);
121 spin_unlock(&lli->lli_lock);
125 /** Pack SOM attributes into @op_data for CLOSE, DONE_WRITING rpc.
 * Always marks the rpc as a SOM change; the size/time attributes themselves
 * are attached only when the client-local size is authoritative. */
126 void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
128 struct ll_inode_info *lli = ll_i2info(inode);
131 op_data->op_flags |= MF_SOM_CHANGE;
132 /* Check if Size-on-MDS attributes are valid. */
/* MDS size lock held here is unexpected right after recovery — log it. */
133 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
134 CERROR("ino %lu/%u(flags %u) som valid it just after "
135 "recovery\n", inode->i_ino, inode->i_generation,
/* cl_local_size() == 0 means the client's cached size is usable. */
138 if (!cl_local_size(inode)) {
139 /* Send Size-on-MDS Attributes if valid. */
140 op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
141 ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
146 /** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data.
 * @och may be consumed (set up as pending) or handed back to the caller,
 * depending on whether dirty pages remain and on @flags (LLIF_DONE_WRITING
 * distinguishes the DONE_WRITING path from the CLOSE path).
 * NOTE(review): elided listing — several return/exit lines between the
 * visible branches are missing; code kept byte-identical. */
147 void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
148 struct obd_client_handle **och, unsigned long flags)
150 struct ll_inode_info *lli = ll_i2info(inode);
151 struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
154 spin_lock(&lli->lli_lock);
/* Dirty pages still pending: defer DONE_WRITE. */
155 if (!(cfs_list_empty(&club->cob_pending_list))) {
156 if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
157 LASSERT(*och != NULL);
158 LASSERT(lli->lli_pending_och == NULL);
159 /* Inode is dirty and there is no pending write done
160  * request yet, DONE_WRITE is to be sent later. */
161 lli->lli_flags |= LLIF_EPOCH_PENDING;
162 lli->lli_pending_och = *och;
163 spin_unlock(&lli->lli_lock);
/* Hold an inode reference while the DONE_WRITE is outstanding. */
165 inode = igrab(inode);
169 if (flags & LLIF_DONE_WRITING) {
170 /* Some pages are still dirty, it is early to send
171  * DONE_WRITE. Wait until all pages have been flushed
172  * and try DONE_WRITE again later. */
173 LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
174 lli->lli_flags |= LLIF_DONE_WRITING;
175 spin_unlock(&lli->lli_lock);
177 inode = igrab(inode);
/* No pending pages from here on: the epoch can actually close. */
182 CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID"\n",
183 ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
184 op_data->op_flags |= MF_EPOCH_CLOSE;
/* DONE_WRITING path: reclaim the handle stashed by the CLOSE path. */
186 if (flags & LLIF_DONE_WRITING) {
187 LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
188 LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
189 *och = lli->lli_pending_och;
190 lli->lli_pending_och = NULL;
191 lli->lli_flags &= ~LLIF_EPOCH_PENDING;
193 /* Pack Size-on-MDS inode attributes only if they have changed */
194 if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
195 spin_unlock(&lli->lli_lock);
199 /* There is a pending DONE_WRITE -- close epoch with no
200  * attribute change. */
201 if (lli->lli_flags & LLIF_EPOCH_PENDING) {
202 spin_unlock(&lli->lli_lock);
/* SOM-dirty, nothing pending: clear the flag and pack attributes. */
207 LASSERT(cfs_list_empty(&club->cob_pending_list));
208 lli->lli_flags &= ~LLIF_SOM_DIRTY;
209 spin_unlock(&lli->lli_lock);
210 ll_done_writing_attr(inode, op_data);
218 * Client updates SOM attributes on MDS (including llog cookies):
219 * obd_getattr with no lock and md_setattr.
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): elided listing — the obdo allocation, several locals (rc,
 * oa, old_flags) and error-path lines are not visible here; code kept
 * byte-identical. */
221 int ll_som_update(struct inode *inode, struct md_op_data *op_data)
223 struct ll_inode_info *lli = ll_i2info(inode);
224 struct ptlrpc_request *request = NULL;
230 LASSERT(op_data != NULL);
/* MDS size lock held here is unexpected right after recovery — log it. */
231 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
232 CERROR("ino %lu/%u(flags %u) som valid it just after "
233 "recovery\n", inode->i_ino, inode->i_generation,
238 CERROR("can't allocate memory for Size-on-MDS update.\n");
/* Preserve caller flags (e.g. MF_GETATTR_LOCK) before overwriting. */
242 old_flags = op_data->op_flags;
243 op_data->op_flags = MF_SOM_CHANGE;
245 /* If inode is already in another epoch, skip getattr from OSTs. */
246 if (lli->lli_ioepoch == op_data->op_ioepoch) {
247 rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
248 old_flags & MF_GETATTR_LOCK);
252 CERROR("inode_getattr failed (%d): unable to "
253 "send a Size-on-MDS attribute update "
254 "for inode %lu/%u\n", rc, inode->i_ino,
255 inode->i_generation);
257 CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
258 PFID(&lli->lli_fid));
260 /* Install attributes into op_data. */
261 md_from_obdo(op_data, oa, oa->o_valid);
/* Push the updated attributes to the MDS via setattr. */
264 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
265 NULL, 0, NULL, 0, &request, NULL);
266 ptlrpc_req_finished(request);
273 * Closes the ioepoch and packs all the attributes into @op_data for the
 * DONE_WRITING rpc. On return *och is non-NULL only when the rpc should
 * actually be sent now. */
276 static void ll_prepare_done_writing(struct inode *inode,
277 struct md_op_data *op_data,
278 struct obd_client_handle **och)
280 ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
281 /* If there is no @och, we do not do D_W yet. */
/* Fill op_data from the inode and the open handle's file handle. */
285 ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
286 ll_prep_md_op_data(op_data, inode, NULL, NULL,
287 0, 0, LUSTRE_OPC_ANY, NULL);
290 /** Send a DONE_WRITING rpc.
 * Called from the close thread for each queued inode. Allocates op_data,
 * closes the ioepoch, issues md_done_writing() and, when the MDS asks for
 * it, follows up with a Size-on-MDS update.
 * NOTE(review): elided listing — rc declaration, error/exit paths and the
 * -EAGAIN handling around ll_som_update() are not visible here; code kept
 * byte-identical. */
291 static void ll_done_writing(struct inode *inode)
293 struct obd_client_handle *och = NULL;
294 struct md_op_data *op_data;
/* DONE_WRITING only makes sense when the server supports SOM. */
298 LASSERT(exp_connect_som(ll_i2mdexp(inode)));
300 OBD_ALLOC_PTR(op_data);
301 if (op_data == NULL) {
302 CERROR("can't allocate op_data\n");
307 ll_prepare_done_writing(inode, op_data, &och);
308 /* If there is no @och, we do not do D_W yet. */
312 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
314 /* MDS has instructed us to obtain Size-on-MDS attribute from
315  * OSTs and send setattr back to MDS. */
316 rc = ll_som_update(inode, op_data);
318 CERROR("inode %lu mdc done_writing failed: rc = %d\n",
/* Cleanup: release op_data and any open-replay state on the handle. */
322 ll_finish_md_op_data(op_data);
324 md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
/* Pop the next inode off the close queue under lcq_lock.
 * Returns NULL when the queue is empty and the thread should keep waiting,
 * ERR_PTR(-EALREADY) when a shutdown has been requested. */
330 static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
332 struct ll_inode_info *lli = NULL;
334 spin_lock(&lcq->lcq_lock);
336 if (!cfs_list_empty(&lcq->lcq_head)) {
337 lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
/* Detach so a concurrent ll_queue_done_writing() can re-queue it. */
339 cfs_list_del_init(&lli->lli_close_list);
340 } else if (cfs_atomic_read(&lcq->lcq_stop))
341 lli = ERR_PTR(-EALREADY);
343 spin_unlock(&lcq->lcq_lock);
/* Kernel thread body: waits for inodes on the close queue and issues
 * DONE_WRITING for each until shutdown is requested. The two complete()
 * calls pair with wait_for_completion() in start and shutdown respectively.
 * NOTE(review): elided listing — the main while-loop head, the inode local,
 * iput and the exit test around ERR_PTR are not visible; code kept
 * byte-identical. */
347 static int ll_close_thread(void *arg)
349 struct ll_close_queue *lcq = arg;
/* Signal ll_close_thread_start() that the thread is up. */
352 complete(&lcq->lcq_comp);
355 struct l_wait_info lwi = { 0 };
356 struct ll_inode_info *lli;
/* Sleep until ll_close_next_lli() yields an inode (or shutdown marker). */
359 l_wait_event_exclusive(lcq->lcq_waitq,
360 (lli = ll_close_next_lli(lcq)) != NULL,
365 inode = ll_info2i(lli);
366 CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
367 inode->i_ino, inode->i_generation);
368 ll_done_writing(inode);
372 CDEBUG(D_INFO, "ll_close exiting\n");
/* Signal ll_close_thread_shutdown() that the thread has exited. */
373 complete(&lcq->lcq_comp);
/* Allocates and initializes the close queue, spawns the "ll_close" kthread
 * and waits until it has started. On success *lcq_ret presumably receives
 * the queue (assignment line elided in this listing — verify against full
 * source). Returns 0 on success or a negative errno. */
377 int ll_close_thread_start(struct ll_close_queue **lcq_ret)
379 struct ll_close_queue *lcq;
380 struct task_struct *task;
/* Fault-injection hook used by tests to simulate start failure. */
382 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
385 OBD_ALLOC(lcq, sizeof(*lcq));
389 spin_lock_init(&lcq->lcq_lock);
390 CFS_INIT_LIST_HEAD(&lcq->lcq_head);
391 init_waitqueue_head(&lcq->lcq_waitq);
392 init_completion(&lcq->lcq_comp);
394 task = kthread_run(ll_close_thread, lcq, "ll_close");
/* kthread_run failed: free the queue and propagate the error. */
396 OBD_FREE(lcq, sizeof(*lcq));
397 return PTR_ERR(task);
/* Wait for the thread's startup complete() before returning. */
400 wait_for_completion(&lcq->lcq_comp);
/* Stops the close thread: re-arms the completion, raises the stop flag,
 * wakes the thread so it sees ERR_PTR(-EALREADY) and exits, waits for its
 * exit complete(), then frees the queue. */
405 void ll_close_thread_shutdown(struct ll_close_queue *lcq)
/* Re-initialize so this wait pairs with the thread's exit complete(),
 * not the leftover startup signal. */
407 init_completion(&lcq->lcq_comp);
408 cfs_atomic_inc(&lcq->lcq_stop);
409 wake_up(&lcq->lcq_waitq);
410 wait_for_completion(&lcq->lcq_comp);
411 OBD_FREE(lcq, sizeof(*lcq));