4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2012, 2017, Intel Corporation.
27 * lustre/target/tgt_main.c
29 * Lustre Unified Target main initialization code
31 * Author: Mikhail Pershin <mike.pershin@intel.com>
34 #define DEBUG_SUBSYSTEM S_CLASS
37 #include <obd_cksum.h>
38 #include "tgt_internal.h"
39 #include "../ptlrpc/ptlrpc_internal.h"
41 /* This must be longer than the longest string below */
42 #define SYNC_STATES_MAXLEN 16
/* Human-readable names for the sync-on-lock-cancel policy, indexed by the
 * enum tgt_sync_lock_cancel value so the sysfs show/store handlers can map
 * between string and numeric form.
 * NOTE(review): the closing "};" (and possibly a terminator entry) is
 * missing from this extract — confirm against the pristine source. */
43 static const char * const sync_lock_cancel_states[] = {
44 [SYNC_LOCK_CANCEL_NEVER] = "never",
45 [SYNC_LOCK_CANCEL_BLOCKING] = "blocking",
46 [SYNC_LOCK_CANCEL_ALWAYS] = "always",
50 * Show policy for handling dirty data under a lock being cancelled.
52 * \param[in] kobj sysfs kobject
53 * \param[in] attr sysfs attribute
54 * \param[in] buf buffer for data
56 * \retval 0 and buffer filled with data on success
57 * \retval negative value on error
/* sysfs show handler: print the current sync-on-lock-cancel policy name
 * ("never"/"blocking"/"always") followed by a newline into @buf.
 * NOTE(review): this extract drops interior lines (e.g. the second
 * container_of() argument and braces) — verify against pristine source. */
59 ssize_t sync_lock_cancel_show(struct kobject *kobj,
60 struct attribute *attr, char *buf)
62 struct obd_device *obd = container_of(kobj, struct obd_device,
/* lut_sync_lock_cancel indexes sync_lock_cancel_states[] directly */
64 struct lu_target *tgt = obd->u.obt.obt_lut;
66 return sprintf(buf, "%s\n",
67 sync_lock_cancel_states[tgt->lut_sync_lock_cancel]);
69 EXPORT_SYMBOL(sync_lock_cancel_show);
72 * Change policy for handling dirty data under a lock being cancelled.
74 * This variable defines what action target takes upon lock cancel
75 * There are three possible modes:
76 * 1) never - never do sync upon lock cancel. This can lead to data
77 * inconsistencies if both the OST and client crash while writing a file
78 * that is also concurrently being read by another client. In these cases,
79 * this may allow the file data to "rewind" to an earlier state.
80 * 2) blocking - do sync only if there is blocking lock, e.g. if another
81 * client is trying to access this same object
82 * 3) always - do sync always
84 * \param[in] kobj kobject
85 * \param[in] attr attribute to show
86 * \param[in] buf buffer for data
87 * \param[in] count buffer size
89 * \retval \a count on success
90 * \retval negative value on error
/* sysfs store handler: accept either a policy name ("never"/"blocking"/
 * "always") or a legacy numeric code 0..2 and update
 * tgt->lut_sync_lock_cancel under lut_flags_lock.
 * NOTE(review): interior lines (loop body, returns, numeric-match exit)
 * are missing from this extract — confirm against pristine source. */
92 ssize_t sync_lock_cancel_store(struct kobject *kobj, struct attribute *attr,
93 const char *buffer, size_t count)
95 struct obd_device *obd = container_of(kobj, struct obd_device,
97 struct lu_target *tgt = obd->u.obt.obt_lut;
99 enum tgt_sync_lock_cancel slc;
/* reject empty input and anything longer than the longest state name */
101 if (count == 0 || count >= SYNC_STATES_MAXLEN)
/* first try to match the input against the policy-name table */
104 for (slc = 0; slc < ARRAY_SIZE(sync_lock_cancel_states); slc++) {
105 if (strcmp(buffer, sync_lock_cancel_states[slc]) == 0) {
111 /* Legacy numeric codes */
113 int rc = kstrtoint(buffer, 0, &val);
/* only 0 (never), 1 (blocking), 2 (always) are valid legacy codes */
118 if (val < 0 || val > 2)
121 spin_lock(&tgt->lut_flags_lock);
122 tgt->lut_sync_lock_cancel = val;
123 spin_unlock(&tgt->lut_flags_lock);
126 EXPORT_SYMBOL(sync_lock_cancel_store);
127 LUSTRE_RW_ATTR(sync_lock_cancel);
130 * Show maximum number of Filter Modification Data (FMD) maintained.
132 * \param[in] kobj kobject
133 * \param[in] attr attribute to show
134 * \param[in] buf buffer for data
136 * \retval 0 and buffer filled with data on success
137 * \retval negative value on error
/* sysfs show handler: print the maximum number of Filter Modification
 * Data (FMD) entries maintained per export (lut_fmd_max_num). */
139 ssize_t tgt_fmd_count_show(struct kobject *kobj, struct attribute *attr,
142 struct obd_device *obd = container_of(kobj, struct obd_device,
144 struct lu_target *lut = obd->u.obt.obt_lut;
146 return sprintf(buf, "%u\n", lut->lut_fmd_max_num);
150 * Change number of FMDs maintained by target.
152 * This defines how large the list of FMDs can be.
154 * \param[in] kobj kobject
155 * \param[in] attr attribute to show
156 * \param[in] buf buffer for data
157 * \param[in] count buffer size
159 * \retval \a count on success
160 * \retval negative value on error
/* sysfs store handler: set the maximum FMD entry count from an integer
 * string; accepted range is 1..65536.
 * NOTE(review): error-return lines after kstrtoint() and the final
 * "return count" are missing from this extract — confirm upstream. */
162 ssize_t tgt_fmd_count_store(struct kobject *kobj, struct attribute *attr,
163 const char *buffer, size_t count)
165 struct obd_device *obd = container_of(kobj, struct obd_device,
167 struct lu_target *lut = obd->u.obt.obt_lut;
170 rc = kstrtoint(buffer, 0, &val);
/* bound the list size to something sane: at least 1, at most 64K */
174 if (val < 1 || val > 65536)
177 lut->lut_fmd_max_num = val;
181 LUSTRE_RW_ATTR(tgt_fmd_count);
184 * Show the maximum age of FMD data in seconds.
186 * \param[in] kobj kobject
187 * \param[in] attr attribute to show
188 * \param[in] buf buffer for data
190 * \retval 0 and buffer filled with data on success
191 * \retval negative value on error
/* sysfs show handler: print the maximum FMD age in seconds
 * (lut_fmd_max_age, a 64-bit value). */
193 ssize_t tgt_fmd_seconds_show(struct kobject *kobj, struct attribute *attr,
196 struct obd_device *obd = container_of(kobj, struct obd_device,
198 struct lu_target *lut = obd->u.obt.obt_lut;
200 return sprintf(buf, "%lld\n", lut->lut_fmd_max_age);
204 * Set the maximum age of FMD data in seconds.
206 * This defines how long FMD data stays in the FMD list.
208 * \param[in] kobj kobject
209 * \param[in] attr attribute to show
210 * \param[in] buf buffer for data
211 * \param[in] count buffer size
213 * \retval \a count on success
214 * \retval negative number on error
/* sysfs store handler: set the maximum FMD age in seconds from an integer
 * string; accepted range is 1..65536 (~18 hours).
 * NOTE(review): the error-return after kstrtoll() and the final
 * "return count" are missing from this extract — confirm upstream. */
216 ssize_t tgt_fmd_seconds_store(struct kobject *kobj, struct attribute *attr,
217 const char *buffer, size_t count)
219 struct obd_device *obd = container_of(kobj, struct obd_device,
221 struct lu_target *lut = obd->u.obt.obt_lut;
225 rc = kstrtoll(buffer, 0, &val);
229 if (val < 1 || val > 65536) /* ~ 18 hour max */
232 lut->lut_fmd_max_age = val;
236 LUSTRE_RW_ATTR(tgt_fmd_seconds);
238 /* These two aliases are old names and kept for compatibility, they were
239 * changed to 'tgt_fmd_count' and 'tgt_fmd_seconds'.
240 * This change was made in Lustre 2.13, so these aliases can be removed
241 * when backward compatibility is not needed with any Lustre version prior to 2.13
/* Compatibility aliases: expose the FMD tunables under their pre-2.13
 * names (client_cache_count / client_cache_seconds) wired to the same
 * show/store handlers as the new attributes. */
243 static struct lustre_attr tgt_fmd_count_compat = __ATTR(client_cache_count,
244 0644, tgt_fmd_count_show, tgt_fmd_count_store);
245 static struct lustre_attr tgt_fmd_seconds_compat = __ATTR(client_cache_seconds,
246 0644, tgt_fmd_seconds_show, tgt_fmd_seconds_store);
/* Attribute set registered by tgt_tunables_init().
 * NOTE(review): the NULL terminator and closing "};" are missing from
 * this extract — sysfs_create_files() requires a NULL-terminated array. */
248 static const struct attribute *tgt_attrs[] = {
249 &lustre_attr_sync_lock_cancel.attr,
250 &lustre_attr_tgt_fmd_count.attr,
251 &lustre_attr_tgt_fmd_seconds.attr,
252 &tgt_fmd_count_compat.attr,
253 &tgt_fmd_seconds_compat.attr,
258 * Decide which checksums both client and OST support, possibly forcing
259 * the use of T10PI checksums if the hardware supports this.
261 * The clients that have no T10-PI RPC checksum support will use the same
262 * mechanism to select checksum type as before, and will not be affected by
263 * the following logic.
265 * For the clients that have T10-PI RPC checksum support:
267 * If the target supports T10-PI feature and T10-PI checksum is enforced,
268 * clients will have no other choice for RPC checksum type other than using
269 * the T10PI checksum type. This is useful for enforcing end-to-end integrity
270 * in the whole system.
272 * If the target doesn't support T10-PI feature and T10-PI checksum is
273 * enforced, together with other checksum with reasonably good speeds (e.g.
274 * crc32, crc32c, adler, etc.), all T10-PI checksum types understood by the
275 * client (t10ip512, t10ip4K, t10crc512, t10crc4K) will be added to the
276 * available checksum types, regardless of the speeds of T10-PI checksums.
277 * This is useful for testing T10-PI checksum of RPC.
279 * If the target supports T10-PI feature and T10-PI checksum is NOT enforced,
280 * the corresponding T10-PI checksum type will be added to the checksum type
281 * list, regardless of the speed of the T10-PI checksum. This provides clients
282 * the flexibility to choose whether to enable end-to-end integrity or not.
284 * If the target does NOT support the T10-PI feature and T10-PI checksum is NOT
285 * enforced, together with other checksums with reasonably good speeds,
286 * all the T10-PI checksum types with good speeds will be added into the
287 * checksum type list. Note that a T10-PI checksum type with a speed worse
288 * than half of Adler will NOT be added as an option. In this circumstance,
289 * T10-PI checksum types have the same behavior as other normal checksum
/* Negotiate the checksum types usable by a client: mask *cksum_types down
 * to what this target supports, then add T10-PI types back in according
 * to the target's T10-PI capability (ddp_t10_cksum_type) and the
 * lut_cksum_t10pi_enforce setting (see the block comment above).
 * NOTE(review): braces, the early "return", and the if/else structure
 * around lines 317-338 are partly missing from this extract. */
292 void tgt_mask_cksum_types(struct lu_target *lut, enum cksum_types *cksum_types)
294 bool enforce = lut->lut_cksum_t10pi_enforce;
295 enum cksum_types tgt_t10_cksum_type;
296 enum cksum_types client_t10_types = *cksum_types & OBD_CKSUM_T10_ALL;
297 enum cksum_types server_t10_types;
300 * The client set in ocd_cksum_types the checksum types it
301 * supports. We have to mask off the algorithms that we don't
302 * support. T10PI checksum types will be added later.
304 *cksum_types &= (lut->lut_cksum_types_supported & ~OBD_CKSUM_T10_ALL);
305 server_t10_types = lut->lut_cksum_types_supported & OBD_CKSUM_T10_ALL;
306 tgt_t10_cksum_type = lut->lut_dt_conf.ddp_t10_cksum_type;
308 /* Quick exit if no T10-PI support on client */
309 if (!client_t10_types)
313 * This OST has NO T10-PI feature. Add all supported T10-PI checksums
314 * as options if T10-PI checksum is enforced. If the T10-PI checksum is
315 * not enforced, only add them as options when speed is good.
317 if (tgt_t10_cksum_type == 0) {
319 * Server allows all T10PI checksums, and server_t10_types
320 * include quick ones.
/* enforced: offer every T10-PI type the client understands */
323 *cksum_types |= client_t10_types;
/* not enforced: only the fast ones the server also lists */
325 *cksum_types |= client_t10_types & server_t10_types;
330 * This OST has T10-PI feature. Disable all other checksum types if
331 * T10-PI checksum is enforced. If the T10-PI checksum is not enforced,
332 * add the checksum type as an option.
334 if (client_t10_types & tgt_t10_cksum_type) {
/* enforced: the hardware T10-PI type becomes the ONLY choice */
336 *cksum_types = tgt_t10_cksum_type;
/* not enforced: merely add it as an additional option */
338 *cksum_types |= tgt_t10_cksum_type;
341 EXPORT_SYMBOL(tgt_mask_cksum_types);
/* Register the target's sysfs tunables (tgt_attrs) on the obd kset and
 * remember the set in lut->lut_attrs so tgt_tunables_fini() can remove
 * exactly what was registered.
 * NOTE(review): the rc error check between lines 347 and 349 is missing
 * from this extract — presumably lut_attrs is only set on success. */
343 int tgt_tunables_init(struct lu_target *lut)
347 rc = sysfs_create_files(&lut->lut_obd->obd_kset.kobj, tgt_attrs);
349 lut->lut_attrs = tgt_attrs;
352 EXPORT_SYMBOL(tgt_tunables_init);
/* Unregister the sysfs tunables installed by tgt_tunables_init(), if any,
 * and clear lut_attrs so the teardown is idempotent. */
354 void tgt_tunables_fini(struct lu_target *lut)
356 if (lut->lut_attrs) {
357 sysfs_remove_files(&lut->lut_obd->obd_kset.kobj,
359 lut->lut_attrs = NULL;
362 EXPORT_SYMBOL(tgt_tunables_fini);
365 * Save cross-MDT lock in lut_slc_locks.
367 * Lock R/W count is not saved, but released in unlock (not canceled remotely),
368 * instead only a refcount is taken, so that the remote MDT where the object
369 * resides can detect conflict with this lock there.
372 * \param lock cross-MDT lock to save
373 * \param transno when the transaction with this transno is committed, this lock
/* Save a cross-MDT lock on lut_slc_locks so it can be cancelled when the
 * transaction identified by @transno commits (Sync-on-Lock-Cancel).
 * Locking order: lut_slc_locks_guard, then the lock's resource lock.
 * NOTE(review): the early-exit path for the CBPENDING case and the
 * else-branch refcount put are missing from this extract. */
376 void tgt_save_slc_lock(struct lu_target *lut, struct ldlm_lock *lock,
379 spin_lock(&lut->lut_slc_locks_guard);
380 lock_res_and_lock(lock);
381 if (ldlm_is_cbpending(lock)) {
382 /* if it was cancelled by server, don't save, because remote MDT
383 * will do Sync-on-Cancel. */
/* remember which transno must commit before this lock is cancelled */
386 lock->l_transno = transno;
387 /* if this lock is in the list already, there are two operations
388 * both use this lock, and save it after use, so for the second
389 * one, just put the refcount. */
390 if (list_empty(&lock->l_slc_link))
391 list_add_tail(&lock->l_slc_link, &lut->lut_slc_locks);
395 unlock_res_and_lock(lock);
396 spin_unlock(&lut->lut_slc_locks_guard);
398 EXPORT_SYMBOL(tgt_save_slc_lock);
401 * Discard cross-MDT lock from lut_slc_locks.
403 * This is called upon BAST, just remove lock from lut_slc_locks and put lock
404 * refcount. The BAST will cancel this lock.
407 * \param lock cross-MDT lock to discard
/* Drop a cross-MDT lock from lut_slc_locks upon a blocking AST; the BAST
 * itself will cancel the lock, so only the list linkage (and, per the
 * header comment, a refcount) is released here. A nonzero l_transno is
 * used as the "still on the list" marker to close the race with
 * tgt_cancel_slc_locks(). */
409 void tgt_discard_slc_lock(struct lu_target *lut, struct ldlm_lock *lock)
411 spin_lock(&lut->lut_slc_locks_guard);
412 lock_res_and_lock(lock);
413 /* may race with tgt_cancel_slc_locks() */
414 if (lock->l_transno != 0) {
415 LASSERT(!list_empty(&lock->l_slc_link));
416 LASSERT(ldlm_is_cbpending(lock));
417 list_del_init(&lock->l_slc_link);
421 unlock_res_and_lock(lock);
422 spin_unlock(&lut->lut_slc_locks_guard);
424 EXPORT_SYMBOL(tgt_discard_slc_lock);
427 * Cancel cross-MDT locks upon transaction commit.
429 * Remove cross-MDT locks from lut_slc_locks, cancel them and put lock refcount.
432 * \param transno transaction with this number was committed.
/* On transaction commit, move every saved cross-MDT lock whose l_transno
 * is <= @transno from lut_slc_locks onto a private list (under the guard
 * spinlock), then cancel them asynchronously outside the lock.
 * Locks still actively used (readers/writers != 0) are skipped and left
 * for a later commit.
 * NOTE(review): the local list head declaration, l_transno reset, and
 * several continue/brace lines are missing from this extract. */
434 void tgt_cancel_slc_locks(struct lu_target *lut, __u64 transno)
436 struct ldlm_lock *lock, *next;
438 struct lustre_handle lockh;
440 spin_lock(&lut->lut_slc_locks_guard);
441 list_for_each_entry_safe(lock, next, &lut->lut_slc_locks,
443 lock_res_and_lock(lock);
444 LASSERT(lock->l_transno != 0);
/* list is not sorted by transno, so only skip — don't break */
445 if (lock->l_transno > transno) {
446 unlock_res_and_lock(lock);
449 /* ouch, another operation is using it after it's saved */
450 if (lock->l_readers != 0 || lock->l_writers != 0) {
451 unlock_res_and_lock(lock);
454 /* set CBPENDING so that this lock won't be used again */
455 ldlm_set_cbpending(lock);
457 list_move(&lock->l_slc_link, &list);
458 unlock_res_and_lock(lock);
460 spin_unlock(&lut->lut_slc_locks_guard);
/* cancel outside lut_slc_locks_guard: ldlm_cli_cancel may block */
462 list_for_each_entry_safe(lock, next, &list, l_slc_link) {
463 list_del_init(&lock->l_slc_link);
464 ldlm_lock2handle(lock, &lockh);
465 ldlm_cli_cancel(&lockh, LCF_ASYNC);
/* Initialize a unified target @lut on top of dt device @dt for @obd:
 * wire the obd to the lu_target, set the request-handler slice and fail
 * ids, init sptlrpc/grant/statfs state, and (for replayable targets)
 * open/create the LAST_RCVD and, for MDTs, REPLY_DATA objects plus the
 * transaction callbacks. On error, the "out" path below undoes whatever
 * was set up.
 * NOTE(review): numerous lines (ENTRY/RETURN, GOTO error checks, local
 * declarations, braces) are missing from this extract — the comments
 * here describe only what the visible lines demonstrate. */
470 int tgt_init(const struct lu_env *env, struct lu_target *lut,
471 struct obd_device *obd, struct dt_device *dt,
472 struct tgt_opc_slice *slice, int request_fail_id,
475 struct dt_object_format dof;
479 struct tg_grants_data *tgd = &lut->lut_tgd;
480 struct obd_statfs *osfs;
/* reset all per-target state before any allocation can fail */
488 lut->lut_bottom = dt;
489 lut->lut_last_rcvd = NULL;
490 lut->lut_client_bitmap = NULL;
491 atomic_set(&lut->lut_num_clients, 0);
492 atomic_set(&lut->lut_client_generation, 0);
493 lut->lut_reply_data = NULL;
494 lut->lut_reply_bitmap = NULL;
495 obd->u.obt.obt_lut = lut;
496 obd->u.obt.obt_magic = OBT_MAGIC;
498 /* set request handler slice and parameters */
499 lut->lut_slice = slice;
500 lut->lut_reply_fail_id = reply_fail_id;
501 lut->lut_request_fail_id = request_fail_id;
503 /* sptlrcp variables init */
504 rwlock_init(&lut->lut_sptlrpc_lock);
505 sptlrpc_rule_set_init(&lut->lut_sptlrpc_rset);
507 spin_lock_init(&lut->lut_flags_lock);
508 lut->lut_sync_lock_cancel = SYNC_LOCK_CANCEL_NEVER;
509 lut->lut_cksum_t10pi_enforce = 0;
510 lut->lut_cksum_types_supported =
511 obd_cksum_types_supported_server(obd->obd_name);
513 spin_lock_init(&lut->lut_slc_locks_guard);
514 INIT_LIST_HEAD(&lut->lut_slc_locks);
516 /* last_rcvd initialization is needed by replayable targets only */
517 if (!obd->obd_replayable)
520 /* initialize grant and statfs data in target */
521 dt_conf_get(env, lut->lut_bottom, &lut->lut_dt_conf);
/* start with stale statfs age so the first statfs call refreshes it */
524 spin_lock_init(&tgd->tgd_osfs_lock);
525 tgd->tgd_osfs_age = ktime_get_seconds() - 1000;
526 tgd->tgd_osfs_unstable = 0;
527 tgd->tgd_statfs_inflight = 0;
528 tgd->tgd_osfs_inflight = 0;
531 spin_lock_init(&tgd->tgd_grant_lock);
532 tgd->tgd_tot_dirty = 0;
533 tgd->tgd_tot_granted = 0;
534 tgd->tgd_tot_pending = 0;
535 tgd->tgd_grant_compat_disable = 0;
536 tgd->tgd_lbug_on_grant_miscount = 0;
538 /* populate cached statfs data */
539 osfs = &tgt_th_info(env)->tti_u.osfs;
540 rc = tgt_statfs_internal(env, lut, osfs, 0, NULL);
542 CERROR("%s: can't get statfs data, rc %d\n", tgt_name(lut),
/* grant accounting needs a power-of-2 blocksize for tgd_blockbits */
546 if (!is_power_of_2(osfs->os_bsize)) {
547 CERROR("%s: blocksize (%d) is not a power of 2\n",
548 tgt_name(lut), osfs->os_bsize);
549 GOTO(out, rc = -EPROTO);
551 tgd->tgd_blockbits = fls(osfs->os_bsize) - 1;
553 spin_lock_init(&lut->lut_translock);
554 spin_lock_init(&lut->lut_client_bitmap_lock);
/* one bit per possible client slot in last_rcvd */
556 OBD_ALLOC(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
557 if (lut->lut_client_bitmap == NULL)
/* find or create the LAST_RCVD object used for recovery data */
560 memset(&attr, 0, sizeof(attr));
561 attr.la_valid = LA_MODE;
562 attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
563 dof.dof_type = dt_mode_to_dft(S_IFREG);
565 lu_local_obj_fid(&fid, LAST_RECV_OID);
567 o = dt_find_or_create(env, lut->lut_bottom, &fid, &dof, &attr);
570 CERROR("%s: cannot open LAST_RCVD: rc = %d\n", tgt_name(lut),
575 lut->lut_last_rcvd = o;
576 rc = tgt_server_data_init(env, lut);
580 /* prepare transactions callbacks */
581 lut->lut_txn_cb.dtc_txn_start = tgt_txn_start_cb;
582 lut->lut_txn_cb.dtc_txn_stop = tgt_txn_stop_cb;
583 lut->lut_txn_cb.dtc_cookie = lut;
584 lut->lut_txn_cb.dtc_tag = LCT_DT_THREAD | LCT_MD_THREAD;
585 INIT_LIST_HEAD(&lut->lut_txn_cb.dtc_linkage);
587 dt_txn_callback_add(lut->lut_bottom, &lut->lut_txn_cb);
588 lut->lut_bottom->dd_lu_dev.ld_site->ls_tgt = lut;
590 lut->lut_fmd_max_num = LUT_FMD_MAX_NUM_DEFAULT;
591 lut->lut_fmd_max_age = LUT_FMD_MAX_AGE_DEFAULT;
593 atomic_set(&lut->lut_sync_count, 0);
595 /* reply_data is supported by MDT targets only for now */
596 if (strncmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME, 3) != 0)
599 OBD_ALLOC(lut->lut_reply_bitmap,
600 LUT_REPLY_SLOTS_MAX_CHUNKS * sizeof(unsigned long *));
601 if (lut->lut_reply_bitmap == NULL)
602 GOTO(out, rc = -ENOMEM);
/* find or create the REPLY_DATA object for multi-slot reply recovery */
604 memset(&attr, 0, sizeof(attr));
605 attr.la_valid = LA_MODE;
606 attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
607 dof.dof_type = dt_mode_to_dft(S_IFREG);
609 lu_local_obj_fid(&fid, REPLY_DATA_OID);
611 o = dt_find_or_create(env, lut->lut_bottom, &fid, &dof, &attr);
614 CERROR("%s: cannot open REPLY_DATA: rc = %d\n", tgt_name(lut),
618 lut->lut_reply_data = o;
620 rc = tgt_reply_data_init(env, lut);
/* error path: unwind everything initialized above, in reverse order */
627 dt_txn_callback_del(lut->lut_bottom, &lut->lut_txn_cb);
629 obd->u.obt.obt_magic = 0;
630 obd->u.obt.obt_lut = NULL;
631 if (lut->lut_last_rcvd != NULL) {
632 dt_object_put(env, lut->lut_last_rcvd);
633 lut->lut_last_rcvd = NULL;
635 if (lut->lut_client_bitmap != NULL)
636 OBD_FREE(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
637 lut->lut_client_bitmap = NULL;
638 if (lut->lut_reply_data != NULL)
639 dt_object_put(env, lut->lut_reply_data);
640 lut->lut_reply_data = NULL;
641 if (lut->lut_reply_bitmap != NULL) {
642 for (i = 0; i < LUT_REPLY_SLOTS_MAX_CHUNKS; i++) {
643 if (lut->lut_reply_bitmap[i] != NULL)
644 OBD_FREE_LARGE(lut->lut_reply_bitmap[i],
645 BITS_TO_LONGS(LUT_REPLY_SLOTS_PER_CHUNK) *
647 lut->lut_reply_bitmap[i] = NULL;
649 OBD_FREE(lut->lut_reply_bitmap,
650 LUT_REPLY_SLOTS_MAX_CHUNKS * sizeof(unsigned long *));
652 lut->lut_reply_bitmap = NULL;
655 EXPORT_SYMBOL(tgt_init);
/* Tear down a unified target: clear the MULTI_RPCS incompat flag once no
 * clients remain (so older Lustre can mount the target again), free the
 * sptlrpc rule set, release the reply_data object and bitmap chunks, the
 * client bitmap, and finally the last_rcvd object plus its transaction
 * callback. Mirrors the cleanup order of tgt_init()'s error path. */
657 void tgt_fini(const struct lu_env *env, struct lu_target *lut)
663 if (lut->lut_lsd.lsd_feature_incompat & OBD_INCOMPAT_MULTI_RPCS &&
664 atomic_read(&lut->lut_num_clients) == 0) {
665 /* Clear MULTI RPCS incompatibility flag that prevents previous
666 * Lustre versions to mount a target with reply_data file */
667 lut->lut_lsd.lsd_feature_incompat &= ~OBD_INCOMPAT_MULTI_RPCS;
668 rc = tgt_server_data_update(env, lut, 1);
/* best effort: log and continue teardown even if the update fails */
670 CERROR("%s: unable to clear MULTI RPCS "
671 "incompatibility flag\n",
672 lut->lut_obd->obd_name);
675 sptlrpc_rule_set_free(&lut->lut_sptlrpc_rset);
677 if (lut->lut_reply_data != NULL)
678 dt_object_put(env, lut->lut_reply_data);
679 lut->lut_reply_data = NULL;
680 if (lut->lut_reply_bitmap != NULL) {
681 for (i = 0; i < LUT_REPLY_SLOTS_MAX_CHUNKS; i++) {
682 if (lut->lut_reply_bitmap[i] != NULL)
683 OBD_FREE_LARGE(lut->lut_reply_bitmap[i],
684 BITS_TO_LONGS(LUT_REPLY_SLOTS_PER_CHUNK) *
686 lut->lut_reply_bitmap[i] = NULL;
688 OBD_FREE(lut->lut_reply_bitmap,
689 LUT_REPLY_SLOTS_MAX_CHUNKS * sizeof(unsigned long *));
691 lut->lut_reply_bitmap = NULL;
692 if (lut->lut_client_bitmap) {
693 OBD_FREE(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
694 lut->lut_client_bitmap = NULL;
/* last_rcvd presence implies the txn callback was registered */
696 if (lut->lut_last_rcvd) {
697 dt_txn_callback_del(lut->lut_bottom, &lut->lut_txn_cb);
698 dt_object_put(env, lut->lut_last_rcvd);
699 lut->lut_last_rcvd = NULL;
703 EXPORT_SYMBOL(tgt_fini);
/* Slab caches for per-thread info, per-session info, and FMD entries;
 * created in tgt_mod_init() via lu_kmem_init(tgt_caches) and destroyed
 * in tgt_mod_exit(). tgt_fmd_kmem is non-static: shared with tgt_fmd.c. */
705 static struct kmem_cache *tgt_thread_kmem;
706 static struct kmem_cache *tgt_session_kmem;
707 struct kmem_cache *tgt_fmd_kmem;
709 static struct lu_kmem_descr tgt_caches[] = {
711 .ckd_cache = &tgt_thread_kmem,
712 .ckd_name = "tgt_thread_kmem",
713 .ckd_size = sizeof(struct tgt_thread_info),
716 .ckd_cache = &tgt_session_kmem,
717 .ckd_name = "tgt_session_kmem",
718 .ckd_size = sizeof(struct tgt_session_info)
721 .ckd_cache = &tgt_fmd_kmem,
722 .ckd_name = "tgt_fmd_cache",
723 .ckd_size = sizeof(struct tgt_fmd_data)
731 /* context key constructor/destructor: tg_key_init, tg_key_fini */
/* Allocate a tgt_thread_info from the slab for a new lu_context;
 * returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): the success-return line is missing from this extract. */
732 static void *tgt_key_init(const struct lu_context *ctx,
733 struct lu_context_key *key)
735 struct tgt_thread_info *thread;
737 OBD_SLAB_ALLOC_PTR_GFP(thread, tgt_thread_kmem, GFP_NOFS);
739 return ERR_PTR(-ENOMEM);
/* Free a tgt_thread_info when its lu_context is destroyed: release any
 * thandle_exec_args entries accumulated in tti_tea, the args array
 * itself, then the info structure back to its slab. */
744 static void tgt_key_fini(const struct lu_context *ctx,
745 struct lu_context_key *key, void *data)
747 struct tgt_thread_info *info = data;
748 struct thandle_exec_args *args = &info->tti_tea;
751 for (i = 0; i < args->ta_alloc_args; i++) {
752 if (args->ta_args[i] != NULL)
753 OBD_FREE_PTR(args->ta_args[i]);
756 if (args->ta_args != NULL)
757 OBD_FREE_PTR_ARRAY(args->ta_args, args->ta_alloc_args);
758 OBD_SLAB_FREE_PTR(info, tgt_thread_kmem);
/* Per-request context exit hook: reset transaction-tracking flags so the
 * thread info can be reused by the next request. */
761 static void tgt_key_exit(const struct lu_context *ctx,
762 struct lu_context_key *key, void *data)
764 struct tgt_thread_info *tti = data;
766 tti->tti_has_trans = 0;
767 tti->tti_mult_trans = 0;
770 /* context key: tg_thread_key */
/* Thread-context key for MD and DT service threads, wired to the
 * init/fini/exit hooks above; registered in tgt_mod_init(). */
771 struct lu_context_key tgt_thread_key = {
772 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD,
773 .lct_init = tgt_key_init,
774 .lct_fini = tgt_key_fini,
775 .lct_exit = tgt_key_exit,
778 LU_KEY_INIT_GENERIC(tgt);
/* Allocate a tgt_session_info from the slab for a new session context;
 * returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): the success-return line is missing from this extract. */
780 static void *tgt_ses_key_init(const struct lu_context *ctx,
781 struct lu_context_key *key)
783 struct tgt_session_info *session;
785 OBD_SLAB_ALLOC_PTR_GFP(session, tgt_session_kmem, GFP_NOFS);
787 return ERR_PTR(-ENOMEM);
/* Return a tgt_session_info to its slab when the session context dies. */
792 static void tgt_ses_key_fini(const struct lu_context *ctx,
793 struct lu_context_key *key, void *data)
795 struct tgt_session_info *session = data;
797 OBD_SLAB_FREE_PTR(session, tgt_session_kmem);
800 /* context key: tgt_session_key */
/* Session-context key (no exit hook needed — session info carries no
 * per-request state that must be reset); registered in tgt_mod_init(). */
801 struct lu_context_key tgt_session_key = {
802 .lct_tags = LCT_SERVER_SESSION,
803 .lct_init = tgt_ses_key_init,
804 .lct_fini = tgt_ses_key_fini,
806 EXPORT_SYMBOL(tgt_session_key);
808 LU_KEY_INIT_GENERIC(tgt_ses);
811 * this page is allocated statically when module is initializing
812 * it is used to simulate data corruptions, see ost_checksum_bulk()
813 * for details. as the original pages provided by the layers below
814 * can remain in the internal cache, we do not want to modify
/* single scratch page used for fault-injection of checksum errors */
817 struct page *tgt_page_to_corrupt;
/* Module init: create the tgt slab caches, allocate the corruption
 * scratch page, and register the thread and session context keys.
 * NOTE(review): error handling after lu_kmem_init()/alloc_page() and the
 * final return are missing from this extract — confirm upstream. */
819 int tgt_mod_init(void)
824 result = lu_kmem_init(tgt_caches);
828 tgt_page_to_corrupt = alloc_page(GFP_KERNEL);
830 tgt_key_init_generic(&tgt_thread_key, NULL);
831 lu_context_key_register_many(&tgt_thread_key, NULL);
833 tgt_ses_key_init_generic(&tgt_session_key, NULL);
834 lu_context_key_register_many(&tgt_session_key, NULL);
/* Module exit: release the corruption scratch page, deregister both
 * context keys, and destroy the slab caches — the reverse of
 * tgt_mod_init(). */
842 void tgt_mod_exit(void)
845 if (tgt_page_to_corrupt != NULL)
846 put_page(tgt_page_to_corrupt);
848 lu_context_key_degister(&tgt_thread_key);
849 lu_context_key_degister(&tgt_session_key);
852 lu_kmem_fini(tgt_caches);