From: Andreas Dilger Date: Wed, 17 Aug 2011 20:50:04 +0000 (-0600) Subject: LUDOC-14 Always use --index when formatting OSTs X-Git-Tag: 2.1.0~1 X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=261c2cce34c348c3b718f02f8a1177face683a03;hp=80df828ed59342afda33edd90b52b0d7cd0fa4cc;p=doc%2Fmanual.git LUDOC-14 Always use --index when formatting OSTs To prepare users for changes that are upcoming in Lustre 2.3/2.4 for the OSD restructuring project, the OST index should always be given when formatting OSTs with mkfs.lustre. This is already accepted as standard practice for Lustre users today, so reflect that in the examples in the manual. Change-Id: I41cc175dc4392950c4376b3643c3017f6b9b2fcc Signed-off-by: Andreas Dilger --- diff --git a/BackupAndRestore.xml b/BackupAndRestore.xml index 315ae73..dd69a81 100644 --- a/BackupAndRestore.xml +++ b/BackupAndRestore.xml @@ -159,8 +159,8 @@ # lctl --device lustre-MDT0000 changelog_register lustre-MDT0000 Registered\ changelog userid 'cl1' Synchronize a Lustre file system (/mnt/lustre) to a target file system (/mnt/target). - $ lustre_rsync --source=/mnt/lustre --target=/mnt/target --mdt=lustre-MDT00\ -00 --user=cl1 --statuslog sync.log --verbose + $ lustre_rsync --source=/mnt/lustre --target=/mnt/target --mdt=lustre-MDT0000 \ +--user=cl1 --statuslog sync.log --verbose Lustre filesystem: lustre MDT device: lustre-MDT0000 Source: /mnt/lustre @@ -332,15 +332,15 @@ cfs21:~# lvscan Format the LVM volumes as Lustre targets. In this example, the backup file system is called 'main' and designates the current, most up-to-date backup. - cfs21:~# mkfs.lustre --mdt --fsname=main /dev/volgroup/MDT + cfs21:~# mkfs.lustre --fsname=main --mdt --index=0 /dev/volgroup/MDT No management node specified, adding MGS to this MDT. 
Permanent disk data: - Target: main-MDTffff - Index: unassigned + Target: main-MDT0000 + Index: 0 Lustre FS: main Mount type: ldiskfs Flags: 0x75 - (MDT MGS needs_index first_time update ) + (MDT MGS first_time update ) Persistent mount opts: errors=remount-ro,iopen_nopriv,user_xattr Parameters: checking for existing Lustre data @@ -349,27 +349,28 @@ checking for existing Lustre data target name main-MDTffff 4k blocks 0 options -i 4096 -I 512 -q -O dir_index -F - mkfs_cmd = mkfs.ext2 -j -b 4096 -L main-MDTffff -i 4096 -I 512 -q -O dir_\ -index -F /dev/volgroup/MDT + mkfs_cmd = mkfs.ext2 -j -b 4096 -L main-MDTffff -i 4096 -I 512 -q + -O dir_index -F /dev/volgroup/MDT Writing CONFIGS/mountdata -cfs21:~# mkfs.lustre --ost --mgsnode=cfs21 --fsname=main /dev/volgroup/OST0 +cfs21:~# mkfs.lustre --mgsnode=cfs21 --fsname=main --ost --index=0 /dev/volgroup/OST0 Permanent disk data: - Target: main-OSTffff -Index: unassigned + Target: main-OST0000 + Index: 0 Lustre FS: main Mount type: ldiskfs Flags: 0x72 - (OST needs_index first_time update ) + (OST first_time update ) Persistent mount opts: errors=remount-ro,extents,mballoc Parameters: mgsnode=192.168.0.21@tcp checking for existing Lustre data device size = 200MB formatting backing filesystem ldiskfs on /dev/volgroup/OST0 - target name main-OSTffff + target name main-OST0000 4k blocks 0 options -I 256 -q -O dir_index -F - mkfs_cmd = mkfs.ext2 -j -b 4096 -L main-OSTffff -I 256 -q -O dir_index -F\ - /dev/ volgroup/OST0 + mkfs_cmd = mkfs.ext2 -j -b 4096 -L lustre-OSTffff -J size=400 -I 256 + -i 262144 -O extents,uninit_bg,dir_nlink,huge_file,flex_bg -G 256 + -E resize=4290772992,lazy_journal_init, -F /dev/volgroup/OST0 Writing CONFIGS/mountdata cfs21:~# mount -t lustre /dev/volgroup/MDT /mnt/mdt cfs21:~# mount -t lustre /dev/volgroup/OST0 /mnt/ost @@ -411,8 +412,7 @@ fstab passwd termcap Rename the LVM snapshot. Rename the file system snapshot from "main" to "back" so you can mount it without unmounting "main". 
This is recommended, but not required. Use the --reformat flag to tunefs.lustre to force the name change. For example: - cfs21:~# tunefs.lustre --reformat --fsname=back --writeconf /dev/volgroup/M\ -DTb1 + cfs21:~# tunefs.lustre --reformat --fsname=back --writeconf /dev/volgroup/MDTb1 checking for existing Lustre data found Lustre data Reading CONFIGS/mountdata @@ -435,8 +435,7 @@ Permanent disk data: Persistent mount opts: errors=remount-ro,iopen_nopriv,user_xattr Parameters: Writing CONFIGS/mountdata -cfs21:~# tunefs.lustre --reformat --fsname=back --writeconf /dev/volgroup/O\ -STb1 +cfs21:~# tunefs.lustre --reformat --fsname=back --writeconf /dev/volgroup/OSTb1 checking for existing Lustre data found Lustre data Reading CONFIGS/mountdata @@ -459,8 +458,7 @@ Permanent disk data: Persistent mount opts: errors=remount-ro,extents,mballoc Parameters: mgsnode=192.168.0.21@tcp Writing CONFIGS/mountdata -When renaming an FS, we must also erase the last_rcvd file from the snapsho\ -ts +When renaming an FS, we must also erase the last_rcvd file from the snapshots cfs21:~# mount -t ldiskfs /dev/volgroup/MDTb1 /mnt/mdtback cfs21:~# rm /mnt/mdtback/last_rcvd cfs21:~# umount /mnt/mdtback @@ -471,7 +469,7 @@ cfs21:~# umount /mnt/ostback Mount the file system from the LVM snapshot. For example: - cfs21:~# mount -t lustre /dev/volgroup/MDTb1 /mnt/mdtback \ + cfs21:~# mount -t lustre /dev/volgroup/MDTb1 /mnt/mdtback cfs21:~# mount -t lustre /dev/volgroup/OSTb1 /mnt/ostback cfs21:~# mount -t lustre cfs21:/back /mnt/back diff --git a/ConfiguringLustre.xml b/ConfiguringLustre.xml index c1a0bcf..1d4c1cf 100644 --- a/ConfiguringLustre.xml +++ b/ConfiguringLustre.xml @@ -53,7 +53,7 @@ Create a combined MGS/MDT file system on a block device. On the MDS node, run: - mkfs.lustre --fsname=<fsname> --mgs --mdt <block device name> + mkfs.lustre --fsname=<fsname> --mgs --mdt --index=0 <block device name> The default file system name (fsname) is lustre. 
If you plan to generate multiple file systems, the MGS should be created separately on its own dedicated block device, by running: @@ -69,13 +69,14 @@ Create the OST. On the OSS node, run: - mkfs.lustre --ost --fsname=<fsname> --mgsnode=<NID> <block device name> + mkfs.lustre --fsname=<fsname> --mgsnode=<NID> --ost --index=<OST index> <block device name> When you create an OST, you are formatting a ldiskfs file system on a block storage device like you would with any local file system. You can have as many OSTs per OSS as the hardware or drivers allow. For more information about storage and memory requirements for a Lustre file system, see . You can only configure one OST per block device. You should create an OST that uses the raw block device and does not use partitioning. + You should specify the OST index number at format time in order to simplify translating the OST number in error messages or file striping to the OSS node and block device later on. If you are using block devices that are accessible from multiple OSS nodes, ensure that you mount the OSTs from only one OSS node at at time. It is strongly recommended that multiple-mount protection be enabled for such devices to prevent serious data corruption. For more information about multiple-mount protection, see . - Lustre currently supports block devices up to 16 TB on OEL 5/RHEL 5 (up to 8 TB on other distributions). If the device size is only slightly larger that 16 TB, it is recommended that you limit the file system size to 16 TB at format time. If the size is significantly larger than 16 TB, you should reconfigure the storage into devices smaller than 16 TB. We recommend that you not place partitions on top of RAID 5/6 block devices due to negative impacts on performance. + Lustre currently supports block devices up to 128 TB on RHEL 5/6 (up to 8 TB on other distributions). 
If the device size is only slightly larger than 16 TB, it is recommended that you limit the file system size to 16 TB at format time. We recommend that you not place DOS partitions on top of RAID 5/6 block devices due to negative impacts on performance, but instead format the whole disk for the filesystem. @@ -83,7 +84,7 @@ mount -t lustre <block device name> <mount point> - To create additional OSTs, repeat Step 3 and Step 4. + To create additional OSTs, repeat Step 3 and Step 4, specifying the next higher OST index number. @@ -411,15 +412,15 @@ Create a combined MGS/MDT file system on the block device. On the MDS node, run: - [root@mds /]# mkfs.lustre --fsname=temp --mgs --mdt /dev/sdb + [root@mds /]# mkfs.lustre --fsname=temp --mgs --mdt --index=0 /dev/sdb This command generates this output: Permanent disk data: -Target: temp-MDTffff -Index: unassigned +Target: temp-MDT0000 +Index: 0 Lustre FS: temp Mount type: ldiskfs Flags: 0x75 - (MDT MGS needs_index first_time update ) + (MDT MGS first_time update ) Persistent mount opts: errors=remount-ro,iopen_nopriv,user_xattr Parameters: mdt.group_upcall=/usr/sbin/l_getgroups @@ -450,15 +451,15 @@ Lustre: Server temp-MDT0000 on device /dev/sdb has started Create ost1. 
On oss1 node, run: - [root@oss1 /]# mkfs.lustre --ost --fsname=temp --mgsnode=10.2.0.1@tcp0 /dev/sdc + [root@oss1 /]# mkfs.lustre --fsname=temp --mgsnode=10.2.0.1@tcp0 --ost --index=0 /dev/sdc The command generates this output: Permanent disk data: -Target: temp-OSTffff -Index: unassigned +Target: temp-OST0000 +Index: 0 Lustre FS: temp Mount type: ldiskfs Flags: 0x72 -(OST needs_index first_time update) +(OST first_time update) Persistent mount opts: errors=remount-ro,extents,mballoc Parameters: mgsnode=10.2.0.1@tcp @@ -466,10 +467,10 @@ checking for existing Lustre data: not found device size = 16MB 2 6 18 formatting backing filesystem ldiskfs on /dev/sdc - target name temp-OSTffff + target name temp-OST0000 4k blocks 0 options -I 256 -q -O dir_index,uninit_groups -F -mkfs_cmd = mkfs.ext2 -j -b 4096 -L temp-OSTffff -I 256 -q -O +mkfs_cmd = mkfs.ext2 -j -b 4096 -L temp-OST0000 -I 256 -q -O dir_index,uninit_groups -F /dev/sdc Writing CONFIGS/mountdata @@ -492,16 +493,16 @@ Lustre: MDS temp-MDT0000: temp-OST0000_UUID now active, resetting orphans Create ost2. 
On oss2 node, run: - [root@oss2 /]# mkfs.lustre --ost --fsname=temp --mgsnode=10.2.0.1@tcp0 /dev\ -/sdd + [root@oss2 /]# mkfs.lustre --fsname=temp --mgsnode=10.2.0.1@tcp0 \ +--ost --index=1 /dev/sdd The command generates this output: Permanent disk data: -Target: temp-OSTffff -Index: unassigned +Target: temp-OST0001 +Index: 1 Lustre FS: temp Mount type: ldiskfs Flags: 0x72 -(OST needs_index first_time update) +(OST first_time update) Persistent mount opts: errors=remount-ro,extents,mballoc Parameters: mgsnode=10.2.0.1@tcp @@ -509,10 +510,10 @@ checking for existing Lustre data: not found device size = 16MB 2 6 18 formatting backing filesystem ldiskfs on /dev/sdd - target name temp-OSTffff + target name temp-OST0001 4k blocks 0 options -I 256 -q -O dir_index,uninit_groups -F -mkfs_cmd = mkfs.ext2 -j -b 4096 -L temp-OSTffff -I 256 -q -O +mkfs_cmd = mkfs.ext2 -j -b 4096 -L temp-OST0001 -I 256 -q -O dir_index,uninit_groups -F /dev/sdc Writing CONFIGS/mountdata @@ -522,11 +523,11 @@ Writing CONFIGS/mountdata The command generates this output: LDISKFS-fs: file extents enabled LDISKFS-fs: mballoc enabled -Lustre: temp-OST0000: new disk, initializing -Lustre: Server temp-OST0000 on device /dev/sdb has started +Lustre: temp-OST0001: new disk, initializing +Lustre: Server temp-OST0001 on device /dev/sdb has started Shortly afterwards, this output appears: - Lustre: temp-OST0000: received MDS connection from 10.2.0.1@tcp0 -Lustre: MDS temp-MDT0000: temp-OST0000_UUID now active, resetting orphans + Lustre: temp-OST0001: received MDS connection from 10.2.0.1@tcp0 +Lustre: MDS temp-MDT0000: temp-OST0001_UUID now active, resetting orphans diff --git a/ConfiguringStorage.xml b/ConfiguringStorage.xml index e9f6b7b..1403936 100644 --- a/ConfiguringStorage.xml +++ b/ConfiguringStorage.xml @@ -126,9 +126,8 @@ Create the OST. 
In this example, /dev/sdc is the RAID 6 device to be used as the OST, run: - [oss#] mkfs.lustre --ost --mgsnode=mds@osib --mkfsoptions="-J device=/dev/sd\ -b1" /dev/sdc - + [oss#] mkfs.lustre --mgsnode=mds@osib --ost --index=0 \ +--mkfsoptions="-J device=/dev/sdb1" /dev/sdc Mount the OST as usual. diff --git a/LustreMaintenance.xml b/LustreMaintenance.xml index d787644..82d4889 100644 --- a/LustreMaintenance.xml +++ b/LustreMaintenance.xml @@ -266,9 +266,9 @@ Adding a New OST to a Lustre File System Add a new OST by passing on the following commands, run: - $ mkfs.lustre --fsname=spfs --ost --mgsnode=mds16@tcp0 /dev/sda -$ mkdir -p /mnt/test/ost0 -$ mount -t lustre /dev/sda /mnt/test/ost0 + $ mkfs.lustre --fsname=spfs --mgsnode=mds16@tcp0 --ost --index=12 /dev/sda +$ mkdir -p /mnt/test/ost12 +$ mount -t lustre /dev/sda /mnt/test/ost12 Migrate the data (possibly). diff --git a/LustreOperations.xml b/LustreOperations.xml index 586c0c3..504a676 100644 --- a/LustreOperations.xml +++ b/LustreOperations.xml @@ -86,7 +86,7 @@ Mounting by Label LABEL=testfs-MDT0000 /mnt/test/mdt lustre defaults,_netdev,noauto 0 0 LABEL=testfs-OST0000 /mnt/test/ost0 lustre defaults,_netdev,noauto 0 0 In general, it is wise to specify noauto and let your high-availability (HA) package manage when to mount the device. If you are not using failover, make sure that networking has been started before mounting a Lustre server. RedHat, SuSE, Debian (and perhaps others) use the _netdev flag to ensure that these disks are mounted after the network is up. - We are mounting by disk label here--the label of a device can be read with e2label. The label of a newly-formatted Lustre server ends in FFFF, meaning that it has yet to be assigned. The assignment takes place when the server is first started, and the disk label is updated. + We are mounting by disk label here. The label of a device can be read with e2label. 
The label of a newly-formatted Lustre server may end in FFFF if the --index option is not specified to mkfs.lustre, meaning that it has yet to be assigned. The assignment takes place when the server is first started, and the disk label is updated. It is recommended that the --index option always be used, which will also ensure that the label is set at format time. Do not do this when the client and OSS are on the same node, as memory pressure between the client and OSS can lead to deadlocks. @@ -117,9 +117,9 @@ LABEL=testfs-OST0000 /mnt/test/ost0 lustre defaults,_netdev,noauto 0 0 By default, the Lustre file system uses failover mode for OSTs. To specify failout mode instead, run this command: - $ mkfs.lustre --fsname=<fsname> --ost --mgsnode=<MGS node NID> --param="failover.mode=failout" <block device name> + $ mkfs.lustre --fsname=<fsname> --mgsnode=<MGS node NID> --param="failover.mode=failout" --ost --index="OST index" <block device name> In this example, failout mode is specified for the OSTs on MGS uml1, file system testfs. - $ mkfs.lustre --fsname=testfs --ost --mgsnode=uml1 --param="failover.mode=failout" /dev/sdb + $ mkfs.lustre --fsname=testfs --mgsnode=uml1 --param="failover.mode=failout" --ost --index=3 /dev/sdb Before running this command, unmount all OSTs that will be affected by the change in the failover/failout mode. @@ -146,15 +146,15 @@ LABEL=testfs-OST0000 /mnt/test/ost0 lustre defaults,_netdev,noauto 0 0
<indexterm><primary>operations</primary><secondary>multiple file systems</secondary></indexterm>Running Multiple Lustre File Systems There may be situations in which you want to run multiple file systems. This is doable, as long as you follow specific naming conventions. - By default, the mkfs.lustre command creates a file system named lustre. To specify a different file system name (limited to 8 characters), run this command: - mkfs.lustre --fsname=<new file system name> + By default, the mkfs.lustre command creates a file system named lustre. To specify a different file system name (limited to 8 characters) at format time, use the --fsname option: + mkfs.lustre --fsname=<file system name> - The MDT, OSTs and clients in the new file system must share the same name (prepended to the device name). For example, for a new file system named foo, the MDT and two OSTs would be named foo-MDT0000, foo-OST0000, and foo-OST0001. + The MDT, OSTs and clients in the new file system must use the same filesystem name (prepended to the device name). For example, for a new file system named foo, the MDT and two OSTs would be named foo-MDT0000, foo-OST0000, and foo-OST0001. To mount a client on the file system, run: mount -t lustre mgsnode:/<new fsname> <mountpoint> - For example, to mount a client on file system foo at mount point /mnt/lustre1, run: - mount -t lustre mgsnode:/foo /mnt/lustre1 + For example, to mount a client on file system foo at mount point /mnt/foo, run: + mount -t lustre mgsnode:/foo /mnt/foo If a client(s) will be mounted on several file systems, add the following line to /etc/xattr.conf file to avoid problems when files are moved between the file systems: lustre.* skip @@ -162,26 +162,20 @@ LABEL=testfs-OST0000 /mnt/test/ost0 lustre defaults,_netdev,noauto 0 0 The MGS is universal; there is only one MGS per Lustre installation, not per file system. - There is only one file system per MDT. 
Therefore, specify --mdt --mgs on one file system and --mdt --mgsnode=<MGS node NID> on the other file systems. + There is only one file system per MDT. Therefore, specify --mdt --mgs on one file system and --mdt --mgsnode=<MGS node NID> on the other file systems. - A Lustre installation with two file systems (foo and bar) could look like this, where the MGS node is mgsnode@tcp0 and the mount points are /mnt/lustre1 and /mnt/lustre2. - mgsnode# mkfs.lustre --mgs /mnt/lustre1 -mdtfoonode# mkfs.lustre --fsname=foo --mdt --mgsnode=mgsnode@tcp0 /mnt/lust\ -re1 -ossfoonode# mkfs.lustre --fsname=foo --ost --mgsnode=mgsnode@tcp0 /mnt/lust\ -re1 -ossfoonode# mkfs.lustre --fsname=foo --ost --mgsnode=mgsnode@tcp0 /mnt/lust\ -re2 -mdtbarnode# mkfs.lustre --fsname=bar --mdt --mgsnode=mgsnode@tcp0 /mnt/lust\ -re1 -ossbarnode# mkfs.lustre --fsname=bar --ost --mgsnode=mgsnode@tcp0 /mnt/lust\ -re1 -ossbarnode# mkfs.lustre --fsname=bar --ost --mgsnode=mgsnode@tcp0 /mnt/lust\ -re2 - To mount a client on file system foo at mount point /mnt/lustre1, run: - mount -t lustre mgsnode@tcp0:/foo /mnt/lustre1 - To mount a client on file system bar at mount point /mnt/lustre2, run: - mount -t lustre mgsnode@tcp0:/bar /mnt/lustre2 + A Lustre installation with two file systems (foo and bar) could look like this, where the MGS node is mgsnode@tcp0 and the mount points are /mnt/foo and /mnt/bar. 
+ mgsnode# mkfs.lustre --mgs /dev/sda +mdtfoonode# mkfs.lustre --fsname=foo --mgsnode=mgsnode@tcp0 --mdt --index=0 /dev/sdb +ossfoonode# mkfs.lustre --fsname=foo --mgsnode=mgsnode@tcp0 --ost --index=0 /dev/sda +ossfoonode# mkfs.lustre --fsname=foo --mgsnode=mgsnode@tcp0 --ost --index=1 /dev/sdb +mdtbarnode# mkfs.lustre --fsname=bar --mgsnode=mgsnode@tcp0 --mdt --index=0 /dev/sda +ossbarnode# mkfs.lustre --fsname=bar --mgsnode=mgsnode@tcp0 --ost --index=0 /dev/sdc +ossbarnode# mkfs.lustre --fsname=bar --mgsnode=mgsnode@tcp0 --ost --index=1 /dev/sdd + To mount a client on file system foo at mount point /mnt/foo, run: + mount -t lustre mgsnode@tcp0:/foo /mnt/foo + To mount a client on file system bar at mount point /mnt/bar, run: + mount -t lustre mgsnode@tcp0:/bar /mnt/bar
<indexterm><primary>operations</primary><secondary>parameters</secondary></indexterm>Setting and Retrieving Lustre Parameters @@ -281,10 +275,10 @@ osc.myth-OST0004-osc-ffff88006dd20000.filesfree=129651 lctl list_nids This displays the server's NIDs (networks configured to work with Lustre). This example has a combined MGS/MDT failover pair on uml1 and uml2, and a OST failover pair on uml3 and uml4. There are corresponding Elan addresses on uml1 and uml2. - uml1> mkfs.lustre --fsname=testfs --mdt --mgs --failnode=uml2,2@elan /dev/sda1 + uml1> mkfs.lustre --fsname=testfs --mgs --mdt --index=0 --failnode=uml2,2@elan /dev/sda1 uml1> mount -t lustre /dev/sda1 /mnt/test/mdt -uml3> mkfs.lustre --fsname=testfs --ost --failnode=uml4 --mgsnode=uml1,1@ela\ -n --mgsnode=uml2,2@elan /dev/sdb +uml3> mkfs.lustre --fsname=testfs --failnode=uml4 --mgsnode=uml1,1@elan \ +--mgsnode=uml2,2@elan --ost --index=0 /dev/sdb uml3> mount -t lustre /dev/sdb /mnt/test/ost0 client> mount -t lustre uml1,1@elan:uml2,2@elan:/testfs /mnt/testfs uml1> umount /mnt/mdt @@ -296,8 +290,8 @@ uml2> cat /proc/fs/lustre/mds/testfs-MDT0000/recovery_status On the OST, list the NIDs of all MGS nodes at mkfs time. - OST# mkfs.lustre --fsname sunfs --ost --mgsnode=10.0.0.1 \ - --mgsnode=10.0.0.2 /dev/{device} + OST# mkfs.lustre --fsname sunfs --mgsnode=10.0.0.1 \ + --mgsnode=10.0.0.2 --ost --index=0 /dev/sdb On the client, mount the file system. 
@@ -319,11 +313,11 @@ uml2> cat /proc/fs/lustre/mds/testfs-MDT0000/recovery_status Erase the file system and, presumably, replace it with another file system, run: - $ mkfs.lustre -reformat --fsname spfs --mdt --mgs /dev/sda + $ mkfs.lustre --reformat --fsname spfs --mgs --mdt --index=0 /dev/sda If you have a separate MGS (that you do not want to reformat), then add the "writeconf" flag to mkfs.lustre on the MDT, run: - $ mkfs.lustre --reformat --writeconf -fsname spfs --mdt \ --mgs /dev/sda + $ mkfs.lustre --reformat --writeconf --fsname spfs --mgs --mdt --index=0 /dev/sda diff --git a/ManagingFileSystemIO.xml b/ManagingFileSystemIO.xml index 05b2e21..55b01ec 100644 --- a/ManagingFileSystemIO.xml +++ b/ManagingFileSystemIO.xml @@ -297,9 +297,9 @@ filesystem summary: 11.8G 7.3G 3.9G 61% \ Add a new OST by passing on the following commands, run: - $ mkfs.lustre --fsname=spfs --ost --mgsnode=mds16@tcp0 /dev/sda -$ mkdir -p /mnt/test/ost0 -$ mount -t lustre /dev/sda /mnt/test/ost0 + $ mkfs.lustre --fsname=spfs --mgsnode=mds16@tcp0 --ost --index=12 /dev/sda +$ mkdir -p /mnt/test/ost12 +$ mount -t lustre /dev/sda /mnt/test/ost12 Migrate the data (possibly). diff --git a/ManagingLNET.xml b/ManagingLNET.xml index 4defefa..c754977 100644 --- a/ManagingLNET.xml +++ b/ManagingLNET.xml @@ -154,10 +154,10 @@ $ mkfs.lustre --fsname lustre --mdt --mgs /dev/sda $ mkdir -p /mnt/test/mdt $ mount -t lustre /dev/sda /mnt/test/mdt $ mount -t lustre mgs@o2ib0:/lustre /mnt/mdt -$ mkfs.lustre --fsname lustre --ost --mgsnode=mds@o2ib0 /dev/sda +$ mkfs.lustre --fsname lustre --mgsnode=mds@o2ib0 --ost --index=0 /dev/sda $ mkdir -p /mnt/test/mdt $ mount -t lustre /dev/sda /mnt/test/ost -$ mount -t lustre mgs@o2ib0:/lustre /mnt/ost +$ mount -t lustre mgs@o2ib0:/lustre /mnt/ost0 Mount the clients. 
diff --git a/SystemConfigurationUtilities.xml b/SystemConfigurationUtilities.xml index 7d30c8d..5d40c5e 100644 --- a/SystemConfigurationUtilities.xml +++ b/SystemConfigurationUtilities.xml @@ -1592,7 +1592,7 @@ mkfs.lustre --index=index - Forces a particular OST or MDT index. + Specifies the OST or MDT number. This should always be used when formatting OSTs, in order to ensure that there is a simple mapping between the OST index and the OSS node and device it is located on. @@ -1611,7 +1611,7 @@ mkfs.lustre Sets the mount options used when the backing file system is mounted. CAUTION: Unlike earlier versions of mkfs.lustre, this version completely replaces the default mount options with those specified on the command line, and issues a warning on stderr if any default mount options are omitted. The defaults for ldiskfs are: - OST: errors=remount-ro,mballoc,extents; + OST: errors=remount-ro; MGS/MDT: errors=remount-ro,iopen_nopriv,user_xattr Do not alter the default mount options unless you know what you are doing. @@ -1726,7 +1726,7 @@ mkfs.lustre Creates a combined MGS and MDT for file system testfs on, e.g., node cfs21: mkfs.lustre --fsname=testfs --mdt --mgs /dev/sda1 Creates an OST for file system testfs on any node (using the above MGS): - mkfs.lustre --fsname=testfs --ost --mgsnode=cfs21@tcp0 /dev/sdb + mkfs.lustre --fsname=testfs --mgsnode=cfs21@tcp0 --ost --index=0 /dev/sdb Creates a standalone MGS on, e.g., node cfs22: mkfs.lustre --mgs /dev/sda1 Creates an MDT for file system myfs1 on any node (using the above MGS):