author     Jeremy Huddleston <eradicator@gentoo.org>   2006-02-22 07:41:10 +0000
committer  Jeremy Huddleston <eradicator@gentoo.org>   2006-02-22 07:41:10 +0000
commit     0494a85dc52f9120eef72c1aaa2b0043b57114e6 (patch)
tree       3565e517473914f37fa8c0cf9f469323527758e2 /sys-fs/evms
parent     RESTRICT isn't required (diff)
download   gentoo-2-0494a85dc52f9120eef72c1aaa2b0043b57114e6.tar.gz
           gentoo-2-0494a85dc52f9120eef72c1aaa2b0043b57114e6.tar.bz2
           gentoo-2-0494a85dc52f9120eef72c1aaa2b0043b57114e6.zip
Version bump.
(Portage version: 2.1_pre4-r1)
Diffstat (limited to 'sys-fs/evms')
-rw-r--r--  sys-fs/evms/ChangeLog |    9
-rw-r--r--  sys-fs/evms/Manifest |   44
-rw-r--r--  sys-fs/evms/evms-2.5.3.ebuild |   84
-rw-r--r--  sys-fs/evms/evms-2.5.4.ebuild (renamed from sys-fs/evms/evms-2.5.2-r1.ebuild) |   13
-rw-r--r--  sys-fs/evms/files/2.5.4/corrolator_race.patch |   51
-rw-r--r--  sys-fs/evms/files/2.5.4/degraded_raid.patch | 1144
-rw-r--r--  sys-fs/evms/files/digest-evms-2.5.2-r1 |    1
-rw-r--r--  sys-fs/evms/files/digest-evms-2.5.3 |    1
-rw-r--r--  sys-fs/evms/files/digest-evms-2.5.4 |    3
9 files changed, 1250 insertions, 100 deletions
diff --git a/sys-fs/evms/ChangeLog b/sys-fs/evms/ChangeLog
index 90c089e6fed1..bdcead171663 100644
--- a/sys-fs/evms/ChangeLog
+++ b/sys-fs/evms/ChangeLog
@@ -1,6 +1,13 @@
# ChangeLog for sys-fs/evms
# Copyright 1999-2006 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/sys-fs/evms/ChangeLog,v 1.41 2006/02/07 02:38:17 agriffis Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-fs/evms/ChangeLog,v 1.42 2006/02/22 07:41:10 eradicator Exp $
+
+*evms-2.5.4 (22 Feb 2006)
+
+ 22 Feb 2006; Jeremy Huddleston <eradicator@gentoo.org>
+ +files/2.5.4/degraded_raid.patch, +files/2.5.4/corrolator_race.patch,
+ -evms-2.5.2-r1.ebuild, -evms-2.5.3.ebuild, +evms-2.5.4.ebuild:
+ Version bump.
07 Feb 2006; Aron Griffis <agriffis@gentoo.org> evms-2.5.3-r1.ebuild:
Mark 2.5.3-r1 stable on ia64
diff --git a/sys-fs/evms/Manifest b/sys-fs/evms/Manifest
index 794d52ab1cad..341b0f594f37 100644
--- a/sys-fs/evms/Manifest
+++ b/sys-fs/evms/Manifest
@@ -1,21 +1,45 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
MD5 0ce25574b39eb47c43dbae707882e800 ChangeLog 9313
+RMD160 64b9a8cddefabf59b2a19e0c65107693ea352279 ChangeLog 9313
+SHA256 22418664045dd67ed131a74ff2055001e56c062a711dd2aa36829768258fc9e3 ChangeLog 9313
MD5 a36fa36a0ee39f02fa7162fa72a488af evms-2.5.2-r1.ebuild 2297
+RMD160 078e8fe11c34da3035b6f4b71263d8399be986d8 evms-2.5.2-r1.ebuild 2297
+SHA256 a548ac99535b265f87f75fd06ea067d1ad05aed0112c62a59b580eb60b43fb4a evms-2.5.2-r1.ebuild 2297
MD5 a65a8d5f94a287aac2e99eee6c70d7b3 evms-2.5.3-r1.ebuild 2427
+RMD160 dde95093f500195860d3982835de1e3c5bceb6d2 evms-2.5.3-r1.ebuild 2427
+SHA256 77b81af199f49601526139c22baa2e075311bc021fdcedc789aaafa1f4f1015d evms-2.5.3-r1.ebuild 2427
MD5 56506764cd7ab73aa3b1a06b9e55f08a evms-2.5.3.ebuild 2295
+RMD160 f1116f1e1ebf01d6810fff654367a17e72f59704 evms-2.5.3.ebuild 2295
+SHA256 55dc835cf86e4e6e79ed2f68c8ba75e7b4d8961aeb56c3d4949b3da2e8fac946 evms-2.5.3.ebuild 2295
+MD5 c91896def8c4cc5a08bc798fb94760da evms-2.5.4.ebuild 2436
+RMD160 0a9352ffd2395ccfa4dc3b52729d867e8821cf37 evms-2.5.4.ebuild 2436
+SHA256 c9bb9455124abd4c94f1bd6afd764509a4357866ba89927032b725c7ef2f4731 evms-2.5.4.ebuild 2436
MD5 fea531c10f3cb1d47c0f0af40bad0fa9 files/2.5.3/compaq_segments.patch 478
+RMD160 a31b6197ded598606389d590d6c36d5a5853203c files/2.5.3/compaq_segments.patch 478
+SHA256 943748998e15b603fdd66a137903a4486b0b98c935a589c297d9c446ef93557e files/2.5.3/compaq_segments.patch 478
MD5 fbf48e09c52473403af443f9ee5d8403 files/2.5.3/md_expand.patch 2152
+RMD160 e795ba20bc9d525705b5b22f7c5b68e56ce6ae2c files/2.5.3/md_expand.patch 2152
+SHA256 56f5996874159ce456fcdc72b93aa9ac9011c4be93b2a2954fef8c75046fe85b files/2.5.3/md_expand.patch 2152
+MD5 598840887ca27d780eb4388bd45b6b60 files/2.5.4/corrolator_race.patch 1489
+RMD160 e5f8b7f9d73030ff4f804dcdebf9e0cc0eff1d3f files/2.5.4/corrolator_race.patch 1489
+SHA256 c759eedcfcf0694661f17fdc1d7a5062e3ac55338e70a29d7c056647d15af68b files/2.5.4/corrolator_race.patch 1489
+MD5 1879c686d1d98452cb679c201e045d17 files/2.5.4/degraded_raid.patch 48168
+RMD160 070ca8843f13dea77350b9042632cd8db4f1fe20 files/2.5.4/degraded_raid.patch 48168
+SHA256 ab718bf4cee77fafa4b57c54a3fcebc85e6dd6f59266b885eaf5fb9160c686c6 files/2.5.4/degraded_raid.patch 48168
MD5 996ea6d43e86cc65954540da80e56061 files/digest-evms-2.5.2-r1 63
+RMD160 d23bfcc28d14b97de9a13d30200a6a1e41b14b0b files/digest-evms-2.5.2-r1 63
+SHA256 07840eaa934fb1d6aa9dc363cb5d27674b26d46c1c37ef2cc26869e846414fd5 files/digest-evms-2.5.2-r1 63
MD5 2769ffa4ae1f09667ac42e94da8036ba files/digest-evms-2.5.3 63
+RMD160 f3bcc4f4915f04c44327a58436ba4d9c9d614c02 files/digest-evms-2.5.3 63
+SHA256 34b821017208669e49f9550fd6d718622704964a45fc2fab7daff6a39aca839e files/digest-evms-2.5.3 63
MD5 2769ffa4ae1f09667ac42e94da8036ba files/digest-evms-2.5.3-r1 63
+RMD160 f3bcc4f4915f04c44327a58436ba4d9c9d614c02 files/digest-evms-2.5.3-r1 63
+SHA256 34b821017208669e49f9550fd6d718622704964a45fc2fab7daff6a39aca839e files/digest-evms-2.5.3-r1 63
+MD5 b1b28090523dc3bea2165bd458bc0c4b files/digest-evms-2.5.4 235
+RMD160 67139c1be502af23663fff3dca4a136262fa8054 files/digest-evms-2.5.4 235
+SHA256 e2d83ef14ba9f7b40162d7a6963cefec2f634c99e4c831480821898924f1d16f files/digest-evms-2.5.4 235
MD5 5eb632aa94b0537e1983f935e3706d74 files/evms2-start.sh 382
+RMD160 a9dbd94b537ddcc9b0798cb904769b075e7d529a files/evms2-start.sh 382
+SHA256 ea36fbde78590067029405ab18b09106b083913688ea4863c2e63eece8e74021 files/evms2-start.sh 382
MD5 59d559390ca6a6d365a5d6c3012c0343 metadata.xml 387
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.4.1 (GNU/Linux)
-
-iD8DBQFD6AgjJrHF4yAQTrARAp1fAJ9EOfNh28nU0JsFAFSPZFTewQXhTACfVE32
-YjJuaQUBexB6XxCrWb6l0vg=
-=hWR7
------END PGP SIGNATURE-----
+RMD160 17a7b8226f7c5ef9dd45acd6fa6c9cfe7ba6dcc2 metadata.xml 387
+SHA256 b63984a06d4d8c776e0a1bffb39f36e1835e730d2636ff2a15550b17bb52eb03 metadata.xml 387
diff --git a/sys-fs/evms/evms-2.5.3.ebuild b/sys-fs/evms/evms-2.5.3.ebuild
deleted file mode 100644
index 662f4da7431b..000000000000
--- a/sys-fs/evms/evms-2.5.3.ebuild
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 1999-2005 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/sys-fs/evms/evms-2.5.3.ebuild,v 1.1 2005/06/24 07:09:15 eradicator Exp $
-
-inherit eutils flag-o-matic multilib
-
-DESCRIPTION="Utilities for the IBM Enterprise Volume Management System"
-HOMEPAGE="http://www.sourceforge.net/projects/evms"
-SRC_URI="mirror://sourceforge/${PN}/${P}.tar.gz"
-
-LICENSE="GPL-2"
-SLOT="0"
-KEYWORDS="~amd64 ~ppc ~sparc ~x86"
-IUSE="ncurses gtk"
-
-#EVMS uses libuuid from e2fsprogs
-DEPEND="virtual/libc
- sys-fs/e2fsprogs
- sys-fs/device-mapper
- >=sys-apps/baselayout-1.9.4-r6
- gtk? ( =x11-libs/gtk+-1* )
- ncurses? ( sys-libs/ncurses
- =dev-libs/glib-1* )"
-
-src_compile() {
- # Bug #54856
- # filter-flags "-fstack-protector"
- replace-flags -O3 -O2
- replace-flags -Os -O2
-
- local excluded_interfaces=""
- use ncurses || excluded_interfaces="--disable-text-mode"
- use gtk || excluded_interfaces="${excluded_interfaces} --disable-gui"
-
- econf \
- --libdir=/$(get_libdir) \
- --sbindir=/sbin \
- --includedir=/usr/include \
- ${excluded_interfaces} || die "Failed configure"
- emake || die "Failed emake"
-}
-
-src_install() {
- make DESTDIR="${D}" install || die "Make install died"
- dodoc ChangeLog INSTALL* PLUGIN.IDS README TERMINOLOGY doc/linuxrc
-
- insinto /$(get_libdir)/rcscripts/addons
- newins "${FILESDIR}"/evms2-start.sh evms-start.sh || die "rcscript addon failed"
-
- # install the sample configuration into the doc dir
- dodoc ${D}/etc/evms.conf.sample
- rm -f ${D}/etc/evms.conf.sample
-
- # the kernel patches may come handy for people compiling their own kernel
- docinto kernel/2.4
- dodoc kernel/2.4/*
- docinto kernel/2.6
- dodoc kernel/2.6/*
-
- # move static libraries to /usr/lib
- dodir /usr/$(get_libdir)
- mv -f ${D}/$(get_libdir)/*.a ${D}/usr/$(get_libdir)
-
- # Create linker scripts for dynamic libs in /lib, else gcc
- # links to the static ones in /usr/lib first. Bug #4411.
- for x in ${D}/usr/$(get_libdir)/*.a
- do
- if [ -f ${x} ]
- then
- local lib="${x##*/}"
- gen_usr_ldscript ${lib/\.a/\.so}
- fi
- done
-
- # the gtk+ frontend should live in /usr/sbin
- if use gtk
- then
- dodir /usr/sbin
- mv -f ${D}/sbin/evmsgui ${D}/usr/sbin
- fi
-
- # Needed for bug #51252
- dosym libevms-2.5.so.0.0 /$(get_libdir)/libevms-2.5.so.0
-}
diff --git a/sys-fs/evms/evms-2.5.2-r1.ebuild b/sys-fs/evms/evms-2.5.4.ebuild
index 9de862e5c469..45eb93be726e 100644
--- a/sys-fs/evms/evms-2.5.2-r1.ebuild
+++ b/sys-fs/evms/evms-2.5.4.ebuild
@@ -1,6 +1,6 @@
-# Copyright 1999-2005 Gentoo Foundation
+# Copyright 1999-2006 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/sys-fs/evms/evms-2.5.2-r1.ebuild,v 1.2 2005/07/05 16:04:13 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-fs/evms/evms-2.5.4.ebuild,v 1.1 2006/02/22 07:41:10 eradicator Exp $
inherit eutils flag-o-matic multilib
@@ -10,7 +10,7 @@ SRC_URI="mirror://sourceforge/${PN}/${P}.tar.gz"
LICENSE="GPL-2"
SLOT="0"
-KEYWORDS="amd64 ia64 ppc sparc x86"
+KEYWORDS="~amd64 ~ia64 ~ppc ~sparc ~x86"
IUSE="ncurses gtk"
#EVMS uses libuuid from e2fsprogs
@@ -22,6 +22,13 @@ DEPEND="virtual/libc
ncurses? ( sys-libs/ncurses
=dev-libs/glib-1* )"
+src_unpack() {
+ unpack ${A}
+ cd ${S}
+ epatch ${FILESDIR}/${PV}/corrolator_race.patch
+ epatch ${FILESDIR}/${PV}/degraded_raid.patch
+}
+
src_compile() {
# Bug #54856
# filter-flags "-fstack-protector"
diff --git a/sys-fs/evms/files/2.5.4/corrolator_race.patch b/sys-fs/evms/files/2.5.4/corrolator_race.patch
new file mode 100644
index 000000000000..16ac21c0b5bb
--- /dev/null
+++ b/sys-fs/evms/files/2.5.4/corrolator_race.patch
@@ -0,0 +1,51 @@
+Set the corrolator in the message before sending it to frag_send_msg_wait() to
+eliminate the race condition where a reply can come back before
+frag_send_msg_wait() completes. If the reply comes back before the send
+completes, the caller will not be able to match up the corrolator in the
+message received with the corrolator in the message that was sent.
+
+--- evms-2.5.4/plugins/ha/ece.c 2005-11-11 17:31:05.000000000 -0600
++++ evms-2.5.4-fix/plugins/ha/ece.c 2005-12-14 14:29:55.872855176 -0600
+@@ -1912,7 +1912,7 @@
+ char *node = NULL;
+ gboolean multicast=FALSE;
+ ece_nodeid_t allnode = ECE_ALL_NODES;
+- u_int32_t corr, crc, ackval;
++ u_int32_t orig_corr, crc, ackval;
+ int ret;
+
+
+@@ -1951,8 +1951,8 @@
+ pthread_mutex_lock(&gl_ece_mutex);
+ crc = get_curr_crc();
+ ackval = ++gl_ece_ackval;
+- if((corr = ecemsg->corrolator)==0) {
+- corr = gl_ece_cor++;
++ if((orig_corr = ecemsg->corrolator)==0) {
++ ecemsg->corrolator = gl_ece_cor++;
+ }
+ pthread_mutex_unlock(&gl_ece_mutex);
+
+@@ -1964,12 +1964,11 @@
+ ret = frag_send_msg_wait(ecemsg,
+ multicast,
+ node,
+- corr,
++ ecemsg->corrolator,
+ crc,
+ ackval);
+
+ if(!ret) {
+- ecemsg->corrolator = corr;
+ if(!multicast &&
+ (gl_ece_mode == SLAVE || strcmp(node, llm_getmynodeid())!=0)){
+ /* wait till an acknowledgement from the receiver is
+@@ -1977,6 +1976,8 @@
+ */
+ ret = condition_wait(node, ackval);
+ }
++ } else {
++ ecemsg->corrolator = orig_corr;
+ }
+
+ LOG_EXIT_INT(ret);
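The race that corrolator_race.patch removes is easier to see with the send path reduced to its essentials. The sketch below is a minimal, self-contained C illustration of the ordering only; msg_t, alloc_correlator() and async_send() are hypothetical stand-ins, not the EVMS ece/frag_send_msg_wait() API. In the old ordering the correlator is kept in a local variable and copied into the message only after the send returns, so a reply that arrives first carries a correlator the caller cannot match; the patched ordering stamps the message before sending it.

#include <stdio.h>

/* Hypothetical stand-ins for the ECE message machinery; the field is
 * spelled "corrolator" to match the EVMS source. */
typedef struct {
	unsigned int corrolator;
} msg_t;

static unsigned int next_corr = 1;

static unsigned int alloc_correlator(void)
{
	return next_corr++;
}

/* Pretend the reply arrives before the send call returns, which is the
 * window the patch description is concerned with.  The receiver can only
 * echo whatever correlator was in the message at the moment it was sent. */
static unsigned int async_send(const msg_t *msg)
{
	return msg->corrolator;
}

static void send_and_wait(msg_t *msg, int patched)
{
	unsigned int reply_corr;

	if (patched) {
		/* Patched ordering: stamp the correlator into the message
		 * itself before handing it to the send routine. */
		if (msg->corrolator == 0)
			msg->corrolator = alloc_correlator();
		reply_corr = async_send(msg);
	} else {
		/* Original ordering: keep the correlator in a local and copy
		 * it into the message only after the send returns. */
		unsigned int corr = msg->corrolator;
		if (corr == 0)
			corr = alloc_correlator();
		reply_corr = async_send(msg);
		msg->corrolator = corr;
	}

	printf("%s: message correlator %u, reply correlator %u -> %s\n",
	       patched ? "patched " : "original",
	       msg->corrolator, reply_corr,
	       reply_corr == msg->corrolator ? "matched" : "lost");
}

int main(void)
{
	msg_t a = { 0 };
	msg_t b = { 0 };

	send_and_wait(&a, 0);	/* original: reply carries 0, cannot match */
	send_and_wait(&b, 1);	/* patched: reply matches the message */
	return 0;
}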
diff --git a/sys-fs/evms/files/2.5.4/degraded_raid.patch b/sys-fs/evms/files/2.5.4/degraded_raid.patch
new file mode 100644
index 000000000000..d91f1f227ab4
--- /dev/null
+++ b/sys-fs/evms/files/2.5.4/degraded_raid.patch
@@ -0,0 +1,1144 @@
+Allow creating degraded RAID-1 and RAID-5 regions. For evmsn and evmsgui, the
+user will be prompted to confirm.
+
+Apply with:
+cd /usr/src/evms-2.5.4/
+patch -p1 < degraded_raid.patch
+make
+make install
+
+
+--- evms-2.5.4a/plugins/md/raid5_mgr.c 28 Nov 2005 14:29:46 -0000
++++ evms-2.5.4b/plugins/md/raid5_mgr.c 2 Dec 2005 18:46:57 -0000
+@@ -1707,7 +1707,10 @@
+ char ** spare_disk,
+ unsigned int * chunk_size,
+ unsigned int * raid_level,
+- unsigned int * parity_algorithm ) {
++ unsigned int * parity_algorithm,
++ boolean * degrade,
++ int * missing_index)
++{
+ int i;
+ int rc = 0;
+ boolean ver1_superblock = FALSE;
+@@ -1722,6 +1725,14 @@
+ case RAID5_CREATE_OPT_SB1_INDEX:
+ ver1_superblock = options->option[i].value.b;
+ break;
++
++ case RAID5_CREATE_OPT_DEGRADE_INDEX:
++ *degrade = options->option[i].value.b;
++ break;
++
++ case RAID5_CREATE_OPT_MISSING_INDEX:
++ *missing_index = options->option[i].value.ui32;
++ break;
+
+ case RAID5_CREATE_OPT_SPARE_DISK_INDEX:
+ /*
+@@ -1759,22 +1770,23 @@
+ break;
+
+ }
+-
+ } else {
+ if (strcmp(options->option[i].name, RAID5_CREATE_OPT_SB1_NAME) == 0) {
+ ver1_superblock = options->option[i].value.b;
++ } else if (strcmp(options->option[i].name, RAID5_CREATE_OPT_DEGRADE_NAME) == 0) {
++ *degrade = options->option[i].value.b;
++ } else if (strcmp(options->option[i].name, RAID5_CREATE_OPT_MISSING_NAME) == 0) {
++ *missing_index = options->option[i].value.ui32;
+ } else if (strcmp(options->option[i].name, RAID5_CREATE_OPT_SPARE_DISK_NAME) == 0) {
+ *spare_disk = options->option[i].value.s;
+ } else if (strcmp(options->option[i].name, RAID5_CREATE_OPT_CHUNK_SIZE_NAME) == 0) {
+ *chunk_size = options->option[i].value.ui32 * 2;
+-
+ } else if (strcmp(options->option[i].name, RAID5_CREATE_OPT_RAID_LEVEL_NAME) == 0) {
+ if (strcmp(options->option[i].value.s, RAID4_LEVEL_NAME) == 0) {
+ *raid_level = 4;
+ } else if (strcmp(options->option[i].value.s, RAID5_LEVEL_NAME) == 0) {
+ *raid_level = 5;
+ }
+-
+ } else if (strcmp(options->option[i].name, RAID5_CREATE_OPT_PARITY_ALGORITHM_NAME) == 0) {
+ if (strcmp(options->option[i].value.s, ALGORITHM_LEFT_ASYMMETRIC_NAME) == 0) {
+ *parity_algorithm = ALGORITHM_LEFT_ASYMMETRIC;
+@@ -1872,6 +1884,10 @@
+ list_element_t iter1, iter2;
+ md_sb_ver_t sb_ver = {MD_SB_VER_0, 90, 0};
+ md_member_t *member;
++ boolean degrade = FALSE;
++ int missing_index;
++ boolean missing_added = FALSE;
++ int min_disks;
+ int rc = 0;
+
+ my_plugin = raid5_plugin;
+@@ -1882,10 +1898,15 @@
+ LOG_EXIT_INT(EFAULT);
+ return EFAULT;
+ }
++
++ raid5_get_create_options(options, &sb_ver, &spare_disk, &chunksize, &raid_level, &parity_algorithm, &degrade, &missing_index);
+
+- // Must have at least RAID5_MIN_RAID_DISKS
+- if (EngFncs->list_count(objects) < RAID5_MIN_RAID_DISKS) {
+- LOG_CRITICAL("Must have at least %d objects.\n", RAID5_MIN_RAID_DISKS);
++ // Must have at least RAID5_MIN_RAID_DISKS for a functional array
++ // and RAID5_MIN_RAID_DISKS - 1 for a degraded array
++ min_disks = degrade ? RAID5_MIN_RAID_DISKS - 1 : RAID5_MIN_RAID_DISKS;
++ if (EngFncs->list_count(objects) < min_disks) {
++ MESSAGE(_("A %s MD region requires a minimum of %d objects.\n"),
++ degrade ? "degraded" : "functional", min_disks);
+ LOG_EXIT_INT(EINVAL);
+ return EINVAL;
+ }
+@@ -1900,8 +1921,6 @@
+ goto error_free;
+ }
+
+- raid5_get_create_options(options, &sb_ver, &spare_disk, &chunksize, &raid_level, &parity_algorithm);
+-
+ LIST_FOR_EACH(objects, iter1, object) {
+ size = min(size, md_object_usable_size(object, &sb_ver, chunksize));
+ }
+@@ -1920,6 +1939,10 @@
+
+ // Add raid members
+ LIST_FOR_EACH_SAFE(objects, iter1, iter2, object) {
++ if (degrade && !missing_added && (missing_index == volume->nr_disks)) {
++ rc = md_volume_add_new_missing(volume);
++ missing_added = TRUE;
++ }
+ member = md_allocate_member(object);
+ if (member) {
+ // This will add the member and update the MD superblock.
+@@ -1933,29 +1956,40 @@
+ } else {
+ rc = ENOMEM;
+ }
++
+ if (rc) {
+ goto error_free;
+ }
+ EngFncs->delete_element(iter1);
+ }
+
+- // Add spare member
+- if (spare) {
+- member = md_allocate_member(spare);
+- if (member) {
+- // This will add the member and update the MD superblock.
+- member->flags |= (MD_MEMBER_NEW | MD_MEMBER_DISK_SPARE);
+- member->data_size = size;
+- rc = md_volume_add_new_member(volume, member);
++ if (degrade) {
++ if (!missing_added) {
++ rc = md_volume_add_new_missing(volume);
++ missing_added = TRUE;
+ if (rc) {
+- md_free_member(member);
+ goto error_free;
+- }
+- } else {
+- rc = ENOMEM;
++ }
+ }
+- if (rc) {
+- goto error_free;
++ } else {
++ // Add spare member
++ if (spare) {
++ member = md_allocate_member(spare);
++ if (member) {
++ // This will add the member and update the MD superblock.
++ member->flags |= (MD_MEMBER_NEW | MD_MEMBER_DISK_SPARE);
++ member->data_size = size;
++ rc = md_volume_add_new_member(volume, member);
++ if (rc) {
++ md_free_member(member);
++ goto error_free;
++ }
++ } else {
++ rc = ENOMEM;
++ }
++ if (rc) {
++ goto error_free;
++ }
+ }
+ }
+
+@@ -3542,11 +3576,11 @@
+ // Version 1 Superblock Option
+ if (md_can_create_sb_1() == TRUE) {
+ context->option_descriptors->option[RAID5_CREATE_OPT_SB1_INDEX].flags = 0;
+- context->min_selected_objects = RAID5_MIN_RAID_DISKS;
++ context->min_selected_objects = RAID5_MIN_RAID_DISKS - 1;
+ context->max_selected_objects = MD_SB_1_DISKS;
+ } else {
+ context->option_descriptors->option[RAID5_CREATE_OPT_SB1_INDEX].flags = EVMS_OPTION_FLAGS_INACTIVE;
+- context->min_selected_objects = RAID5_MIN_RAID_DISKS;
++ context->min_selected_objects = RAID5_MIN_RAID_DISKS - 1;
+ context->max_selected_objects = MD_SB_DISKS;
+ }
+ context->option_descriptors->option[RAID5_CREATE_OPT_SB1_INDEX].constraint.list = NULL;
+@@ -3561,6 +3595,40 @@
+ context->option_descriptors->option[RAID5_CREATE_OPT_SB1_INDEX].unit = EVMS_Unit_None;
+ context->option_descriptors->option[RAID5_CREATE_OPT_SB1_INDEX].value.b = FALSE;
+
++ /* Degrade option */
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED;
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].constraint.list = NULL;
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].constraint_type = EVMS_Collection_None;
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].help = NULL;
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].name =
++ EngFncs->engine_strdup( RAID5_CREATE_OPT_DEGRADE_NAME );
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].tip =
++ EngFncs->engine_strdup( _("Choose Yes if you want to create a degraded array.") );
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].title =
++ EngFncs->engine_strdup( _("Degraded array") );
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].type = EVMS_Type_Boolean;
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].unit = EVMS_Unit_None;
++ context->option_descriptors->option[RAID5_CREATE_OPT_DEGRADE_INDEX].value.b = FALSE;
++
++ /* Missing index option */
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED | EVMS_OPTION_FLAGS_INACTIVE;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].constraint.range =
++ EngFncs->engine_alloc(sizeof(value_range_t));
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].constraint.range->min.ui32 = 0;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].constraint.range->max.ui32 = 1;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].constraint.range->increment.ui32 = 1;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].constraint_type = EVMS_Collection_Range;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].help = NULL;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].name =
++ EngFncs->engine_strdup(RAID5_CREATE_OPT_MISSING_NAME );
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].tip =
++ EngFncs->engine_strdup(_("Disk index for missing member."));
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].title =
++ EngFncs->engine_strdup(_("Missing disk index"));
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].type = EVMS_Type_Unsigned_Int32;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].unit = EVMS_Unit_None;
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].value.ui32 = 0;
++
+ /* Spare disk option */
+ context->option_descriptors->option[RAID5_CREATE_OPT_SPARE_DISK_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED;
+ /* Get the list of disks that can be spares. */
+@@ -3623,7 +3691,7 @@
+ LOG_EXIT_INT(ENOMEM);
+ return ENOMEM;
+ }
+-
++
+ /* Get a list of all valid input disks, segments, and regions. */
+ EngFncs->get_object_list(DISK | SEGMENT | REGION,
+ DATA_TYPE,
+@@ -3911,11 +3979,16 @@
+ * appropriate. Reset the value if necessary and possible. Adjust other
+ * options as appropriate.
+ */
+-static int raid5_set_option( task_context_t * context,
+- u_int32_t index,
+- value_t * value,
+- task_effect_t * effect ) {
++static int raid5_set_option(
++ task_context_t * context,
++ u_int32_t index,
++ value_t * value,
++ task_effect_t * effect )
++{
+ int rc = 0;
++ boolean degrade;
++ int answer;
++ char * choice_text[3] = { _("Yes"), _("No"), NULL };
+
+ my_plugin = raid5_plugin;
+ LOG_ENTRY();
+@@ -3938,7 +4011,41 @@
+ context->max_selected_objects = MD_SB_DISKS;
+ }
+ break;
+-
++
++ case RAID5_CREATE_OPT_DEGRADE_INDEX:
++ if (value->b != context->option_descriptors->option[index].value.b) {
++ degrade = FALSE;
++ if (value->b == TRUE) {
++ answer = 1; /* index 1 is "No" */
++ QUESTION(&answer, choice_text, _("Do you really want to create a degraded array?"));
++ if (answer == 0) {
++ /* index 0 is "Yes" */
++ degrade = TRUE;
++ }
++ }
++ context->option_descriptors->option[index].value.b = degrade;
++ if (degrade) {
++ context->option_descriptors->option[RAID5_CREATE_OPT_SPARE_DISK_INDEX].flags |= (EVMS_OPTION_FLAGS_INACTIVE);
++ strcpy(context->option_descriptors->option[RAID5_CREATE_OPT_SPARE_DISK_INDEX].value.s, "");
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].flags &= ~(EVMS_OPTION_FLAGS_INACTIVE);
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].constraint.range->max.ui32 = EngFncs->list_count(context->selected_objects);
++ } else {
++ context->option_descriptors->option[RAID5_CREATE_OPT_SPARE_DISK_INDEX].flags &= ~(EVMS_OPTION_FLAGS_INACTIVE);
++ context->option_descriptors->option[RAID5_CREATE_OPT_MISSING_INDEX].flags |= (EVMS_OPTION_FLAGS_INACTIVE);
++ }
++ *effect |= EVMS_Effect_Reload_Options;
++ }
++ break;
++
++ case RAID5_CREATE_OPT_MISSING_INDEX:
++ /* Verify that the index is within range? */
++ if (value->ui32 > EngFncs->list_count(context->selected_objects)) {
++ rc = EINVAL;
++ } else {
++ context->option_descriptors->option[index].value.ui32 = value->ui32;
++ }
++ break;
++
+ case RAID5_CREATE_OPT_SPARE_DISK_INDEX:
+ /*
+ * Not worth validation, will catch when we try to find
+@@ -4005,7 +4112,6 @@
+ } else {
+ rc = EINVAL;
+ }
+-
+ break;
+
+ default:
+
+--- evms-2.5.4a/plugins/md/raid5_mgr.h 20 Jan 2005 15:09:53 -0000
++++ evms-2.5.4b/plugins/md/raid5_mgr.h 2 Dec 2005 18:46:57 -0000
+@@ -100,17 +100,21 @@
+
+ #define RAID5_CREATE_OPT_SB1_INDEX 0
+ #define RAID5_CREATE_OPT_SB1_NAME "ver1_superblock"
+-#define RAID5_CREATE_OPT_SPARE_DISK_INDEX 1
++#define RAID5_CREATE_OPT_DEGRADE_INDEX 1
++#define RAID5_CREATE_OPT_DEGRADE_NAME "degraded"
++#define RAID5_CREATE_OPT_MISSING_INDEX 2
++#define RAID5_CREATE_OPT_MISSING_NAME "missing_index"
++#define RAID5_CREATE_OPT_SPARE_DISK_INDEX 3
+ #define RAID5_CREATE_OPT_SPARE_DISK_NAME "sparedisk"
+-#define RAID5_CREATE_OPT_CHUNK_SIZE_INDEX 2
++#define RAID5_CREATE_OPT_CHUNK_SIZE_INDEX 4
+ #define RAID5_CREATE_OPT_CHUNK_SIZE_NAME "chunksize"
+-#define RAID5_CREATE_OPT_RAID_LEVEL_INDEX 3
++#define RAID5_CREATE_OPT_RAID_LEVEL_INDEX 5
+ #define RAID5_CREATE_OPT_RAID_LEVEL_NAME "level"
+-#define RAID5_CREATE_OPT_PARITY_ALGORITHM_INDEX 4
++#define RAID5_CREATE_OPT_PARITY_ALGORITHM_INDEX 6
+ #define RAID5_CREATE_OPT_PARITY_ALGORITHM_NAME "algorithm"
+ #define RAID5_CREATE_NO_SELECTION _("None")
+
+-#define MD_CREATE_OPTIONS_COUNT 5
++#define MD_CREATE_OPTIONS_COUNT 7
+
+
+ /*
+
+--- evms-2.5.4a/plugins/md/md_info.h 11 Feb 2005 04:13:16 -0000
++++ evms-2.5.4b/plugins/md/md_info.h 2 Dec 2005 18:46:57 -0000
+@@ -27,8 +27,7 @@
+ #define NUM_SUPER_INFO_ENTRIES 24
+
+ #define MD_SUPER_INFO_CLEAN (1<<0)
+-#define MD_SUPER_INFO_DIRTY (1<<1)
+-#define MD_SUPER_INFO_ERRORS (1<<2)
++#define MD_SUPER_INFO_ERRORS (1<<1)
+
+ typedef struct md_super_info {
+ u_int32_t md_magic;
+
+--- evms-2.5.4a/plugins/md/md_super.c 28 Nov 2005 14:29:46 -0000
++++ evms-2.5.4b/plugins/md/md_super.c 2 Dec 2005 18:46:57 -0000
+@@ -334,8 +334,6 @@
+ info->state_flags = MD_SUPER_INFO_ERRORS;
+ } else if ((sb->state & (1<<MD_SB_CLEAN))) {
+ info->state_flags = MD_SUPER_INFO_CLEAN;
+- } else {
+- info->state_flags = MD_SUPER_INFO_DIRTY;
+ }
+ info->sb_csum = sb->sb_csum;
+ info->layout = sb->layout;
+@@ -363,6 +361,14 @@
+ sb->working_disks = info->working_disks;
+ sb->failed_disks = info->failed_disks;
+ sb->spare_disks = info->spare_disks;
++ if (info->state_flags & MD_SUPER_INFO_CLEAN) {
++ sb->state |= (1<<MD_SB_CLEAN);
++ } else {
++ sb->state &= ~(1<<MD_SB_CLEAN);
++ }
++ if (info->state_flags & MD_SUPER_INFO_ERRORS) {
++ sb->state |= (1<<MD_SB_ERRORS);
++ }
+ LOG_WARNING("Superblock disk counts have been changed,"
+ " nr_disks(%03d) raid_disks(%03d) active_disks(%03d)"
+ " working_disks(%03d) failed_disks(%03d) spare_disks(%03d).\n",
+@@ -1063,9 +1069,9 @@
+
+ if (!rc) {
+ disk = &master_sb->disks[member->dev_number];
+- disk->state = MD_DISK_FAULTY;
++ disk->state = 1<<MD_DISK_FAULTY;
+ if (mark_removed == TRUE) {
+- disk->state |= MD_DISK_REMOVED;
++ disk->state |= (1<<MD_DISK_REMOVED);
+ }
+ if (master_sb->this_disk.number == disk->number) {
+ master_sb->this_disk.state = disk->state;
+@@ -1076,6 +1082,32 @@
+ return rc;
+ }
+
++static int sb0_mark_disk_missing(md_volume_t *vol, int dev_number)
++{
++ mdp_super_t *master_sb;
++ mdp_disk_t *disk;
++ int rc = 0;
++
++ LOG_ENTRY();
++
++ if (!vol || !vol->sb) {
++ LOG_MD_BUG();
++ rc = EINVAL;
++ goto out;
++ }
++
++ master_sb = (mdp_super_t *)vol->sb;
++ disk = &master_sb->disks[dev_number];
++ disk->state = (1<<MD_DISK_FAULTY) | (1<<MD_DISK_REMOVED);
++ if (master_sb->this_disk.number == dev_number) {
++ master_sb->this_disk.state = disk->state;
++ }
++
++out:
++ LOG_EXIT_INT(rc);
++ return rc;
++}
++
+ static void sb0_increment_events(void *super)
+ {
+ mdp_super_t *sb = (mdp_super_t *)super;
+@@ -1343,6 +1375,7 @@
+ init_sb : sb0_init_sb,
+ load_this_device_info : sb0_load_this_device_info,
+ mark_disk_faulty : sb0_mark_disk_faulty,
++ mark_disk_missing : sb0_mark_disk_missing,
+ max_disks : sb0_max_disks,
+ read_saved_info : sb0_read_saved_info,
+ remove_disk : sb0_remove_disk,
+@@ -2114,6 +2147,25 @@
+ return rc;
+ }
+
++static int sb1_mark_disk_missing(md_volume_t *vol, int dev_number)
++{
++ mdp_sb_1_t *master_sb;
++ int rc = 0;
++
++ LOG_ENTRY();
++
++ if (!vol || !vol->sb) {
++ LOG_MD_BUG();
++ rc = EINVAL;
++ goto out;
++ }
++ master_sb = (mdp_sb_1_t *)vol->sb;
++ master_sb->dev_roles[dev_number] = 0xFFFE;
++out:
++ LOG_EXIT_INT(rc);
++ return rc;
++}
++
+ static void sb1_set_utime(void *super)
+ {
+ mdp_sb_1_t *sb = (mdp_sb_1_t *)super;
+@@ -2606,6 +2658,7 @@
+ init_sb : sb1_init_sb,
+ load_this_device_info : sb1_load_this_device_info,
+ mark_disk_faulty : sb1_mark_disk_faulty,
++ mark_disk_missing : sb1_mark_disk_missing,
+ max_disks : sb1_max_disks,
+ read_saved_info : sb1_read_saved_info,
+ remove_disk : sb1_remove_disk,
+@@ -2749,6 +2802,74 @@
+ }
+
+ /*
++ * md_volume_add_new_missing
++ * - This function appends a missing disk entry at the current position (nr_disks)
++ * - To create a degraded array with a missing entry at any position, the caller
++ * must coordinate. For example, to create a [disk1, missing, disk2] raid5 array,
++ * the raid5 plugin should do:
++ * md_volume_add_new_member(vol, disk1)
++ * md_volume_add_new_missing(vol)
++ * md_volume_add_new_member(vol, disk2)
++ */
++int md_volume_add_new_missing(md_volume_t *vol)
++{
++ int rc = 0;
++ int rc2;
++ md_member_t *my_member;
++ list_element_t iter;
++ md_super_info_t info;
++
++ LOG_ENTRY();
++
++ if (!vol || !vol->sb_func) {
++ LOG_MD_BUG();
++ rc = EINVAL;
++ goto out;
++ }
++
++ /* Get current superblock */
++ md_volume_get_super_info(vol, &info);
++ info.nr_disks++;
++ info.raid_disks++;
++ info.failed_disks++;
++ info.state_flags = MD_SUPER_INFO_CLEAN;
++ vol->sb_func->set_sb_info(vol->sb, &info);
++ vol->sb_func->mark_disk_missing(vol, info.nr_disks-1);
++
++ /* Free all existing superblocks, then re-create from the master */
++ LIST_FOR_EACH(vol->members, iter, my_member) {
++ if (my_member->sb) {
++ EngFncs->engine_free(my_member->sb);
++ my_member->sb = NULL;
++ }
++ rc2 = vol->sb_func->duplicate_sb(&my_member->sb, vol->sb);
++ if (!rc2) {
++ vol->sb_func->set_this_device_info(my_member);
++ } else if (!rc) {
++ rc = rc2; //Save and return the first error (if any)
++ }
++ }
++
++ md_volume_get_super_info(vol, &info);
++ vol->nr_disks = info.nr_disks;
++ vol->raid_disks = info.raid_disks;
++ vol->active_disks = info.active_disks;
++ vol->spare_disks = info.spare_disks;
++ vol->working_disks = info.working_disks;
++ vol->failed_disks = info.failed_disks;
++ vol->flags |= MD_DEGRADED;
++
++ LOG_DEBUG("MD region %s: nr_disks(%d) raid_disks(%d) active_disks(%d)"
++ " spare_disks(%d) working_disks(%d) failed_disks(%d).\n",
++ vol->name, vol->nr_disks, vol->raid_disks, vol->active_disks,
++ vol->spare_disks, vol->working_disks, vol->failed_disks);
++
++out:
++ LOG_EXIT_INT(rc);
++ return rc;
++}
++
++/*
+ * md_write_sbs_to_disk
+ *
+ * Write superblocks for all members of the array.
+
+--- evms-2.5.4a/plugins/md/raid1_mgr.h 17 Jan 2005 05:50:23 -0000
++++ evms-2.5.4b/plugins/md/raid1_mgr.h 2 Dec 2005 18:46:57 -0000
+@@ -24,12 +24,16 @@
+
+
+ // create options:
+-#define RAID1_CREATE_OPTION_COUNT 2
++#define RAID1_CREATE_OPT_COUNT 4
+
+-#define RAID1_CREATE_OPTION_SB1_INDEX 0
+-#define RAID1_CREATE_OPTION_SB1_NAME "ver1_superblock"
+-#define RAID1_OPTION_SPARE_DISK_INDEX 1
+-#define RAID1_OPTION_SPARE_DISK_NAME "sparedisk"
++#define RAID1_CREATE_OPT_SB1_INDEX 0
++#define RAID1_CREATE_OPT_SB1_NAME "ver1_superblock"
++#define RAID1_CREATE_OPT_DEGRADE_INDEX 1
++#define RAID1_CREATE_OPT_DEGRADE_NAME "degraded"
++#define RAID1_CREATE_OPT_MISSING_INDEX 2
++#define RAID1_CREATE_OPT_MISSING_NAME "missing_index"
++#define RAID1_OPT_SPARE_DISK_INDEX 3
++#define RAID1_OPT_SPARE_DISK_NAME "sparedisk"
+ #define RAID1_NO_SELECTION _("None")
+
+
+@@ -38,20 +42,20 @@
+ //
+ // Expand Option Info
+ //
+-#define RAID1_EXPAND_OPTION_COUNT 1
++#define RAID1_EXPAND_OPT_COUNT 1
+
+-#define RAID1_EXPAND_OPTION_SIZE_INDEX 0
+-#define RAID1_EXPAND_OPTION_SIZE_NAME "Size"
++#define RAID1_EXPAND_OPT_SIZE_INDEX 0
++#define RAID1_EXPAND_OPT_SIZE_NAME "Size"
+ #define RAID1_MINIMUM_EXPAND_SIZE (2 * 1024)
+
+
+ //
+ // Shrink Option Info
+ //
+-#define RAID1_SHRINK_OPTION_COUNT 1
++#define RAID1_SHRINK_OPT_COUNT 1
+
+-#define RAID1_SHRINK_OPTION_SIZE_INDEX 0
+-#define RAID1_SHRINK_OPTION_SIZE_NAME "Size"
++#define RAID1_SHRINK_OPT_SIZE_INDEX 0
++#define RAID1_SHRINK_OPT_SIZE_NAME "Size"
+ #define RAID1_MINIMUM_SHRINK_SIZE (2 * 1024)
+ #define RAID1_PERCENT_SHRINK_THRESHOLD 90
+
+
+--- evms-2.5.4a/plugins/md/raid1_mgr.c 28 Nov 2005 14:29:46 -0000
++++ evms-2.5.4b/plugins/md/raid1_mgr.c 2 Dec 2005 18:46:57 -0000
+@@ -290,28 +290,28 @@
+ rc = raid1_can_children_expand(region, -1, &max_expand_size);
+ if (!rc) {
+
+- context->option_descriptors->count = RAID1_EXPAND_OPTION_COUNT;
++ context->option_descriptors->count = RAID1_EXPAND_OPT_COUNT;
+
+ // Expanded RAID1 region size delta
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.list = NULL;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range = EngFncs->engine_alloc( sizeof(value_range_t) );
+- if (context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range==NULL) {
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.list = NULL;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range = EngFncs->engine_alloc( sizeof(value_range_t) );
++ if (context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range==NULL) {
+ LOG_EXIT_INT(ENOMEM);
+ return ENOMEM;
+ }
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint_type = EVMS_Collection_Range;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].flags = 0;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].help = NULL;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].name = EngFncs->engine_strdup( RAID1_EXPAND_OPTION_SIZE_NAME );
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].tip =
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint_type = EVMS_Collection_Range;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].flags = 0;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].help = NULL;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].name = EngFncs->engine_strdup( RAID1_EXPAND_OPT_SIZE_NAME );
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].tip =
+ EngFncs->engine_strdup( _("Use this option to specify how much space to add to the region.") );
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].title = EngFncs->engine_strdup( _("Additional Size") );
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].type = EVMS_Type_Unsigned_Int64;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].unit = EVMS_Unit_Sectors;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_EXPAND_SIZE;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range->max.ui64 = max_expand_size;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range->increment.ui64 = 1;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].value.ui64 = max_expand_size;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].title = EngFncs->engine_strdup( _("Additional Size") );
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].type = EVMS_Type_Unsigned_Int64;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].unit = EVMS_Unit_Sectors;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_EXPAND_SIZE;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range->max.ui64 = max_expand_size;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range->increment.ui64 = 1;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].value.ui64 = max_expand_size;
+
+ }
+ }
+@@ -347,28 +347,28 @@
+ rc = raid1_can_children_shrink(region, -1, &max_shrink_size);
+ if (!rc) {
+
+- context->option_descriptors->count = RAID1_SHRINK_OPTION_COUNT;
++ context->option_descriptors->count = RAID1_SHRINK_OPT_COUNT;
+
+ // Expanded RAID1 region size delta
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.list = NULL;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range = EngFncs->engine_alloc( sizeof(value_range_t) );
+- if (context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range==NULL) {
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.list = NULL;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range = EngFncs->engine_alloc( sizeof(value_range_t) );
++ if (context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range==NULL) {
+ LOG_EXIT_INT(ENOMEM);
+ return ENOMEM;
+ }
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint_type = EVMS_Collection_Range;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].flags = 0;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].help = NULL;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].name = EngFncs->engine_strdup( RAID1_SHRINK_OPTION_SIZE_NAME );
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].tip =
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint_type = EVMS_Collection_Range;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].flags = 0;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].help = NULL;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].name = EngFncs->engine_strdup( RAID1_SHRINK_OPT_SIZE_NAME );
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].tip =
+ EngFncs->engine_strdup( _("Use this option to specify how much space to reduce from the region.") );
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].title = EngFncs->engine_strdup( _("Shrink by Size") );
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].type = EVMS_Type_Unsigned_Int64;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].unit = EVMS_Unit_Sectors;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_SHRINK_SIZE;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range->max.ui64 = max_shrink_size;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range->increment.ui64 = 1;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].value.ui64 = max_shrink_size;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].title = EngFncs->engine_strdup( _("Shrink by Size") );
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].type = EVMS_Type_Unsigned_Int64;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].unit = EVMS_Unit_Sectors;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_SHRINK_SIZE;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range->max.ui64 = max_shrink_size;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range->increment.ui64 = 1;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].value.ui64 = max_shrink_size;
+
+ }
+ }
+@@ -414,9 +414,9 @@
+ expand_sectors = RAID1_MINIMUM_EXPAND_SIZE;
+ *effect |= EVMS_Effect_Inexact;
+ }
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_EXPAND_SIZE;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].constraint.range->max.ui64 = max_expand_size;
+- context->option_descriptors->option[RAID1_EXPAND_OPTION_SIZE_INDEX].value.ui64 = expand_sectors;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_EXPAND_SIZE;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].constraint.range->max.ui64 = max_expand_size;
++ context->option_descriptors->option[RAID1_EXPAND_OPT_SIZE_INDEX].value.ui64 = expand_sectors;
+ }
+ }
+ }
+@@ -461,9 +461,9 @@
+ shrink_sectors = RAID1_MINIMUM_SHRINK_SIZE;
+ *effect |= EVMS_Effect_Inexact;
+ }
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_SHRINK_SIZE;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].constraint.range->max.ui64 = max_shrink_size;
+- context->option_descriptors->option[RAID1_SHRINK_OPTION_SIZE_INDEX].value.ui64 = shrink_sectors;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range->min.ui64 = RAID1_MINIMUM_SHRINK_SIZE;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].constraint.range->max.ui64 = max_shrink_size;
++ context->option_descriptors->option[RAID1_SHRINK_OPT_SIZE_INDEX].value.ui64 = shrink_sectors;
+ }
+ }
+ }
+@@ -483,14 +483,14 @@
+
+ if (options->option[i].is_number_based) {
+
+- if (options->option[i].number == RAID1_EXPAND_OPTION_SIZE_INDEX) {
++ if (options->option[i].number == RAID1_EXPAND_OPT_SIZE_INDEX) {
+ *size = options->option[i].value.ui64;
+ }
+
+ }
+ else {
+
+- if (strcmp(options->option[i].name, RAID1_EXPAND_OPTION_SIZE_NAME) == 0) {
++ if (strcmp(options->option[i].name, RAID1_EXPAND_OPT_SIZE_NAME) == 0) {
+ *size = options->option[i].value.ui64;
+ }
+
+@@ -511,14 +511,14 @@
+
+ if (options->option[i].is_number_based) {
+
+- if (options->option[i].number == RAID1_SHRINK_OPTION_SIZE_INDEX) {
++ if (options->option[i].number == RAID1_SHRINK_OPT_SIZE_INDEX) {
+ *size = options->option[i].value.ui64;
+ }
+
+ }
+ else {
+
+- if (strcmp(options->option[i].name, RAID1_SHRINK_OPTION_SIZE_NAME) == 0) {
++ if (strcmp(options->option[i].name, RAID1_SHRINK_OPT_SIZE_NAME) == 0) {
+ *size = options->option[i].value.ui64;
+ }
+
+@@ -1008,7 +1008,10 @@
+ static int raid1_get_create_options(
+ option_array_t * options,
+ char ** spare_disk,
+- md_sb_ver_t *sb_ver)
++ md_sb_ver_t *sb_ver,
++ boolean * degrade,
++ int * missing_index)
++
+ {
+ int i;
+ int rc = 0;
+@@ -1022,11 +1025,18 @@
+
+ switch (options->option[i].number) {
+
+- case RAID1_CREATE_OPTION_SB1_INDEX:
++ case RAID1_CREATE_OPT_SB1_INDEX:
+ ver1_superblock = options->option[i].value.b;
+ break;
++ case RAID1_CREATE_OPT_DEGRADE_INDEX:
++ *degrade = options->option[i].value.b;
++ break;
++
++ case RAID1_CREATE_OPT_MISSING_INDEX:
++ *missing_index = options->option[i].value.ui32;
++ break;
+
+- case RAID1_OPTION_SPARE_DISK_INDEX:
++ case RAID1_OPT_SPARE_DISK_INDEX:
+ // Not worth validation, will catch when we try to find the original
+ *spare_disk = options->option[i].value.s;
+ break;
+@@ -1037,9 +1047,13 @@
+
+ } else {
+
+- if (strcmp(options->option[i].name, RAID1_OPTION_SPARE_DISK_NAME) == 0) {
++ if (strcmp(options->option[i].name, RAID1_OPT_SPARE_DISK_NAME) == 0) {
+ *spare_disk = options->option[i].value.s;
+- } else if ((strcmp(options->option[i].name, RAID1_CREATE_OPTION_SB1_NAME) == 0) ) {
++ } else if (strcmp(options->option[i].name, RAID1_CREATE_OPT_DEGRADE_NAME) == 0) {
++ *degrade = options->option[i].value.b;
++ } else if (strcmp(options->option[i].name, RAID1_CREATE_OPT_MISSING_NAME) == 0) {
++ *missing_index = options->option[i].value.ui32;
++ } else if ((strcmp(options->option[i].name, RAID1_CREATE_OPT_SB1_NAME) == 0) ) {
+ ver1_superblock = options->option[i].value.b;
+ }
+ }
+@@ -1114,6 +1128,9 @@
+ list_element_t iter1, iter2;
+ md_sb_ver_t sb_ver = {MD_SB_VER_0, 90, 0};
+ md_member_t *member;
++ boolean degrade = FALSE;
++ int missing_index;
++ boolean missing_added = FALSE;
+ int rc = 0;
+
+ my_plugin = raid1_plugin;
+@@ -1142,7 +1159,7 @@
+ goto error_free;
+ }
+
+- raid1_get_create_options(options, &spare_disk, &sb_ver);
++ raid1_get_create_options(options, &spare_disk, &sb_ver, &degrade, &missing_index);
+
+ LIST_FOR_EACH(objects, iter1, object) {
+ size = min(size, md_object_usable_size(object, &sb_ver, 0));
+@@ -1162,6 +1179,10 @@
+
+ // Add raid members
+ LIST_FOR_EACH_SAFE(objects, iter1, iter2, object) {
++ if (degrade && !missing_added && (missing_index == volume->nr_disks)) {
++ rc = md_volume_add_new_missing(volume);
++ missing_added = TRUE;
++ }
+ member = md_allocate_member(object);
+ if (member) {
+ // This will add the member and update the MD superblock.
+@@ -1181,26 +1202,35 @@
+ EngFncs->delete_element(iter1);
+ }
+
+- // Add spare member
+- if (spare) {
+- member = md_allocate_member(spare);
+- if (member) {
+- // This will add the member and update the MD superblock.
+- member->flags |= (MD_MEMBER_NEW | MD_MEMBER_DISK_SPARE);
+- member->data_size = size;
+- rc = md_volume_add_new_member(volume, member);
++ if (degrade) {
++ if (!missing_added) {
++ rc = md_volume_add_new_missing(volume);
++ missing_added = TRUE;
+ if (rc) {
+- md_free_member(member);
+ goto error_free;
+- }
+- } else {
+- rc = ENOMEM;
++ }
+ }
+- if (rc) {
+- goto error_free;
++ } else {
++ // Add spare member
++ if (spare) {
++ member = md_allocate_member(spare);
++ if (member) {
++ // This will add the member and update the MD superblock.
++ member->flags |= (MD_MEMBER_NEW | MD_MEMBER_DISK_SPARE);
++ member->data_size = size;
++ rc = md_volume_add_new_member(volume, member);
++ if (rc) {
++ md_free_member(member);
++ goto error_free;
++ }
++ } else {
++ rc = ENOMEM;
++ }
++ if (rc) {
++ goto error_free;
++ }
+ }
+ }
+-
+ rc = raid1_create_new_region(volume, new_region_list);
+ if (rc) {
+ goto error_free;
+@@ -1303,7 +1333,7 @@
+
+ option_array.count = 1;
+ option_array.option[0].is_number_based = FALSE;
+- option_array.option[0].name = RAID1_EXPAND_OPTION_SIZE_NAME;
++ option_array.option[0].name = RAID1_EXPAND_OPT_SIZE_NAME;
+ option_array.option[0].value.ui64 = sectors;
+
+ LOG_DEBUG(" %s region %s. current size = %"PRIu64" sectors\n",
+@@ -1683,13 +1713,13 @@
+
+ switch (task->action) {
+ case EVMS_Task_Create:
+- count = RAID1_CREATE_OPTION_COUNT;
++ count = RAID1_CREATE_OPT_COUNT;
+ break;
+ case EVMS_Task_Expand:
+- count = RAID1_EXPAND_OPTION_COUNT;
++ count = RAID1_EXPAND_OPT_COUNT;
+ break;
+ case EVMS_Task_Shrink:
+- count = RAID1_SHRINK_OPTION_COUNT;
++ count = RAID1_SHRINK_OPT_COUNT;
+ break;
+
+ case MD_RAID1_FUNCTION_ADD_SPARE:
+@@ -1936,51 +1966,85 @@
+
+ case EVMS_Task_Create:
+
+- context->option_descriptors->count = RAID1_CREATE_OPTION_COUNT;
++ context->option_descriptors->count = RAID1_CREATE_OPT_COUNT;
+
+ // Version 1 Superblock Option
+ if (md_can_create_sb_1() == TRUE) {
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].flags = 0;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].flags = 0;
+ context->min_selected_objects = 1;
+ context->max_selected_objects = MD_SB_1_DISKS;
+ } else {
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].flags = EVMS_OPTION_FLAGS_INACTIVE;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].flags = EVMS_OPTION_FLAGS_INACTIVE;
+ context->min_selected_objects = 1;
+ context->max_selected_objects = MD_SB_DISKS;
+ }
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].constraint.list = NULL;
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].constraint_type = EVMS_Collection_None;
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].help = NULL;
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].name =
+- EngFncs->engine_strdup( RAID1_CREATE_OPTION_SB1_NAME );
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].tip =
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].constraint.list = NULL;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].constraint_type = EVMS_Collection_None;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].help = NULL;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].name =
++ EngFncs->engine_strdup( RAID1_CREATE_OPT_SB1_NAME );
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].tip =
+ EngFncs->engine_strdup( _("Choose Yes if you want to create MD version 1 super block.") );
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].title = EngFncs->engine_strdup( _("Version 1 Super Block") );
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].type = EVMS_Type_Boolean;
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].unit = EVMS_Unit_None;
+- context->option_descriptors->option[RAID1_CREATE_OPTION_SB1_INDEX].value.b = FALSE;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].title = EngFncs->engine_strdup( _("Version 1 Super Block") );
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].type = EVMS_Type_Boolean;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].unit = EVMS_Unit_None;
++ context->option_descriptors->option[RAID1_CREATE_OPT_SB1_INDEX].value.b = FALSE;
++
++ /* Degrade option */
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED;
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].constraint.list = NULL;
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].constraint_type = EVMS_Collection_None;
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].help = NULL;
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].name =
++ EngFncs->engine_strdup( RAID1_CREATE_OPT_DEGRADE_NAME );
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].tip =
++ EngFncs->engine_strdup( _("Choose Yes if you want to create a degraded array.") );
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].title =
++ EngFncs->engine_strdup( _("Degraded array") );
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].type = EVMS_Type_Boolean;
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].unit = EVMS_Unit_None;
++ context->option_descriptors->option[RAID1_CREATE_OPT_DEGRADE_INDEX].value.b = FALSE;
++
++ /* Missing index option */
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED | EVMS_OPTION_FLAGS_INACTIVE;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].constraint.range =
++ EngFncs->engine_alloc(sizeof(value_range_t));
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].constraint.range->min.ui32 = 0;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].constraint.range->max.ui32 = 1;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].constraint.range->increment.ui32 = 1;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].constraint_type = EVMS_Collection_Range;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].help = NULL;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].name =
++ EngFncs->engine_strdup(RAID1_CREATE_OPT_MISSING_NAME );
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].tip =
++ EngFncs->engine_strdup(_("Disk index for missing member."));
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].title =
++ EngFncs->engine_strdup(_("Missing disk index"));
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].type = EVMS_Type_Unsigned_Int32;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].unit = EVMS_Unit_None;
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].value.ui32 = 0;
+
+ // Spare Disk Option
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].flags = EVMS_OPTION_FLAGS_NOT_REQUIRED;
+ // get the list of disks that can be spares.
+ raid1_create_selectable_spare_list(
+- (value_list_t **)&context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].constraint.list,
++ (value_list_t **)&context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].constraint.list,
+ context->selected_objects,0);
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].constraint_type = EVMS_Collection_List;
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].help = NULL;
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].name =
+- EngFncs->engine_strdup(RAID1_OPTION_SPARE_DISK_NAME );
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].min_len = 1;
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].max_len = EVMS_VOLUME_NAME_SIZE;
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].tip =
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].constraint_type = EVMS_Collection_List;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].help = NULL;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].name =
++ EngFncs->engine_strdup(RAID1_OPT_SPARE_DISK_NAME );
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].min_len = 1;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].max_len = EVMS_VOLUME_NAME_SIZE;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].tip =
+ EngFncs->engine_strdup(_("Object to use as a spare disk in the array") );
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].title =
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].title =
+ EngFncs->engine_strdup(_("Spare Disk") );
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].type = EVMS_Type_String;
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].unit = EVMS_Unit_None;
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].value.s =
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].type = EVMS_Type_String;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].unit = EVMS_Unit_None;
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].value.s =
+ EngFncs->engine_alloc(EVMS_VOLUME_NAME_SIZE+1);
+- strcpy(context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].value.s, RAID1_NO_SELECTION);
++ strcpy(context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].value.s, RAID1_NO_SELECTION);
+
+ // get a list of all valid input disks, segments, and regions.
+ EngFncs->get_object_list(DISK | SEGMENT | REGION,
+@@ -1994,9 +2058,6 @@
+ md_transfer_list(tmp_list, context->acceptable_objects);
+ EngFncs->destroy_list(tmp_list);
+
+- //Option 1 is a boolean value for version 1 super block
+-
+-
+ break;
+
+ case EVMS_Task_Expand:
+@@ -2156,9 +2217,9 @@
+ * is specified and see if it is the smallest.
+ */
+ if (smallest_size != -1) {
+- if (context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].value.s != NULL) {
++ if (context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].value.s != NULL) {
+ spare = md_find_valid_input_object(
+- context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].value.s);
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].value.s);
+
+ if (spare != NULL) {
+ if (vol) {
+@@ -2244,12 +2305,16 @@
+ * appropriate. Reset the value if necessary and possible. Adjust other
+ * options as appropriate.
+ */
+-static int raid1_set_option( task_context_t * context,
+- u_int32_t index,
+- value_t * value,
+- task_effect_t * effect )
++static int raid1_set_option(
++ task_context_t * context,
++ u_int32_t index,
++ value_t * value,
++ task_effect_t * effect )
+ {
+ int rc = 0;
++ boolean degrade;
++ int answer;
++ char * choice_text[3] = { _("Yes"), _("No"), NULL };
+
+ my_plugin = raid1_plugin;
+ LOG_ENTRY();
+@@ -2260,14 +2325,12 @@
+ return EFAULT;
+ }
+
+-
+-
+ switch (context->action) {
+
+ case EVMS_Task_Create:
+ switch (index) {
+
+- case RAID1_CREATE_OPTION_SB1_INDEX:
++ case RAID1_CREATE_OPT_SB1_INDEX:
+ context->option_descriptors->option[index].value.b = value->b;
+ if (value->b == TRUE) {
+ context->max_selected_objects = MD_SB_1_DISKS;
+@@ -2275,8 +2338,42 @@
+ context->max_selected_objects = MD_SB_DISKS;
+ }
+ break;
++
++ case RAID1_CREATE_OPT_DEGRADE_INDEX:
++ if (value->b != context->option_descriptors->option[index].value.b) {
++ degrade = FALSE;
++ if (value->b == TRUE) {
++ answer = 1; /* index 1 is "No" */
++ QUESTION(&answer, choice_text, _("Do you really want to create a degraded array?"));
++ if (answer == 0) {
++ /* index 0 is "Yes" */
++ degrade = TRUE;
++ }
++ }
++ context->option_descriptors->option[index].value.b = degrade;
++ if (degrade) {
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].flags |= (EVMS_OPTION_FLAGS_INACTIVE);
++ strcpy(context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].value.s, "");
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].flags &= ~(EVMS_OPTION_FLAGS_INACTIVE);
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].constraint.range->max.ui32 = EngFncs->list_count(context->selected_objects);
++ } else {
++ context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].flags &= ~(EVMS_OPTION_FLAGS_INACTIVE);
++ context->option_descriptors->option[RAID1_CREATE_OPT_MISSING_INDEX].flags |= (EVMS_OPTION_FLAGS_INACTIVE);
++ }
++ *effect |= EVMS_Effect_Reload_Options;
++ }
++ break;
++
++ case RAID1_CREATE_OPT_MISSING_INDEX:
++ /* Verify that the index is within range? */
++ if (value->ui32 > EngFncs->list_count(context->selected_objects)) {
++ rc = EINVAL;
++ } else {
++ context->option_descriptors->option[index].value.ui32 = value->ui32;
++ }
++ break;
+
+- case RAID1_OPTION_SPARE_DISK_INDEX:
++ case RAID1_OPT_SPARE_DISK_INDEX:
+ // Not worth validation, will catch when we try to find the original
+ strcpy(context->option_descriptors->option[index].value.s, value->s);
+ warn_if_big_objects(context);
+@@ -2333,7 +2430,7 @@
+
+ case EVMS_Task_Create:
+ raid1_create_selectable_spare_list(
+- (value_list_t **)&context->option_descriptors->option[RAID1_OPTION_SPARE_DISK_INDEX].constraint.list,
++ (value_list_t **)&context->option_descriptors->option[RAID1_OPT_SPARE_DISK_INDEX].constraint.list,
+ context->selected_objects, 0);
+ warn_if_big_objects(context);
+ *effect |= EVMS_Effect_Reload_Options;
+
+--- evms-2.5.4a/plugins/md/md_super.h 17 Jan 2005 05:50:21 -0000
++++ evms-2.5.4b/plugins/md/md_super.h 2 Dec 2005 18:46:57 -0000
+@@ -352,6 +352,7 @@
+ u_int64_t size, u_int32_t chunk_size);
+ void (*load_this_device_info)(md_member_t *member);
+ int (*mark_disk_faulty)(md_member_t *member, boolean mark_removed);
++ int (*mark_disk_missing)(md_volume_t *vol, int dev_number);
+ int (*max_disks) (void);
+ int (*read_saved_info)(md_member_t *member);
+ int (*remove_disk)(md_member_t *member, boolean resize);
+@@ -381,6 +382,7 @@
+ u_int64_t size,
+ u_int32_t chunksize );
+ int md_volume_add_new_member(md_volume_t *vol, md_member_t *member);
++int md_volume_add_new_missing(md_volume_t *vol);
+
+ //void md_print_sb(char *buf, u_int32_t buf_size, md_volume_t *vol);
+
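The placement contract spelled out in the md_volume_add_new_missing() comment above (the missing slot goes wherever the running disk count equals the requested index, or at the end if that index is never reached) is mirrored by the loops the patch adds to the RAID1 and RAID5 create paths. The following is a minimal, self-contained C sketch of that placement logic only, with a plain array and placeholder object names standing in for the engine's object list and the md_volume_*() calls reduced to prints; it is not the real EVMS API.

#include <stdio.h>

/* Stand-ins for the engine calls: the real md_volume_add_new_member()
 * and md_volume_add_new_missing() update the MD superblock and the
 * volume's disk counts; here they only record which slot was used so
 * the placement is visible. */
static int nr_disks;

static void add_member(const char *name)
{
	printf("slot %d: %s\n", nr_disks++, name);
}

static void add_missing(void)
{
	printf("slot %d: <missing>\n", nr_disks++);
}

/* Mirrors the loop the patch adds to the RAID1 and RAID5 create paths:
 * insert the missing entry when the running disk count reaches
 * missing_index, or append it after the loop if that index was never
 * reached. */
static void create_degraded(const char *objects[], int count, int missing_index)
{
	int i, missing_added = 0;

	nr_disks = 0;
	for (i = 0; i < count; i++) {
		if (!missing_added && missing_index == nr_disks) {
			add_missing();
			missing_added = 1;
		}
		add_member(objects[i]);
	}
	if (!missing_added)
		add_missing();
}

int main(void)
{
	const char *objs[] = { "sda1", "sdb1" };	/* placeholder names */

	create_degraded(objs, 2, 0);	/* [missing, sda1, sdb1] */
	printf("--\n");
	create_degraded(objs, 2, 1);	/* [sda1, missing, sdb1] */
	printf("--\n");
	create_degraded(objs, 2, 2);	/* [sda1, sdb1, missing] */
	return 0;
}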
diff --git a/sys-fs/evms/files/digest-evms-2.5.2-r1 b/sys-fs/evms/files/digest-evms-2.5.2-r1
deleted file mode 100644
index d565bd559480..000000000000
--- a/sys-fs/evms/files/digest-evms-2.5.2-r1
+++ /dev/null
@@ -1 +0,0 @@
-MD5 5374d003f4478db737720c9b62e7cb86 evms-2.5.2.tar.gz 2235846
diff --git a/sys-fs/evms/files/digest-evms-2.5.3 b/sys-fs/evms/files/digest-evms-2.5.3
deleted file mode 100644
index d3ac22f759f3..000000000000
--- a/sys-fs/evms/files/digest-evms-2.5.3
+++ /dev/null
@@ -1 +0,0 @@
-MD5 702c57921292934bb6c393a2c28c73ba evms-2.5.3.tar.gz 2239383
diff --git a/sys-fs/evms/files/digest-evms-2.5.4 b/sys-fs/evms/files/digest-evms-2.5.4
new file mode 100644
index 000000000000..6c53d751bfee
--- /dev/null
+++ b/sys-fs/evms/files/digest-evms-2.5.4
@@ -0,0 +1,3 @@
+MD5 def675ffe19037dcb74f1393530ee4c7 evms-2.5.4.tar.gz 2245357
+RMD160 0767bea4beedfd3354d30d366229f2100d4c3384 evms-2.5.4.tar.gz 2245357
+SHA256 d117677b3319ffe54c561315726e8ca2ccbba3f6bf0422a3df34388da79651ff evms-2.5.4.tar.gz 2245357