Report Deprecated Indices That Are Flagged To Ignore Migration Reindex As A Warning #120629

Merged
6 changes: 6 additions & 0 deletions docs/changelog/120629.yaml
@@ -0,0 +1,6 @@
pr: 120629
summary: Report Deprecated Indices That Are Flagged To Ignore Migration Reindex As
A Warning
area: Data streams
type: enhancement
issues: []
@@ -20,29 +20,38 @@ public class DeprecatedIndexPredicate {

public static final IndexVersion MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE = IndexVersions.UPGRADE_TO_LUCENE_10_0_0;

/*
/**
* This predicate allows through only indices that were created with a previous lucene version, meaning that they need to be reindexed
* in order to be writable in the _next_ lucene version.
* in order to be writable in the _next_ lucene version. It excludes searchable snapshots as they are not writable.
*
* It ignores searchable snapshots as they are not writable.
*
* @param metadata the cluster metadata
* @param filterToBlockedStatus if true, the predicate only matches indices that are write blocked,
* if false, only those without a block
* @return a predicate that returns true for indices that need to be reindexed
*/
public static Predicate<Index> getReindexRequiredPredicate(Metadata metadata) {
public static Predicate<Index> getReindexRequiredPredicate(Metadata metadata, boolean filterToBlockedStatus) {
return index -> {
IndexMetadata indexMetadata = metadata.index(index);
return reindexRequired(indexMetadata);
return reindexRequired(indexMetadata, filterToBlockedStatus);
};
}

public static boolean reindexRequired(IndexMetadata indexMetadata) {
/**
* This method checks whether the index was created with a previous lucene version, meaning that it needs to be reindexed
* in order to be writable in the _next_ lucene version. It excludes searchable snapshots as they are not writable.
*
* @param indexMetadata the index metadata
* @param filterToBlockedStatus if true, the method only returns true for indices that are write blocked,
* if false, only for those without a block
* @return true if the index needs to be reindexed
*/
public static boolean reindexRequired(IndexMetadata indexMetadata, boolean filterToBlockedStatus) {
return creationVersionBeforeMinimumWritableVersion(indexMetadata)
&& isNotSearchableSnapshot(indexMetadata)
&& isNotClosed(indexMetadata)
&& isNotVerifiedReadOnly(indexMetadata);
}

private static boolean isNotVerifiedReadOnly(IndexMetadata indexMetadata) {
// no need to check blocks.
return MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.get(indexMetadata.getSettings()) == false;
&& matchBlockedStatus(indexMetadata, filterToBlockedStatus);
}

private static boolean isNotSearchableSnapshot(IndexMetadata indexMetadata) {
@@ -57,4 +66,7 @@ private static boolean isNotClosed(IndexMetadata indexMetadata) {
return indexMetadata.getState().equals(IndexMetadata.State.CLOSE) == false;
}

private static boolean matchBlockedStatus(IndexMetadata indexMetadata, boolean filterToBlockedStatus) {
return MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.get(indexMetadata.getSettings()) == filterToBlockedStatus;
}
}
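For orientation, a minimal usage sketch (not taken from this diff) of how the two-argument predicate splits a data stream's old backing indices into the two groups the checks below report on: passing false selects indices that still need reindexing, passing true selects the verified read-only indices that are only warned about. The helper class name is illustrative, and the import for DeprecatedIndexPredicate is assumed from the migrate plugin package this PR modifies.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.index.Index;
// plus the DeprecatedIndexPredicate import for the class shown above

final class OldIndexPartitionSketch {

    // Old indices with no verified read-only block: these still need to be reindexed (CRITICAL checks).
    static Set<String> needingUpgrade(List<Index> backingIndices, Metadata metadata) {
        return backingIndices.stream()
            .filter(DeprecatedIndexPredicate.getReindexRequiredPredicate(metadata, false))
            .map(Index::getName)
            .collect(Collectors.toUnmodifiableSet());
    }

    // Old indices that are verified read-only: these are only reported as a WARNING.
    static Set<String> ignoredReadOnly(List<Index> backingIndices, Metadata metadata) {
        return backingIndices.stream()
            .filter(DeprecatedIndexPredicate.getReindexRequiredPredicate(metadata, true))
            .map(Index::getName)
            .collect(Collectors.toUnmodifiableSet());
    }
}

Because matchBlockedStatus compares the VERIFIED_READ_ONLY_SETTING value against the flag, a given old index can satisfy at most one of the two calls, which keeps the existing CRITICAL checks and the new WARNING checks from double-reporting the same index.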
@@ -24,10 +24,7 @@ public class DataStreamDeprecationChecks {
static DeprecationIssue oldIndicesCheck(DataStream dataStream, ClusterState clusterState) {
List<Index> backingIndices = dataStream.getIndices();

Set<String> indicesNeedingUpgrade = backingIndices.stream()
.filter(DeprecatedIndexPredicate.getReindexRequiredPredicate(clusterState.metadata()))
.map(Index::getName)
.collect(Collectors.toUnmodifiableSet());
Set<String> indicesNeedingUpgrade = getReindexRequiredIndices(backingIndices, clusterState, false);

if (indicesNeedingUpgrade.isEmpty() == false) {
return new DeprecationIssue(
@@ -47,4 +44,40 @@ static DeprecationIssue oldIndicesCheck(DataStream dataStream, ClusterState clus

return null;
}

static DeprecationIssue ignoredOldIndicesCheck(DataStream dataStream, ClusterState clusterState) {
List<Index> backingIndices = dataStream.getIndices();

Set<String> ignoredIndices = getReindexRequiredIndices(backingIndices, clusterState, true);

if (ignoredIndices.isEmpty() == false) {
return new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Old data stream with a compatibility version < 9.0 Have Been Ignored",
"https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html",
"This data stream has read only backing indices that were created before Elasticsearch 9.0.0 and have been marked as "
+ "OK to remain read-only after upgrade",
false,
ofEntries(
entry("reindex_required", true),
entry("total_backing_indices", backingIndices.size()),
entry("ignored_indices_requiring_upgrade_count", ignoredIndices.size()),
entry("ignored_indices_requiring_upgrade", ignoredIndices)
)
);
}

return null;
}

private static Set<String> getReindexRequiredIndices(
List<Index> backingIndices,
ClusterState clusterState,
boolean filterToBlockedStatus
) {
return backingIndices.stream()
.filter(DeprecatedIndexPredicate.getReindexRequiredPredicate(clusterState.metadata(), filterToBlockedStatus))
.map(Index::getName)
.collect(Collectors.toUnmodifiableSet());
}
}
@@ -94,6 +94,7 @@ private DeprecationChecks() {}

static List<BiFunction<IndexMetadata, ClusterState, DeprecationIssue>> INDEX_SETTINGS_CHECKS = List.of(
IndexDeprecationChecks::oldIndicesCheck,
IndexDeprecationChecks::ignoredOldIndicesCheck,
IndexDeprecationChecks::translogRetentionSettingCheck,
IndexDeprecationChecks::checkIndexDataPath,
IndexDeprecationChecks::storeTypeSettingCheck,
@@ -102,7 +103,8 @@ private DeprecationChecks() {}
);

static List<BiFunction<DataStream, ClusterState, DeprecationIssue>> DATA_STREAM_CHECKS = List.of(
DataStreamDeprecationChecks::oldIndicesCheck
DataStreamDeprecationChecks::oldIndicesCheck,
DataStreamDeprecationChecks::ignoredOldIndicesCheck
);

/**
@@ -36,7 +36,7 @@ static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata, ClusterStat
// TODO: this check needs to be revised. It's trivially true right now.
IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion();
// We intentionally exclude indices that are in data streams because they will be picked up by DataStreamDeprecationChecks
if (DeprecatedIndexPredicate.reindexRequired(indexMetadata) && isNotDataStreamIndex(indexMetadata, clusterState)) {
if (DeprecatedIndexPredicate.reindexRequired(indexMetadata, false) && isNotDataStreamIndex(indexMetadata, clusterState)) {
return new DeprecationIssue(
DeprecationIssue.Level.CRITICAL,
"Old index with a compatibility version < 9.0",
@@ -49,6 +49,24 @@ static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata, ClusterStat
return null;
}

static DeprecationIssue ignoredOldIndicesCheck(IndexMetadata indexMetadata, ClusterState clusterState) {
IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion();
// We intentionally exclude indices that are in data streams because they will be picked up by DataStreamDeprecationChecks
if (DeprecatedIndexPredicate.reindexRequired(indexMetadata, true) && isNotDataStreamIndex(indexMetadata, clusterState)) {
return new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Old index with a compatibility version < 9.0 Has Been Ignored",
"https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html",
"This read-only index has version: "
+ currentCompatibilityVersion.toReleaseVersion()
+ " and will be supported as read-only in 9.0",
false,
Collections.singletonMap("reindex_required", true)
);
}
return null;
}

private static boolean isNotDataStreamIndex(IndexMetadata indexMetadata, ClusterState clusterState) {
return clusterState.metadata().findDataStreams(indexMetadata.getIndex().getName()).isEmpty();
}
@@ -13,6 +13,7 @@
import org.elasticsearch.cluster.metadata.DataStreamOptions;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
@@ -224,4 +225,75 @@ private Index createIndex(
nameToIndexMetadata.put(indexMetadata.getIndex().getName(), indexMetadata);
return indexMetadata.getIndex();
}

public void testOldIndicesIgnoredWarningCheck() {
int oldIndexCount = randomIntBetween(1, 100);
int newIndexCount = randomIntBetween(1, 100);

List<Index> allIndices = new ArrayList<>();
Map<String, IndexMetadata> nameToIndexMetadata = new HashMap<>();
Set<String> expectedIndices = new HashSet<>();

for (int i = 0; i < oldIndexCount; i++) {
Settings.Builder settings = settings(IndexVersion.fromId(7170099));

String indexName = "old-data-stream-index-" + i;
settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true);
expectedIndices.add(indexName);

IndexMetadata oldIndexMetadata = IndexMetadata.builder(indexName)
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(0)
.build();
allIndices.add(oldIndexMetadata.getIndex());
nameToIndexMetadata.put(oldIndexMetadata.getIndex().getName(), oldIndexMetadata);
}

for (int i = 0; i < newIndexCount; i++) {
Index newIndex = createNewIndex(i, false, nameToIndexMetadata);
allIndices.add(newIndex);
}

DataStream dataStream = new DataStream(
randomAlphaOfLength(10),
allIndices,
randomNegativeLong(),
Map.of(),
randomBoolean(),
false,
false,
randomBoolean(),
randomFrom(IndexMode.values()),
null,
randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null),
List.of(),
randomBoolean(),
null
);

Metadata metadata = Metadata.builder().indices(nameToIndexMetadata).build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build();

DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Old data stream with a compatibility version < 9.0 Have Been Ignored",
"https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html",
"This data stream has read only backing indices that were created before Elasticsearch 9.0.0 and have been marked as "
+ "OK to remain read-only after upgrade",
false,
ofEntries(
entry("reindex_required", true),
entry("total_backing_indices", oldIndexCount + newIndexCount),
entry("ignored_indices_requiring_upgrade_count", expectedIndices.size()),
entry("ignored_indices_requiring_upgrade", expectedIndices)
)
);

List<DeprecationIssue> issues = DeprecationChecks.filterChecks(DATA_STREAM_CHECKS, c -> c.apply(dataStream, clusterState));

assertThat(issues, equalTo(singletonList(expected)));
}

}
@@ -13,6 +13,7 @@
import org.elasticsearch.cluster.metadata.DataStreamOptions;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexMode;
@@ -132,6 +133,25 @@ public void testOldIndicesCheckClosedIgnored() {
assertThat(issues, empty());
}

public void testOldIndicesIgnoredWarningCheck() {
IndexVersion createdWith = IndexVersion.fromId(7170099);
Settings.Builder settings = settings(createdWith).put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true);
IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build();
ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE)
.metadata(Metadata.builder().put(indexMetadata, true))
.build();
DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Old index with a compatibility version < 9.0 Has Been Ignored",
"https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html",
"This read-only index has version: " + createdWith.toReleaseVersion() + " and will be supported as read-only in 9.0",
false,
singletonMap("reindex_required", true)
);
List<DeprecationIssue> issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState));
assertEquals(singletonList(expected), issues);
}

public void testTranslogRetentionSettings() {
Settings.Builder settings = settings(IndexVersion.current());
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue());
@@ -118,7 +118,7 @@ protected void doExecute(
IndexMetadata sourceIndex = clusterService.state().getMetadata().index(sourceIndexName);
Settings settingsBefore = sourceIndex.getSettings();

var hasOldVersion = DeprecatedIndexPredicate.getReindexRequiredPredicate(clusterService.state().metadata());
var hasOldVersion = DeprecatedIndexPredicate.getReindexRequiredPredicate(clusterService.state().metadata(), false);
if (hasOldVersion.test(sourceIndex.getIndex()) == false) {
logger.warn(
"Migrating index [{}] with version [{}] is unnecessary as its version is not before [{}]",
@@ -69,7 +69,7 @@ protected void doExecute(Task task, ReindexDataStreamRequest request, ActionList
return;
}
int totalIndices = dataStream.getIndices().size();
int totalIndicesToBeUpgraded = (int) dataStream.getIndices().stream().filter(getReindexRequiredPredicate(metadata)).count();
int totalIndicesToBeUpgraded = (int) dataStream.getIndices().stream().filter(getReindexRequiredPredicate(metadata, false)).count();
ReindexDataStreamTaskParams params = new ReindexDataStreamTaskParams(
sourceDataStreamName,
transportService.getThreadPool().absoluteTimeInMillis(),
@@ -112,7 +112,7 @@ protected void nodeOperation(
List<GetDataStreamAction.Response.DataStreamInfo> dataStreamInfos = response.getDataStreams();
if (dataStreamInfos.size() == 1) {
DataStream dataStream = dataStreamInfos.getFirst().getDataStream();
if (getReindexRequiredPredicate(clusterService.state().metadata()).test(dataStream.getWriteIndex())) {
if (getReindexRequiredPredicate(clusterService.state().metadata(), false).test(dataStream.getWriteIndex())) {
RolloverRequest rolloverRequest = new RolloverRequest(sourceDataStream, null);
rolloverRequest.setParentTask(taskId);
reindexClient.execute(
@@ -161,7 +161,9 @@ private void reindexIndices(
TaskId parentTaskId
) {
List<Index> indices = dataStream.getIndices();
List<Index> indicesToBeReindexed = indices.stream().filter(getReindexRequiredPredicate(clusterService.state().metadata())).toList();
List<Index> indicesToBeReindexed = indices.stream()
.filter(getReindexRequiredPredicate(clusterService.state().metadata(), false))
.toList();
final ReindexDataStreamPersistentTaskState updatedState;
if (params.totalIndices() != totalIndicesInDataStream
|| params.totalIndicesToBeUpgraded() != indicesToBeReindexed.size()