diff --git a/src/meta/MetaVersionMan.cpp b/src/meta/MetaVersionMan.cpp
index 82bc7a8c842..258da5fc523 100644
--- a/src/meta/MetaVersionMan.cpp
+++ b/src/meta/MetaVersionMan.cpp
@@ -377,8 +377,8 @@ Status MetaVersionMan::doUpgradeV2ToV3(kvstore::KVEngine* engine) {
   std::unique_ptr<kvstore::KVIterator> zoneIter;
   auto code = engine->prefix(zonePrefix, &zoneIter);
   if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
-    LOG(ERROR) << "Get active hosts failed";
-    return Status::Error("Get hosts failed");
+    LOG(ERROR) << "Get zones failed";
+    return Status::Error("Get zones failed");
   }
 
   while (zoneIter->valid()) {
diff --git a/src/meta/processors/zone/DivideZoneProcessor.cpp b/src/meta/processors/zone/DivideZoneProcessor.cpp
index 6b43915a5ad..ec1084f81b5 100644
--- a/src/meta/processors/zone/DivideZoneProcessor.cpp
+++ b/src/meta/processors/zone/DivideZoneProcessor.cpp
@@ -14,6 +14,7 @@ void DivideZoneProcessor::process(const cpp2::DivideZoneReq& req) {
   auto zoneName = req.get_zone_name();
   auto zoneKey = MetaKeyUtils::zoneKey(zoneName);
   auto zoneValueRet = doGet(zoneKey);
+  // Check whether the source zone exists
   if (!nebula::ok(zoneValueRet)) {
     LOG(ERROR) << "Zone " << zoneName << " not existed error: "
                << apache::thrift::util::enumNameSafe(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND);
@@ -24,6 +25,23 @@
 
   auto& zoneItems = req.get_zone_items();
   auto zoneHosts = MetaKeyUtils::parseZoneHosts(std::move(nebula::value(zoneValueRet)));
+
+  // If the source zone has only one host, it cannot be divided
+  if (zoneHosts.size() == 1) {
+    LOG(ERROR) << "A zone with only one host cannot be divided";
+    handleErrorCode(nebula::cpp2::ErrorCode::E_INVALID_PARM);
+    onFinished();
+    return;
+  }
+
+  // If only one target zone is given, there is nothing to divide
+  if (zoneItems.size() == 1) {
+    LOG(ERROR) << "Dividing requires more than one target zone";
+    handleErrorCode(nebula::cpp2::ErrorCode::E_INVALID_PARM);
+    onFinished();
+    return;
+  }
+
   if (zoneItems.size() > zoneHosts.size()) {
     LOG(ERROR) << "Zone Item should not greater than hosts size";
     handleErrorCode(nebula::cpp2::ErrorCode::E_INVALID_PARM);
@@ -35,12 +53,14 @@
   std::unordered_set<HostAddr> totalHosts;
   size_t totalHostsSize = 0;
   auto batchHolder = std::make_unique<kvstore::BatchHolder>();
+  // Remove the original zone
+  batchHolder->remove(std::move(zoneKey));
   nebula::cpp2::ErrorCode code = nebula::cpp2::ErrorCode::SUCCEEDED;
   for (auto iter = zoneItems.begin(); iter != zoneItems.end(); iter++) {
     auto zone = iter->first;
     auto hosts = iter->second;
     auto valueRet = doGet(MetaKeyUtils::zoneKey(zone));
-    if (nebula::ok(valueRet)) {
+    if (nebula::ok(valueRet) && zone != zoneName) {
       LOG(ERROR) << "Zone " << zone << " have existed";
       code = nebula::cpp2::ErrorCode::E_EXISTED;
       break;
@@ -48,9 +68,9 @@
 
     auto it = std::find(zoneNames.begin(), zoneNames.end(), zone);
     if (it == zoneNames.end()) {
-      LOG(ERROR) << "Zone have duplicated name";
       zoneNames.emplace_back(zone);
     } else {
+      LOG(ERROR) << "Duplicated zone name " << zone;
       code = nebula::cpp2::ErrorCode::E_INVALID_PARM;
       break;
     }
@@ -109,8 +129,6 @@
     return;
   }
 
-  // Remove original zone
-  batchHolder->remove(std::move(zoneKey));
   code = updateSpacesZone(batchHolder.get(), zoneName, zoneNames);
   if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
     handleErrorCode(code);
diff --git a/src/meta/test/MetaClientTest.cpp b/src/meta/test/MetaClientTest.cpp
index 3c3ff5aa3ce..d5c628100ba 100644
--- a/src/meta/test/MetaClientTest.cpp
+++ b/src/meta/test/MetaClientTest.cpp
@@ -2606,9 +2606,10 @@ TEST(MetaClientTest, DivideZoneTest) {
   }
   {
     std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
-    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8976}, {"127.0.0.1", 8977}};
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8976}};
     zoneItems.emplace("one_zone_1", std::move(oneHosts));
-    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    std::vector<HostAddr> anotherHosts = {
+        {"127.0.0.1", 8977}, {"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
     zoneItems.emplace("another_zone_1", std::move(anotherHosts));
     auto result = client->divideZone("default_zone", std::move(zoneItems)).get();
     EXPECT_TRUE(result.ok());
@@ -2639,6 +2640,51 @@
     ASSERT_EQ("one_zone_1", zones[2]);
     ASSERT_EQ("another_zone_1", zones[3]);
   }
+  {
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8976}};
+    zoneItems.emplace("one_zone_1_1", std::move(oneHosts));
+    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8976}};
+    zoneItems.emplace("one_zone_1_2", std::move(anotherHosts));
+    auto result = client->divideZone("one_zone_1", std::move(zoneItems)).get();
+    EXPECT_FALSE(result.ok());
+  }
+  {
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> hosts = {{"127.0.0.1", 8977}, {"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    zoneItems.emplace("another_zone_1_1", std::move(hosts));
+    auto result = client->divideZone("another_zone_1", std::move(zoneItems)).get();
+    EXPECT_FALSE(result.ok());
+  }
+  {
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8977}};
+    zoneItems.emplace("another_zone_1", std::move(oneHosts));
+    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    zoneItems.emplace("another_zone_1_1", std::move(anotherHosts));
+    auto result = client->divideZone("another_zone_1", std::move(zoneItems)).get();
+    EXPECT_TRUE(result.ok());
+  }
+  {
+    auto result = client->listZones().get();
+    ASSERT_TRUE(result.ok());
+    auto zones = result.value();
+    ASSERT_EQ(5, zones.size());
+    ASSERT_EQ("another_zone", zones[0].get_zone_name());
+    ASSERT_EQ("another_zone_1", zones[1].get_zone_name());
+    ASSERT_EQ("another_zone_1_1", zones[2].get_zone_name());
+    ASSERT_EQ("one_zone", zones[3].get_zone_name());
+    ASSERT_EQ("one_zone_1", zones[4].get_zone_name());
+  }
+  {
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8977}};
+    zoneItems.emplace("another_zone_1", std::move(oneHosts));
+    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    zoneItems.emplace("another_zone_1", std::move(anotherHosts));
+    auto result = client->divideZone("another_zone_1", std::move(zoneItems)).get();
+    EXPECT_FALSE(result.ok());
+  }
 
   cluster.stop();
 }
diff --git a/src/meta/test/ProcessorTest.cpp b/src/meta/test/ProcessorTest.cpp
index 58bce373c42..c6028f01801 100644
--- a/src/meta/test/ProcessorTest.cpp
+++ b/src/meta/test/ProcessorTest.cpp
@@ -4192,7 +4192,9 @@ TEST(ProcessorTest, DivideZoneTest) {
     ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code());
     ASSERT_EQ(2, resp.get_zones().size());
     ASSERT_EQ("another_zone", resp.get_zones()[0].get_zone_name());
+    ASSERT_EQ(2, resp.get_zones()[0].get_nodes().size());
     ASSERT_EQ("one_zone", resp.get_zones()[1].get_zone_name());
+    ASSERT_EQ(2, resp.get_zones()[1].get_nodes().size());
   }
   {
     cpp2::AddHostsIntoZoneReq req;
@@ -4275,9 +4277,10 @@
     cpp2::DivideZoneReq req;
     req.zone_name_ref() = "default_zone";
     std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
-    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8976}, {"127.0.0.1", 8977}};
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8976}};
     zoneItems.emplace("one_zone_1", std::move(oneHosts));
-    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    std::vector<HostAddr> anotherHosts = {
+        {"127.0.0.1", 8977}, {"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
     zoneItems.emplace("another_zone_1", std::move(anotherHosts));
     req.zone_items_ref() = std::move(zoneItems);
     auto* processor = DivideZoneProcessor::instance(kv.get());
@@ -4295,9 +4298,85 @@
     ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code());
     ASSERT_EQ(4, resp.get_zones().size());
     ASSERT_EQ("another_zone", resp.get_zones()[0].get_zone_name());
+    ASSERT_EQ(2, resp.get_zones()[0].get_nodes().size());
     ASSERT_EQ("another_zone_1", resp.get_zones()[1].get_zone_name());
+    ASSERT_EQ(3, resp.get_zones()[1].get_nodes().size());
     ASSERT_EQ("one_zone", resp.get_zones()[2].get_zone_name());
+    ASSERT_EQ(2, resp.get_zones()[2].get_nodes().size());
     ASSERT_EQ("one_zone_1", resp.get_zones()[3].get_zone_name());
+    ASSERT_EQ(1, resp.get_zones()[3].get_nodes().size());
+  }
+  {
+    cpp2::DivideZoneReq req;
+    req.zone_name_ref() = "one_zone_1";
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8976}};
+    zoneItems.emplace("one_zone_1_1", std::move(oneHosts));
+    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8976}};
+    zoneItems.emplace("one_zone_1_2", std::move(anotherHosts));
+    req.zone_items_ref() = std::move(zoneItems);
+    auto* processor = DivideZoneProcessor::instance(kv.get());
+    auto f = processor->getFuture();
+    processor->process(req);
+    auto resp = std::move(f).get();
+    ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code());
+  }
+  {
+    cpp2::DivideZoneReq req;
+    req.zone_name_ref() = "another_zone_1";
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> hosts = {{"127.0.0.1", 8977}, {"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    zoneItems.emplace("another_zone_1_1", std::move(hosts));
+    req.zone_items_ref() = std::move(zoneItems);
+    auto* processor = DivideZoneProcessor::instance(kv.get());
+    auto f = processor->getFuture();
+    processor->process(req);
+    auto resp = std::move(f).get();
+    ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code());
+  }
+  {
+    cpp2::DivideZoneReq req;
+    req.zone_name_ref() = "another_zone_1";
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8977}};
+    zoneItems.emplace("another_zone_1", std::move(oneHosts));
+    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    zoneItems.emplace("another_zone_1_1", std::move(anotherHosts));
+    req.zone_items_ref() = std::move(zoneItems);
+    auto* processor = DivideZoneProcessor::instance(kv.get());
+    auto f = processor->getFuture();
+    processor->process(req);
+    auto resp = std::move(f).get();
+    ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code());
+  }
+  {
+    cpp2::ListZonesReq req;
+    auto* processor = ListZonesProcessor::instance(kv.get());
+    auto f = processor->getFuture();
+    processor->process(req);
+    auto resp = std::move(f).get();
+    ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code());
+    ASSERT_EQ(5, resp.get_zones().size());
+    ASSERT_EQ("another_zone", resp.get_zones()[0].get_zone_name());
+    ASSERT_EQ("another_zone_1", resp.get_zones()[1].get_zone_name());
+    ASSERT_EQ("another_zone_1_1", resp.get_zones()[2].get_zone_name());
+    ASSERT_EQ("one_zone", resp.get_zones()[3].get_zone_name());
+    ASSERT_EQ("one_zone_1", resp.get_zones()[4].get_zone_name());
+  }
+  {
+    cpp2::DivideZoneReq req;
+    req.zone_name_ref() = "another_zone_1";
+    std::unordered_map<std::string, std::vector<HostAddr>> zoneItems;
+    std::vector<HostAddr> oneHosts = {{"127.0.0.1", 8977}};
+    zoneItems.emplace("another_zone_1", std::move(oneHosts));
+    std::vector<HostAddr> anotherHosts = {{"127.0.0.1", 8978}, {"127.0.0.1", 8979}};
+    zoneItems.emplace("another_zone_1", std::move(anotherHosts));
+    req.zone_items_ref() = std::move(zoneItems);
+    auto* processor = DivideZoneProcessor::instance(kv.get());
+    auto f = processor->getFuture();
+    processor->process(req);
+    auto resp = std::move(f).get();
+    ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code());
   }
 }