fix: harden apitest seeder and pk schedulers

irving
2026-01-02 01:57:41 -05:00
parent 17a8c358a8
commit 6fbc28d6f2
4 changed files with 141 additions and 1 deletion

@@ -144,6 +144,7 @@ class PkIntegrationTest extends AbstractApiTest {
        if (stringRedisTemplate.getConnectionFactory() == null) {
            return;
        }
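        // establish a default tenant id before flushing the test Redis database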
        SecurityUtils.setTenantId(DEFAULT_TENANT);
        stringRedisTemplate.getConnectionFactory().getConnection().flushDb();
    }
@@ -792,6 +793,56 @@ class PkIntegrationTest extends AbstractApiTest {
        assertThat(SecurityUtils.getTenantId()).isEqualTo(TENANT_ORIGIN);
    }
    @Test
    @DisplayName("Start Scheduler should process PKs for multiple tenants")
    void startSchedulerShouldProcessMultipleTenants() {
        LocalDateTime now = LocalDateTime.now();
        String pkIdA = IdUtils.getUuid();
        String pkIdB = IdUtils.getUuid();
        String originalTenant = SecurityUtils.getTenantId();
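        // arrange: one due-to-start PK per tenant (begin time already passed, end time still in the future)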
        PlayClerkPkEntity pkA = buildPk(pkIdA, newClerkId(), newClerkId(),
                now.minusMinutes(MINUTES_BEFORE_START),
                now.plusMinutes(MINUTES_AFTER_START),
                ClerkPkEnum.TO_BE_STARTED.name(),
                SETTLED_FALSE);
        pkA.setTenantId(TENANT_A);
        PlayClerkPkEntity pkB = buildPk(pkIdB, newClerkId(), newClerkId(),
                now.minusMinutes(MINUTES_BEFORE_START),
                now.plusMinutes(MINUTES_AFTER_START),
                ClerkPkEnum.TO_BE_STARTED.name(),
                SETTLED_FALSE);
        pkB.setTenantId(TENANT_B);
        SecurityUtils.setTenantId(TENANT_A);
        clerkPkService.save(pkA);
        SecurityUtils.setTenantId(TENANT_B);
        clerkPkService.save(pkB);
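        // seed each tenant's start-schedule ZSET with the PK id scored by its begin epoch seconds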
        long beginEpochSeconds = pkA.getPkBeginTime().toInstant().getEpochSecond();
        stringRedisTemplate.opsForZSet().add(PkRedisKeyConstants.startScheduleKey(TENANT_A), pkIdA, beginEpochSeconds);
        stringRedisTemplate.opsForZSet().add(PkRedisKeyConstants.startScheduleKey(TENANT_B), pkIdB, beginEpochSeconds);
        SysTenantEntity tenantA = buildTenant(TENANT_A);
        SysTenantEntity tenantB = buildTenant(TENANT_B);
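        // act: stub the tenant list so the scheduler iterates both tenants, then run a single scan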
        try {
            Mockito.doReturn(Arrays.asList(tenantA, tenantB))
                    .when(sysTenantServiceSpy).listAll();
            startSchedulerJob.scanStartSchedule();
        } finally {
            Mockito.reset(sysTenantServiceSpy);
            SecurityUtils.setTenantId(originalTenant);
        }
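        // assert: each PK was moved to IN_PROGRESS under its own tenant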
        SecurityUtils.setTenantId(TENANT_A);
        PlayClerkPkEntity persistedA = clerkPkService.selectPlayClerkPkById(pkIdA);
        assertThat(persistedA.getStatus()).isEqualTo(ClerkPkEnum.IN_PROGRESS.name());
        SecurityUtils.setTenantId(TENANT_B);
        PlayClerkPkEntity persistedB = clerkPkService.selectPlayClerkPkById(pkIdB);
        assertThat(persistedB.getStatus()).isEqualTo(ClerkPkEnum.IN_PROGRESS.name());
    }
    @Test
    @DisplayName("Start Scheduler should not start PKs whose start time has not arrived")
    void startSchedulerShouldSkipFuturePk() {
@@ -872,6 +923,58 @@ class PkIntegrationTest extends AbstractApiTest {
        assertThat(stringRedisTemplate.opsForZSet().score(finishKey, pkId)).isNull();
    }
    @Test
    @DisplayName("Finish Scheduler should process PKs for multiple tenants")
    void finishSchedulerShouldProcessMultipleTenants() {
        LocalDateTime now = LocalDateTime.now();
        String pkIdA = IdUtils.getUuid();
        String pkIdB = IdUtils.getUuid();
        String originalTenant = SecurityUtils.getTenantId();
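        // arrange: one overdue PK per tenant (begin and end times both in the past, still IN_PROGRESS)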
        PlayClerkPkEntity pkA = buildPk(pkIdA, newClerkId(), newClerkId(),
                now.minusMinutes(MINUTES_BEFORE_START),
                now.minusMinutes(MINUTES_AFTER_START),
                ClerkPkEnum.IN_PROGRESS.name(),
                SETTLED_FALSE);
        pkA.setTenantId(TENANT_A);
        PlayClerkPkEntity pkB = buildPk(pkIdB, newClerkId(), newClerkId(),
                now.minusMinutes(MINUTES_BEFORE_START),
                now.minusMinutes(MINUTES_AFTER_START),
                ClerkPkEnum.IN_PROGRESS.name(),
                SETTLED_FALSE);
        pkB.setTenantId(TENANT_B);
        SecurityUtils.setTenantId(TENANT_A);
        clerkPkService.save(pkA);
        SecurityUtils.setTenantId(TENANT_B);
        clerkPkService.save(pkB);
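        // seed each tenant's finish-schedule ZSET with the PK id scored by its end epoch seconds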
        long endEpochSeconds = pkA.getPkEndTime().toInstant().getEpochSecond();
        stringRedisTemplate.opsForZSet().add(PkRedisKeyConstants.finishScheduleKey(TENANT_A), pkIdA, endEpochSeconds);
        stringRedisTemplate.opsForZSet().add(PkRedisKeyConstants.finishScheduleKey(TENANT_B), pkIdB, endEpochSeconds);
        SysTenantEntity tenantA = buildTenant(TENANT_A);
        SysTenantEntity tenantB = buildTenant(TENANT_B);
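        // act: stub the tenant list so the scheduler iterates both tenants, then run a single scan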
        try {
            Mockito.doReturn(Arrays.asList(tenantA, tenantB))
                    .when(sysTenantServiceSpy).listAll();
            finishSchedulerJob.scanFinishSchedule();
        } finally {
            Mockito.reset(sysTenantServiceSpy);
            SecurityUtils.setTenantId(originalTenant);
        }
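        // assert: each PK was finished and settled under its own tenant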
        SecurityUtils.setTenantId(TENANT_A);
        PlayClerkPkEntity persistedA = clerkPkService.selectPlayClerkPkById(pkIdA);
        assertThat(persistedA.getStatus()).isEqualTo(ClerkPkEnum.FINISHED.name());
        assertThat(persistedA.getSettled()).isEqualTo(SETTLED_TRUE);
        SecurityUtils.setTenantId(TENANT_B);
        PlayClerkPkEntity persistedB = clerkPkService.selectPlayClerkPkById(pkIdB);
        assertThat(persistedB.getStatus()).isEqualTo(ClerkPkEnum.FINISHED.name());
        assertThat(persistedB.getSettled()).isEqualTo(SETTLED_TRUE);
    }
    @Test
    @DisplayName("Finish Scheduler should restore the tenant context")
    void finishSchedulerShouldRestoreTenantContext() {

@@ -67,6 +67,7 @@ class WxPkApiTest extends AbstractApiTest {
    private static final int HISTORY_PAGE_NUM = 1;
    private static final int HISTORY_PAGE_SIZE = 10;
    private static final long REMAINING_SECONDS_MIN = 1L;
    private static final long TIME_SYNC_TOLERANCE_SECONDS = 2L;
    private static final int SETTLED_FALSE = 0;
    private static final int SETTLED_TRUE = 1;
    private static final double ZSET_SCORE_PAST = 1D;
@@ -146,6 +147,7 @@ class WxPkApiTest extends AbstractApiTest {
assertThat(data.get("remainingSeconds").asLong()).isGreaterThanOrEqualTo(REMAINING_SECONDS_MIN);
assertThat(data.get("serverEpochSeconds").asLong()).isGreaterThanOrEqualTo(REMAINING_SECONDS_MIN);
assertThat(data.get("pkEndEpochSeconds").asLong()).isGreaterThanOrEqualTo(REMAINING_SECONDS_MIN);
assertTimeSync(data);
}
@Test
@@ -639,4 +641,16 @@ class WxPkApiTest extends AbstractApiTest {
        JsonNode root = OBJECT_MAPPER.readTree(result.getResponse().getContentAsString());
        return root.get("data");
    }
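    // the three time fields should agree: (pkEndEpochSeconds - serverEpochSeconds) should match remainingSeconds within the tolerance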
    private static void assertTimeSync(JsonNode data) {
        assertThat(data.get("remainingSeconds")).isNotNull();
        assertThat(data.get("serverEpochSeconds")).isNotNull();
        assertThat(data.get("pkEndEpochSeconds")).isNotNull();
        long remainingSeconds = data.get("remainingSeconds").asLong();
        long serverEpochSeconds = data.get("serverEpochSeconds").asLong();
        long pkEndEpochSeconds = data.get("pkEndEpochSeconds").asLong();
        long deltaSeconds = pkEndEpochSeconds - serverEpochSeconds;
        long drift = Math.abs(deltaSeconds - remainingSeconds);
        assertThat(drift).isLessThanOrEqualTo(TIME_SYNC_TOLERANCE_SECONDS);
    }
}