style: apply ruff formatting fixes to test files

Apply ruff-format fixes to 6 test files to pass pre-commit checks:
- test_dimension_mismatch.py
- test_e2e_multi_instance.py
- test_no_model_suffix_safety.py
- test_postgres_migration.py
- test_unified_lock_safety.py
- test_workspace_migration_isolation.py

Changes are primarily assert-statement reformatting to match the ruff style guide.
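
For illustration, the hunks below reflow long assert-with-message statements between the two equivalent layouts sketched here (one collapses onto a single line); which layout ruff-format emits depends on the ruff version and line length configured for the pre-commit hook. The values in the sketch are placeholders, not taken from the diff:

# Hedged sketch, not part of the commit: two equivalent layouts for a long
# assert-with-message. Both are valid Python and behave identically.
legacy_count = 3
new_count = 3

# Layout A: condition kept inline, message parenthesized and indented.
assert new_count == legacy_count, (
    f"Expected {legacy_count} records migrated, got {new_count}"
)

# Layout B: condition parenthesized, message on the closing-paren line.
assert (
    new_count == legacy_count
), f"Expected {legacy_count} records migrated, got {new_count}"

Running ruff format (or the project's pre-commit hook) normalizes all such asserts to whichever layout the configured ruff version prefers.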
BukeLy 2025-11-23 16:59:02 +08:00
parent 510baebf62
commit 3b8a1e64b7
6 changed files with 100 additions and 102 deletions

File: test_dimension_mismatch.py

@@ -171,9 +171,9 @@ class TestPostgresDimensionMismatch:
for call in db.execute.call_args_list
if call[0][0] and "INSERT INTO" in call[0][0]
]
assert len(insert_calls) == 0, (
"Migration should be skipped due to dimension mismatch"
)
assert (
len(insert_calls) == 0
), "Migration should be skipped due to dimension mismatch"
@pytest.mark.asyncio
async def test_postgres_dimension_mismatch_skip_migration_sampling(self):
@@ -311,6 +311,6 @@ class TestPostgresDimensionMismatch:
for call in db.execute.call_args_list
if call[0][0] and "INSERT INTO" in call[0][0]
]
assert len(insert_calls) > 0, (
"Migration should proceed with matching dimensions"
)
assert (
len(insert_calls) > 0
), "Migration should proceed with matching dimensions"

File: test_e2e_multi_instance.py

@@ -295,9 +295,9 @@ async def test_legacy_migration_postgres(
)
new_count = new_count_result.get("count", 0)
assert new_count == legacy_count, (
f"Expected {legacy_count} records migrated, got {new_count}"
)
assert (
new_count == legacy_count
), f"Expected {legacy_count} records migrated, got {new_count}"
print(f"✅ Migration successful: {new_count}/{legacy_count} records migrated")
print(f"✅ New table: {new_table}")
@@ -312,9 +312,9 @@ async def test_legacy_migration_postgres(
check_legacy_query, [legacy_table.lower()]
)
legacy_exists = legacy_result.get("exists", True)
assert not legacy_exists, (
f"Legacy table '{legacy_table}' should be deleted after successful migration"
)
assert (
not legacy_exists
), f"Legacy table '{legacy_table}' should be deleted after successful migration"
print(f"✅ Legacy table '{legacy_table}' automatically deleted after migration")
await rag.finalize_storages()
@@ -425,18 +425,18 @@ async def test_workspace_migration_isolation_e2e_postgres(
["workspace_a"],
)
workspace_a_count = workspace_a_count_result.get("count", 0)
assert workspace_a_count == 3, (
f"Expected 3 workspace_a records, got {workspace_a_count}"
)
assert (
workspace_a_count == 3
), f"Expected 3 workspace_a records, got {workspace_a_count}"
workspace_b_count_result = await pg_cleanup.query(
f"SELECT COUNT(*) as count FROM {legacy_table} WHERE workspace=$1",
["workspace_b"],
)
workspace_b_count = workspace_b_count_result.get("count", 0)
assert workspace_b_count == 3, (
f"Expected 3 workspace_b records, got {workspace_b_count}"
)
assert (
workspace_b_count == 3
), f"Expected 3 workspace_b records, got {workspace_b_count}"
print(
f"✅ Legacy table created: {total_count} records (workspace_a: {workspace_a_count}, workspace_b: {workspace_b_count})"
@@ -484,9 +484,9 @@ async def test_workspace_migration_isolation_e2e_postgres(
["workspace_a"],
)
new_workspace_a_count = new_workspace_a_result.get("count", 0)
assert new_workspace_a_count == 3, (
f"Expected 3 workspace_a records in new table, got {new_workspace_a_count}"
)
assert (
new_workspace_a_count == 3
), f"Expected 3 workspace_a records in new table, got {new_workspace_a_count}"
print(
f"✅ Migration successful: {new_workspace_a_count} workspace_a records migrated"
)
@@ -497,9 +497,9 @@ async def test_workspace_migration_isolation_e2e_postgres(
["workspace_b"],
)
new_workspace_b_count = new_workspace_b_result.get("count", 0)
assert new_workspace_b_count == 0, (
f"workspace_b data leaked! Found {new_workspace_b_count} records in new table"
)
assert (
new_workspace_b_count == 0
), f"workspace_b data leaked! Found {new_workspace_b_count} records in new table"
print("✅ No data leakage: 0 workspace_b records in new table (isolated)")
# Verify: LEGACY table still exists (because workspace_b data remains)
@@ -513,9 +513,9 @@ async def test_workspace_migration_isolation_e2e_postgres(
check_legacy_query, [legacy_table.lower()]
)
legacy_exists = legacy_result.get("exists", False)
assert legacy_exists, (
f"Legacy table '{legacy_table}' should still exist (has workspace_b data)"
)
assert (
legacy_exists
), f"Legacy table '{legacy_table}' should still exist (has workspace_b data)"
# Verify: LEGACY table still has workspace_b data (3 records)
legacy_workspace_b_result = await pg_cleanup.query(
@@ -523,9 +523,9 @@ async def test_workspace_migration_isolation_e2e_postgres(
["workspace_b"],
)
legacy_workspace_b_count = legacy_workspace_b_result.get("count", 0)
assert legacy_workspace_b_count == 3, (
f"workspace_b data lost! Only {legacy_workspace_b_count} remain in legacy table"
)
assert (
legacy_workspace_b_count == 3
), f"workspace_b data lost! Only {legacy_workspace_b_count} remain in legacy table"
print(
f"✅ Legacy table preserved: {legacy_workspace_b_count} workspace_b records remain (not migrated)"
)
@@ -536,9 +536,9 @@ async def test_workspace_migration_isolation_e2e_postgres(
["workspace_a"],
)
legacy_workspace_a_count = legacy_workspace_a_result.get("count", 0)
assert legacy_workspace_a_count == 0, (
f"workspace_a data should be removed from legacy after migration, found {legacy_workspace_a_count}"
)
assert (
legacy_workspace_a_count == 0
), f"workspace_a data should be removed from legacy after migration, found {legacy_workspace_a_count}"
print(
"✅ Legacy cleanup verified: 0 workspace_a records in legacy (cleaned after migration)"
)
@@ -653,32 +653,30 @@ async def test_legacy_migration_qdrant(
assert "text_embedding_ada_002_1536d" in new_collection
# Verify new collection exists
assert qdrant_cleanup.collection_exists(new_collection), (
f"New collection {new_collection} should exist"
)
assert qdrant_cleanup.collection_exists(
new_collection
), f"New collection {new_collection} should exist"
new_count = qdrant_cleanup.count(new_collection).count
assert new_count == legacy_count, (
f"Expected {legacy_count} vectors migrated, got {new_count}"
)
assert (
new_count == legacy_count
), f"Expected {legacy_count} vectors migrated, got {new_count}"
print(f"✅ Migration successful: {new_count}/{legacy_count} vectors migrated")
print(f"✅ New collection: {new_collection}")
# Verify vector dimension
collection_info = qdrant_cleanup.get_collection(new_collection)
assert collection_info.config.params.vectors.size == 1536, (
"Migrated collection should have 1536 dimensions"
)
assert (
collection_info.config.params.vectors.size == 1536
), "Migrated collection should have 1536 dimensions"
print(
f"✅ Vector dimension verified: {collection_info.config.params.vectors.size}d"
)
# Verify legacy collection was automatically deleted after migration (Case 4)
legacy_exists = qdrant_cleanup.collection_exists(legacy_collection)
assert not legacy_exists, (
f"Legacy collection '{legacy_collection}' should be deleted after successful migration"
)
assert not legacy_exists, f"Legacy collection '{legacy_collection}' should be deleted after successful migration"
print(
f"✅ Legacy collection '{legacy_collection}' automatically deleted after migration"
)
@@ -894,12 +892,12 @@ async def test_multi_instance_qdrant(
print(f"✅ Collection isolation verified: {collection_a} != {collection_b}")
# Verify both collections exist in Qdrant
assert qdrant_cleanup.collection_exists(collection_a), (
f"Collection {collection_a} should exist"
)
assert qdrant_cleanup.collection_exists(collection_b), (
f"Collection {collection_b} should exist"
)
assert qdrant_cleanup.collection_exists(
collection_a
), f"Collection {collection_a} should exist"
assert qdrant_cleanup.collection_exists(
collection_b
), f"Collection {collection_b} should exist"
print("✅ Both collections exist in Qdrant")
# Verify vector dimensions
@@ -907,9 +905,9 @@ async def test_multi_instance_qdrant(
info_b = qdrant_cleanup.get_collection(collection_b)
assert info_a.config.params.vectors.size == 768, "Model A should use 768 dimensions"
assert info_b.config.params.vectors.size == 1024, (
"Model B should use 1024 dimensions"
)
assert (
info_b.config.params.vectors.size == 1024
), "Model B should use 1024 dimensions"
print(
f"✅ Vector dimensions verified: {info_a.config.params.vectors.size}d vs {info_b.config.params.vectors.size}d"
)
@@ -1451,9 +1449,9 @@ async def test_dimension_mismatch_postgres(
# 2. Legacy table should be preserved (not deleted)
check_legacy = f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '{legacy_table}')"
legacy_exists = await pg_cleanup.query(check_legacy, [])
assert legacy_exists.get("exists") is True, (
"Legacy table should be preserved when dimensions don't match"
)
assert (
legacy_exists.get("exists") is True
), "Legacy table should be preserved when dimensions don't match"
print(f"✅ Legacy table preserved: {legacy_table}")
# 3. Legacy table should still have original data (not migrated)
@@ -1461,9 +1459,9 @@ async def test_dimension_mismatch_postgres(
f"SELECT COUNT(*) as count FROM {legacy_table}", []
)
legacy_count = legacy_count_result.get("count", 0)
assert legacy_count == 3, (
f"Legacy table should still have 3 records, got {legacy_count}"
)
assert (
legacy_count == 3
), f"Legacy table should still have 3 records, got {legacy_count}"
print(f"✅ Legacy data preserved: {legacy_count} records")
# 4. New table should be empty (migration skipped)
@@ -1471,9 +1469,9 @@ async def test_dimension_mismatch_postgres(
f"SELECT COUNT(*) as count FROM {new_table}", []
)
new_count = new_count_result.get("count", 0)
assert new_count == 0, (
f"New table should be empty (migration skipped), got {new_count}"
)
assert (
new_count == 0
), f"New table should be empty (migration skipped), got {new_count}"
print(
f"✅ New table is empty (migration correctly skipped): {new_count} records"
)
@@ -1586,30 +1584,30 @@ async def test_dimension_mismatch_qdrant(
# 1. New collection should exist with model suffix
assert "bge_large_1024d" in new_collection
assert client.collection_exists(new_collection), (
f"New collection {new_collection} should exist"
)
assert client.collection_exists(
new_collection
), f"New collection {new_collection} should exist"
print(f"✅ New collection created: {new_collection}")
# 2. Legacy collection should be preserved (not deleted)
legacy_exists = client.collection_exists(legacy_collection)
assert legacy_exists, (
"Legacy collection should be preserved when dimensions don't match"
)
assert (
legacy_exists
), "Legacy collection should be preserved when dimensions don't match"
print(f"✅ Legacy collection preserved: {legacy_collection}")
# 3. Legacy collection should still have original data (not migrated)
legacy_count = client.count(legacy_collection).count
assert legacy_count == 3, (
f"Legacy collection should still have 3 vectors, got {legacy_count}"
)
assert (
legacy_count == 3
), f"Legacy collection should still have 3 vectors, got {legacy_count}"
print(f"✅ Legacy data preserved: {legacy_count} vectors")
# 4. New collection should be empty (migration skipped)
new_count = client.count(new_collection).count
assert new_count == 0, (
f"New collection should be empty (migration skipped), got {new_count}"
)
assert (
new_count == 0
), f"New collection should be empty (migration skipped), got {new_count}"
print(
f"✅ New collection is empty (migration correctly skipped): {new_count} vectors"
)

File: test_no_model_suffix_safety.py

@@ -110,9 +110,9 @@ class TestNoModelSuffixSafety:
for call in db.execute.call_args_list
if call[0][0] and "DROP TABLE" in call[0][0]
]
assert len(drop_calls) == 0, (
"Should not drop table when new and legacy are the same"
)
assert (
len(drop_calls) == 0
), "Should not drop table when new and legacy are the same"
# Also should not try to count (we returned early)
count_calls = [
@@ -120,9 +120,9 @@ class TestNoModelSuffixSafety:
for call in db.query.call_args_list
if call[0][0] and "COUNT(*)" in call[0][0]
]
assert len(count_calls) == 0, (
"Should not check count when new and legacy are the same"
)
assert (
len(count_calls) == 0
), "Should not check count when new and legacy are the same"
def test_qdrant_with_suffix_case1_still_works(self):
"""

File: test_postgres_migration.py

@@ -364,14 +364,14 @@ async def test_scenario_2_legacy_upgrade_migration(
for call in mock_pg_db.execute.call_args_list
if call[0][0] and "DROP TABLE" in call[0][0]
]
assert len(delete_calls) >= 1, (
"Legacy table should be deleted after successful migration"
)
assert (
len(delete_calls) >= 1
), "Legacy table should be deleted after successful migration"
# Check if legacy table was dropped
dropped_table = storage.legacy_table_name
assert any(dropped_table in str(call) for call in delete_calls), (
f"Expected to drop '{dropped_table}'"
)
assert any(
dropped_table in str(call) for call in delete_calls
), f"Expected to drop '{dropped_table}'"
@pytest.mark.asyncio
@@ -504,9 +504,9 @@ async def test_case1_empty_legacy_auto_cleanup(
assert len(delete_calls) >= 1, "Empty legacy table should be auto-deleted"
# Check if legacy table was dropped
dropped_table = storage.legacy_table_name
assert any(dropped_table in str(call) for call in delete_calls), (
f"Expected to drop empty legacy table '{dropped_table}'"
)
assert any(
dropped_table in str(call) for call in delete_calls
), f"Expected to drop empty legacy table '{dropped_table}'"
print(
f"✅ Case 1a: Empty legacy table '{dropped_table}' auto-deleted successfully"

File: test_unified_lock_safety.py

@@ -138,9 +138,9 @@ class TestUnifiedLockSafety:
assert "Async lock release failed" in str(e)
# Verify async_lock.release() was called only ONCE, not twice
assert release_call_count == 1, (
f"async_lock.release() should be called only once, but was called {release_call_count} times"
)
assert (
release_call_count == 1
), f"async_lock.release() should be called only once, but was called {release_call_count} times"
# Main lock should have been released successfully
main_lock.release.assert_called_once()

File: test_workspace_migration_isolation.py

@@ -124,9 +124,9 @@ class TestWorkspaceMigrationIsolation:
and "WHERE workspace" in call[0][0]
]
assert len(count_calls) > 0, "Count query should use workspace filter"
assert count_calls[0][0][1][0] == "workspace_a", (
"Count should filter by workspace_a"
)
assert (
count_calls[0][0][1][0] == "workspace_a"
), "Count should filter by workspace_a"
select_calls = [
call
@@ -136,9 +136,9 @@ class TestWorkspaceMigrationIsolation:
and "WHERE workspace" in call[0][0]
]
assert len(select_calls) > 0, "Select query should use workspace filter"
assert select_calls[0][0][1][0] == "workspace_a", (
"Select should filter by workspace_a"
)
assert (
select_calls[0][0][1][0] == "workspace_a"
), "Select should filter by workspace_a"
# Verify INSERT was called (migration happened)
insert_calls = [
@@ -224,9 +224,9 @@ class TestWorkspaceMigrationIsolation:
has_workspace_filter = any(
"WHERE workspace" in call[0][0] for call in count_calls
)
assert not has_workspace_filter, (
"Count should NOT filter by workspace when workspace=None"
)
assert (
not has_workspace_filter
), "Count should NOT filter by workspace when workspace=None"
@pytest.mark.asyncio
async def test_no_cross_workspace_contamination(self):