feat: Enhance BinaryANNIndex with vectorized search and performance benchmarking

catlog22
2026-01-02 11:49:54 +08:00
parent da68ba0b82
commit 9129c981a4
4 changed files with 479 additions and 140 deletions

View File

@@ -0,0 +1,209 @@
#!/usr/bin/env python
"""Micro-benchmark for BinaryANNIndex search performance.

Measures the actual speedup of vectorized Hamming distance computation.
"""
from __future__ import annotations

import gc
import statistics
import sys
import time
from pathlib import Path

# Add src to path
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

import numpy as np


def old_search_implementation(query_arr: np.ndarray, vectors: dict, id_list: list, top_k: int):
    """Original O(N) loop-based implementation for comparison."""
    distances = []
    for vec_id in id_list:
        vec = vectors[vec_id]
        vec_arr = np.frombuffer(vec, dtype=np.uint8)
        # XOR the packed bytes, then count set bits one vector at a time
        xor = np.bitwise_xor(query_arr, vec_arr)
        dist = int(np.unpackbits(xor).sum())
        distances.append((vec_id, dist))
    distances.sort(key=lambda x: x[1])
    top_results = distances[:top_k]
    ids = [r[0] for r in top_results]
    dists = [r[1] for r in top_results]
    return ids, dists
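
# Note: the loop above pays a Python-level iteration per stored vector, with a
# separate XOR and np.unpackbits call each time, so its cost scales as
# O(N * packed_dim); the vectorized version below replaces it with a few
# whole-matrix NumPy operations.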


def new_search_implementation(query_arr: np.ndarray, vectors_matrix: np.ndarray, ids_array: np.ndarray, top_k: int):
    """Optimized vectorized implementation."""
    # Broadcast XOR of the query row against every stored vector at once
    xor_result = np.bitwise_xor(query_arr, vectors_matrix)
    # Vectorized popcount using a 256-entry lookup table (rebuilt per call,
    # so its construction cost is included in the measured time)
    popcount_lut = np.array([bin(i).count('1') for i in range(256)], dtype=np.uint8)
    bit_counts = popcount_lut[xor_result]
    # Sum across packed bytes to get per-vector Hamming distances
    distances = bit_counts.sum(axis=1)
    # Get top-k using argpartition (O(N) selection) plus a small O(k log k) sort
    n_vectors = len(distances)
    k = min(top_k, n_vectors)
    if k == n_vectors:
        sorted_indices = np.argsort(distances)
    else:
        partition_indices = np.argpartition(distances, k)[:k]
        top_k_distances = distances[partition_indices]
        sorted_order = np.argsort(top_k_distances)
        sorted_indices = partition_indices[sorted_order]
    result_ids = ids_array[sorted_indices].tolist()
    result_dists = distances[sorted_indices].tolist()
    return result_ids, result_dists
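
# A minimal, self-contained check of the lookup-table popcount (illustrative
# only; the benchmark never calls this). It confirms the LUT agrees with a
# per-byte np.unpackbits count on random packed bytes.
def _popcount_lut_demo(n_bytes: int = 16) -> None:
    lut = np.array([bin(i).count('1') for i in range(256)], dtype=np.uint8)
    sample = np.random.randint(0, 256, size=n_bytes, dtype=np.uint8)
    per_byte = np.unpackbits(sample).reshape(-1, 8).sum(axis=1)
    assert (lut[sample] == per_byte).all()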


def run_benchmark(n_vectors: int, dim: int = 256, top_k: int = 100, n_iterations: int = 50):
    """Run benchmark comparing old and new implementations."""
    packed_dim = dim // 8  # 32 bytes for a 256-bit vector

    print(f"\n{'='*60}")
    print("Binary Search Micro-Benchmark")
    print(f"{'='*60}")
    print(f"Vectors: {n_vectors}")
    print(f"Dimension: {dim} bits ({packed_dim} bytes packed)")
    print(f"Top-K: {top_k}")
    print(f"Iterations: {n_iterations}")
    print(f"{'='*60}\n")

    # Generate random binary vectors
    print("Generating test data...")
    vectors_dict = {}
    id_list = []
    for i in range(n_vectors):
        vec_bytes = np.random.randint(0, 256, size=packed_dim, dtype=np.uint8).tobytes()
        vectors_dict[i] = vec_bytes
        id_list.append(i)

    # Build matrix for vectorized search
    vectors_matrix = np.empty((n_vectors, packed_dim), dtype=np.uint8)
    ids_array = np.array(id_list, dtype=np.int64)
    for i, vec_id in enumerate(id_list):
        vec_bytes = vectors_dict[vec_id]
        vectors_matrix[i] = np.frombuffer(vec_bytes, dtype=np.uint8)

    # Generate random query
    query_bytes = np.random.randint(0, 256, size=packed_dim, dtype=np.uint8).tobytes()
    query_arr = np.frombuffer(query_bytes, dtype=np.uint8)

    # Warmup
    print("Running warmup...")
    for _ in range(3):
        old_search_implementation(query_arr, vectors_dict, id_list, top_k)
        new_search_implementation(query_arr, vectors_matrix, ids_array, top_k)

    # Benchmark old implementation
    print("Benchmarking old implementation...")
    old_times = []
    for _ in range(n_iterations):
        gc.collect()
        start = time.perf_counter()
        old_ids, old_dists = old_search_implementation(query_arr, vectors_dict, id_list, top_k)
        elapsed = (time.perf_counter() - start) * 1000
        old_times.append(elapsed)

    # Benchmark new implementation
    print("Benchmarking new implementation...")
    new_times = []
    for _ in range(n_iterations):
        gc.collect()
        start = time.perf_counter()
        new_ids, new_dists = new_search_implementation(query_arr, vectors_matrix, ids_array, top_k)
        elapsed = (time.perf_counter() - start) * 1000
        new_times.append(elapsed)

    # Verify correctness: the top-k distance lists should be identical even
    # when tie-breaking picks different IDs
    print("\nVerifying correctness...")
    if old_dists == new_dists:
        print("Distances match! (IDs may differ for ties)")
    else:
        # Check whether the difference is just in tie-breaking
        old_dist_set = set(old_dists)
        new_dist_set = set(new_dists)
        if old_dist_set == new_dist_set:
            print("Distances equivalent (tie-breaking differs, which is acceptable)")
        else:
            print("WARNING: Distance distributions differ!")
            print(f"  Old dists (first 5): {old_dists[:5]}")
            print(f"  New dists (first 5): {new_dists[:5]}")

    # Calculate statistics
    old_avg = statistics.mean(old_times)
    old_std = statistics.stdev(old_times) if len(old_times) > 1 else 0
    new_avg = statistics.mean(new_times)
    new_std = statistics.stdev(new_times) if len(new_times) > 1 else 0
    speedup = old_avg / new_avg if new_avg > 0 else 0

    # Print results
    print(f"\n{'='*60}")
    print("RESULTS")
    print(f"{'='*60}")
    print(f"{'Metric':<25} {'Old (loop)':>15} {'New (vectorized)':>18}")
    print(f"{'-'*25} {'-'*15} {'-'*18}")
    print(f"{'Avg Latency (ms)':<25} {old_avg:>15.3f} {new_avg:>18.3f}")
    print(f"{'Std Dev (ms)':<25} {old_std:>15.3f} {new_std:>18.3f}")
    print(f"{'Min Latency (ms)':<25} {min(old_times):>15.3f} {min(new_times):>18.3f}")
    print(f"{'Max Latency (ms)':<25} {max(old_times):>15.3f} {max(new_times):>18.3f}")
    print(f"{'P50 (ms)':<25} {sorted(old_times)[len(old_times)//2]:>15.3f} {sorted(new_times)[len(new_times)//2]:>18.3f}")
    print(f"\n{'Speedup:':<25} {speedup:>15.2f}x")
    print(f"{'='*60}\n")

    return {
        "n_vectors": n_vectors,
        "dim": dim,
        "top_k": top_k,
        "old_avg_ms": old_avg,
        "new_avg_ms": new_avg,
        "speedup": speedup,
    }


def main():
    print("\n" + "=" * 70)
    print(" BINARY SEARCH OPTIMIZATION MICRO-BENCHMARK")
    print("=" * 70)

    # Test different vector counts
    results = []
    for n_vectors in [1000, 5000, 10000, 50000]:
        result = run_benchmark(
            n_vectors=n_vectors,
            dim=256,
            top_k=100,
            n_iterations=20,
        )
        results.append(result)

    # Summary
    print("\n" + "=" * 70)
    print(" SUMMARY")
    print("=" * 70)
    print(f"{'N Vectors':<12} {'Old (ms)':<12} {'New (ms)':<12} {'Speedup':>10}")
    print("-" * 50)
    for r in results:
        print(f"{r['n_vectors']:<12} {r['old_avg_ms']:<12.3f} {r['new_avg_ms']:<12.3f} {r['speedup']:>10.2f}x")
    print("=" * 70)


if __name__ == "__main__":
    main()
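
For context, here is a minimal sketch of how the vectorized kernel above could back an index class. The class name, storage layout, and method signatures below are illustrative assumptions, not the actual BinaryANNIndex API from this commit. One design note: unlike the benchmark function, this sketch builds the popcount table once at construction time rather than per query.

# Hypothetical wrapper (illustrative only): names like BinaryANNIndexSketch,
# add, and search are assumptions, not the BinaryANNIndex API from this repo.
import numpy as np

class BinaryANNIndexSketch:
    def __init__(self, packed_dim: int = 32):
        self.packed_dim = packed_dim
        self._rows = []   # list of (packed_dim,) uint8 arrays
        self._ids = []    # parallel list of vector ids
        # Build the 256-entry popcount table once, not per query
        self._lut = np.array([bin(i).count('1') for i in range(256)], dtype=np.uint8)

    def add(self, vec_id: int, packed: bytes) -> None:
        self._rows.append(np.frombuffer(packed, dtype=np.uint8))
        self._ids.append(vec_id)

    def search(self, query: bytes, top_k: int = 10):
        if not self._rows:
            return [], []
        matrix = np.vstack(self._rows)   # (N, packed_dim)
        ids = np.array(self._ids, dtype=np.int64)
        q = np.frombuffer(query, dtype=np.uint8)
        # Same vectorized kernel as new_search_implementation above
        dists = self._lut[np.bitwise_xor(q, matrix)].sum(axis=1)
        k = min(top_k, len(dists))
        idx = np.argpartition(dists, k - 1)[:k]
        idx = idx[np.argsort(dists[idx])]
        return ids[idx].tolist(), dists[idx].tolist()

Rebuilding the matrix on every call keeps the sketch short; a production index would cache it and invalidate the cache on add.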

View File

@@ -1,30 +1,30 @@
 {
-  "timestamp": "2026-01-02 11:22:34",
+  "timestamp": "2026-01-02 11:48:33",
   "summaries": {
     "binary": {
       "strategy": "binary",
       "total_queries": 15,
       "successful_queries": 15,
-      "avg_latency_ms": 850.328753333209,
-      "min_latency_ms": 750.9617999967304,
-      "max_latency_ms": 1015.733200001705,
-      "p50_latency_ms": 847.9711999971187,
-      "p95_latency_ms": 976.768470002571,
-      "p99_latency_ms": 1007.9402540018782,
-      "avg_results": 0,
+      "avg_latency_ms": 1133.4008666667312,
+      "min_latency_ms": 959.5361000028788,
+      "max_latency_ms": 1330.8978999993997,
+      "p50_latency_ms": 1125.8439999946859,
+      "p95_latency_ms": 1330.0081999987015,
+      "p99_latency_ms": 1330.71995999926,
+      "avg_results": 10,
       "errors": []
     },
     "hybrid": {
       "strategy": "hybrid",
       "total_queries": 15,
       "successful_queries": 15,
-      "avg_latency_ms": 821.3745733330143,
-      "min_latency_ms": 720.5589000004693,
-      "max_latency_ms": 943.0299999949057,
-      "p50_latency_ms": 819.5875000019441,
-      "p95_latency_ms": 916.3381599981221,
-      "p99_latency_ms": 937.691631995549,
-      "avg_results": 0,
+      "avg_latency_ms": 1111.1401133336283,
+      "min_latency_ms": 857.0021999985329,
+      "max_latency_ms": 1278.8890000010724,
+      "p50_latency_ms": 1130.696000000171,
+      "p95_latency_ms": 1254.2417899981956,
+      "p99_latency_ms": 1273.959558000497,
+      "avg_results": 10,
       "errors": []
     }
   },
@@ -33,121 +33,121 @@
     {
       "strategy": "binary",
       "query": "def search",
-      "latency_ms": 862.7266999974381,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1044.525999997859,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\storage\\dir_index.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "class Engine",
-      "latency_ms": 773.8472999990336,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1052.5979999947594,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\parsers\\factory.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "import numpy",
-      "latency_ms": 858.1023000006098,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1217.217100005655,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\__main__.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "async def",
-      "latency_ms": 877.2815999982413,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1276.9802000038908,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\reranker\\api_reranker.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "raise ValueError",
-      "latency_ms": 824.3320999972639,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1005.9053000004496,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\cli\\commands.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "how to parse json",
-      "latency_ms": 948.0362000031164,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1330.8978999993997,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\cli\\commands.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "database connection",
-      "latency_ms": 789.3126000053599,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1041.6685000018333,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\indexing\\symbol_extractor.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "error handling",
-      "latency_ms": 960.0693000029423,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 959.5361000028788,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\storage\\migrations\\migration_004_dual_fts.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "authentication logic",
-      "latency_ms": 757.247900000948,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1060.9395999999833,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\search\\query_parser.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "file read write",
-      "latency_ms": 750.9617999967304,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 971.8680000005406,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\watcher\\incremental_indexer.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "embedding vector",
-      "latency_ms": 871.1426000008942,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1135.879900000873,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\embedder.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "cosine similarity",
-      "latency_ms": 817.1380999992834,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1188.1732000038028,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\vector_store.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "binary quantization",
-      "latency_ms": 1015.733200001705,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1259.3522999959532,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\vector_store.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "hamming distance",
-      "latency_ms": 847.9711999971187,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1329.6268999984022,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\ann_index.py:0",
       "error": null
     },
     {
       "strategy": "binary",
       "query": "reranking",
-      "latency_ms": 801.028399997449,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1125.8439999946859,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\search\\ranking.py:0",
       "error": null
     }
   ],
@@ -155,121 +155,121 @@
     {
       "strategy": "hybrid",
       "query": "def search",
-      "latency_ms": 720.5589000004693,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1117.0937999995658,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\storage\\dir_index.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "class Engine",
-      "latency_ms": 792.9914000051212,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1039.3984000038472,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\parsers\\factory.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "import numpy",
-      "latency_ms": 943.0299999949057,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1144.7916999968584,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\__main__.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "async def",
-      "latency_ms": 819.5875000019441,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 857.0021999985329,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\reranker\\api_reranker.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "raise ValueError",
-      "latency_ms": 835.5114000005415,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 957.5578000003588,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\cli\\commands.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "how to parse json",
-      "latency_ms": 867.8118999960134,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1216.5708000029554,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\cli\\commands.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "database connection",
-      "latency_ms": 824.6361999990768,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1154.8929000055068,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\indexing\\symbol_extractor.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "error handling",
-      "latency_ms": 742.638600000646,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1130.696000000171,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\storage\\migrations\\migration_004_dual_fts.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "authentication logic",
-      "latency_ms": 840.4286999939359,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1112.8943000003346,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\search\\query_parser.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "file read write",
-      "latency_ms": 810.9049000049708,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1172.5986000019475,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\watcher\\incremental_indexer.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "embedding vector",
-      "latency_ms": 876.5335000061896,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1278.8890000010724,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\embedder.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "cosine similarity",
-      "latency_ms": 797.3090999948909,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1024.2393000007723,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\vector_store.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "binary quantization",
-      "latency_ms": 767.9803999999422,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1243.6786999969627,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\vector_store.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "hamming distance",
-      "latency_ms": 775.7972999970661,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1081.3100999948801,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\semantic\\ann_index.py:0",
       "error": null
     },
     {
       "strategy": "hybrid",
       "query": "reranking",
-      "latency_ms": 904.8987999995006,
-      "num_results": 0,
-      "top_result": null,
+      "latency_ms": 1135.4881000006571,
+      "num_results": 10,
+      "top_result": "D:\\Claude_dms3\\codex-lens\\src\\codexlens\\search\\ranking.py:0",
       "error": null
     }
   ]
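
As a closing aside, the summary percentiles in this report (p50/p95/p99 over 15 queries per strategy) can be recomputed from the per-query latencies above. The harness's exact percentile method is not shown in this diff, so the sketch below assumes NumPy's default linear interpolation.

# Sketch: recompute the summary block from raw per-query latencies (ms).
# Assumes numpy's default linear-interpolation percentiles; the benchmark
# harness's exact method is not shown in this commit.
import numpy as np

def summarize(latencies_ms):
    arr = np.asarray(latencies_ms, dtype=np.float64)
    return {
        "avg_latency_ms": float(arr.mean()),
        "min_latency_ms": float(arr.min()),
        "max_latency_ms": float(arr.max()),
        "p50_latency_ms": float(np.percentile(arr, 50)),
        "p95_latency_ms": float(np.percentile(arr, 95)),
        "p99_latency_ms": float(np.percentile(arr, 99)),
    }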