import time
import html
import gradio as gr
from datasets import load_dataset, load_from_disk
from huggingface_hub import hf_hub_download
import pandas as pd
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
import faiss
import numpy as np
# Load titles, texts, and int8 embeddings in a lazy Dataset, allowing us to efficiently access specific rows on demand
# Note that we never actually use the int8 embeddings for search directly, they are only used for rescoring after the binary search
title_text_int8_dataset = load_dataset(
    "sentence-transformers/quantized-retrieval-data", split="train"
).select_columns(["url", "title", "text", "embedding"])
# title_text_int8_dataset = load_from_disk("wikipedia-mxbai-embed-int8-index").select_columns(["url", "title", "text", "embedding"])
# Total corpus size; shown in the per-query search summary.
TOTAL_NUM_DOCS = title_text_int8_dataset.num_rows
# Load the binary indices
# Both indices hold the same ubinary embeddings: a flat index for exact search
# and an IVF index for approximate (faster) search. Downloaded once and cached
# in the current directory.
binary_index_path = hf_hub_download(
    repo_id="sentence-transformers/quantized-retrieval-data",
    filename="wikipedia_ubinary_faiss_50m.index",
    local_dir=".",
    repo_type="dataset",
)
binary_ivf_index_path = hf_hub_download(
    repo_id="sentence-transformers/quantized-retrieval-data",
    filename="wikipedia_ubinary_ivf_faiss_50m.index",
    local_dir=".",
    repo_type="dataset",
)
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary(binary_index_path)
binary_ivf_index: faiss.IndexBinaryIVF = faiss.read_index_binary(binary_ivf_index_path)
# Load the SentenceTransformer model for embedding the queries
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
if model.device.type == "cuda":
    # bfloat16 halves GPU memory use; only applied when a CUDA device is present.
    model.bfloat16()
# Run a few throwaway queries so the first real request doesn't pay
# one-time initialization costs (kernel compilation, cache warmup).
warmup_queries = [
    "What is the capital of France?",
    "Who is the president of the United States?",
    "What is the largest mammal?",
    "How to bake a chocolate cake?",
    "What is the theory of relativity?",
]
model.encode_query(warmup_queries)
def search(
    query: str,
    top_k: int = 20,
    rescore_multiplier: int = 4,
    use_approx: bool = True,
):
    """Embed ``query``, search the binary index, then rescore with int8 embeddings.

    Args:
        query: Free-text search query.
        top_k: Number of documents to return after rescoring.
        rescore_multiplier: Retrieve ``top_k * rescore_multiplier`` candidates
            from the binary index; the surplus improves rescoring quality.
        use_approx: If True, search the approximate IVF binary index, otherwise
            the exact (flat) binary index.

    Returns:
        Tuple ``(cards_html, summary_md)``: HTML cards for the top results and a
        Markdown summary with corpus statistics and per-stage timings.
    """
    # 1. Embed the query as float32
    start_time = time.time()
    query_embedding = model.encode_query(query)
    embed_time = time.time() - start_time

    # 2. Quantize the query to ubinary
    start_time = time.time()
    query_embedding_ubinary = quantize_embeddings(
        query_embedding.reshape(1, -1), "ubinary"
    )
    quantize_time = time.time() - start_time

    # 3. Search the binary index (either exact or approximate)
    index = binary_ivf_index if use_approx else binary_index
    start_time = time.time()
    _scores, binary_ids = index.search(
        query_embedding_ubinary, top_k * rescore_multiplier
    )
    binary_ids = binary_ids[0]
    # FAISS pads with -1 when (IVF) search finds fewer neighbors than requested;
    # drop them so they don't silently index the last dataset row below.
    binary_ids = binary_ids[binary_ids != -1]
    search_time = time.time() - start_time
    num_docs_searched = len(binary_ids)

    # 4. Load the corresponding int8 embeddings on demand from the lazy dataset
    start_time = time.time()
    int8_embeddings = np.array(
        title_text_int8_dataset[binary_ids]["embedding"], dtype=np.int8
    )
    load_int8_time = time.time() - start_time

    # 5. Rescore the top_k * rescore_multiplier using the float32 query embedding
    #    and the int8 document embeddings
    start_time = time.time()
    scores = query_embedding @ int8_embeddings.T
    rescore_time = time.time() - start_time

    # 6. Sort the scores and return the top_k
    start_time = time.time()
    indices = scores.argsort()[::-1][:top_k]
    top_k_indices = binary_ids[indices]
    top_k_scores = scores[indices]
    sort_time = time.time() - start_time

    # 7. Load titles, urls, and texts for the top_k results
    start_time = time.time()
    raw_top_k_titles = title_text_int8_dataset[top_k_indices]["title"]
    top_k_urls = title_text_int8_dataset[top_k_indices]["url"]
    top_k_texts = title_text_int8_dataset[top_k_indices]["text"]
    load_text_time = time.time() - start_time

    # Build HTML cards for each result so the full row is visible at once.
    # All corpus text is escaped before being embedded in the markup.
    cards = []
    for i in range(len(top_k_indices)):
        title = html.escape(str(raw_top_k_titles[i]))
        url = html.escape(str(top_k_urls[i]))
        text = html.escape(str(top_k_texts[i]))
        score_str = f"{top_k_scores[i]:.2f}"
        rank_str = str(i + 1)
        binary_rank_str = str(indices[i] + 1)
        card_html = f"""
<div style="border: 1px solid #ddd; border-radius: 8px; padding: 12px; margin-bottom: 8px;">
    <h3 style="margin: 0 0 4px 0;"><a href="{url}" target="_blank">{title}</a></h3>
    <p style="margin: 0 0 8px 0; color: #666; font-size: 0.85em;">
        Score: {score_str} &bull; Rank: {rank_str} &bull; Binary rank: {binary_rank_str}
    </p>
    <p style="margin: 0;">{text}</p>
</div>
"""
        cards.append(card_html)
    # NOTE(review): the original line here was a syntactically broken string
    # literal ("No results. split over two lines); repaired as a single literal.
    cards_html = "\n".join(cards) if cards else "<p>No results.</p>"

    # Total excludes the embedding step, which is reported separately.
    total_retrieval_time = (
        quantize_time
        + search_time
        + load_int8_time
        + rescore_time
        + sort_time
        + load_text_time
    )
    num_docs_retrieved = len(top_k_indices)
    search_mode = "Approximate (IVF)" if use_approx else "Exact"
    summary_md = f"""## Search Summary
- Total docs in corpus: {TOTAL_NUM_DOCS:,}
- Docs searched: {num_docs_searched}
- Docs retrieved: {num_docs_retrieved}
- Search mode: {search_mode}

## Timings (in seconds)
- Embed on CPU: {embed_time:.4f}
- Quantize: {quantize_time:.4f}
- Search: {search_time:.4f}
- Load int8: {load_int8_time:.4f}
- Rescore: {rescore_time:.4f}
- Sort: {sort_time:.4f}
- Load text: {load_text_time:.4f}

**Total retrieval time: {total_retrieval_time:.4f} seconds**
"""
    return cards_html, summary_md
# Remove Gradio's default block padding so the HTML header/result cards sit flush.
css = """
.no-pad-container {
    --block-padding: 0px;
}
"""

# `css` and `theme` are gr.Blocks constructor arguments, not launch() arguments;
# passing them to launch() raises a TypeError on current Gradio versions.
with gr.Blocks(title="Quantized Retrieval", theme=gr.themes.Base(), css=css) as demo:
    with gr.Row():
        with gr.Column(scale=3):
            gr.HTML(
                """
                <h1>Quantized Retrieval - Binary Search with Scalar (int8) Rescoring</h1>
                This demo showcases retrieval using <b>quantized embeddings</b> on a CPU.
                The corpus consists of <b>41 million texts</b> from Wikipedia articles.
                """,
                elem_classes="no-pad-container",
            )
            with gr.Accordion("Click to learn about the retrieval process", open=False):
                gr.Markdown(
                    """
Details:
1. The query is embedded using the [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) SentenceTransformer model.
2. The query is quantized to binary using the `quantize_embeddings` function from the SentenceTransformers library.
3. A binary index (41M binary embeddings; 5.2GB of memory/disk space) is searched using the quantized query for the top 80 documents.
4. The top 80 documents are loaded on the fly from an int8 index on disk (41M int8 embeddings; 0 bytes of memory, 47.5GB of disk space).
5. The top 80 documents are rescored using the float32 query and the int8 embeddings to get the top 20 documents.
6. The top 20 documents are sorted by score.
7. The titles and texts of the top 20 documents are loaded on the fly from disk and displayed.

This process is designed to be memory efficient and fast, with the binary index being small enough to fit in memory and the int8 index being loaded as a view to save memory.
In total, this process requires keeping 1) the model in memory, 2) the binary index in memory, and 3) the int8 index on disk. With a dimensionality of 1024,
we need `1024 / 8 * num_docs` bytes for the binary index and `1024 * num_docs` bytes for the int8 index.

This is notably cheaper than doing the same process with float32 embeddings, which would require `4 * 1024 * num_docs` bytes of memory/disk space for the float32 index, i.e. 32x as much memory and 4x as much disk space.
Additionally, the binary index is much faster (up to 32x) to search than the float32 index, while the rescoring is also extremely efficient. In conclusion, this process allows for fast, scalable, cheap, and memory-efficient retrieval.

Feel free to check out the [code for this demo](https://huggingface.co/spaces/sentence-transformers/quantized-retrieval/blob/main/app.py) to learn more about how to apply this in practice.

Notes:
- The approximate search index (a binary Inverted File Index (IVF)) is in beta and has not been trained with a lot of data.
"""
                )
            query = gr.Textbox(
                label="Query for Wikipedia articles",
                placeholder="Enter a query to search for relevant texts from Wikipedia.",
            )
            search_button = gr.Button(value="Search", variant="secondary")
        with gr.Column(scale=1):
            top_k = gr.Slider(
                minimum=10,
                maximum=1000,
                step=1,
                value=20,
                label="Number of documents to retrieve",
                info="Number of documents to retrieve using binary search",
            )
            rescore_multiplier = gr.Slider(
                minimum=1,
                maximum=10,
                step=1,
                value=4,
                label="Rescore multiplier",
                info="Search for `rescore_multiplier` as many documents to rescore",
            )
            use_approx = gr.Radio(
                choices=[("Approximate Search", True), ("Exact Search", False)],
                value=True,
                label="Search Settings",
            )

    with gr.Row():
        with gr.Column(scale=3):
            cards = gr.HTML(label="Results", elem_classes="no-pad-container")
        with gr.Column(scale=1):
            summary = gr.Markdown(label="Search Summary")

    # Example queries run `search` with default slider/radio values.
    examples = gr.Examples(
        examples=[
            "What is the coldest metal to the touch?",
            "Who won the FIFA World Cup in 2018?",
            "How to make a paper airplane?",
            "Who was the first woman to cross the Pacific ocean by plane?",
        ],
        fn=search,
        inputs=[query],
        outputs=[cards, summary],
        cache_examples=False,
        run_on_click=True,
    )

    # Both Enter-in-textbox and the button trigger the same search.
    query.submit(
        search,
        inputs=[query, top_k, rescore_multiplier, use_approx],
        outputs=[cards, summary],
    )
    search_button.click(
        search,
        inputs=[query, top_k, rescore_multiplier, use_approx],
        outputs=[cards, summary],
    )

demo.queue()
demo.launch()