from io import BytesIO

import copy
import datetime
import json
import pickle

import pytest

import tantivy
from conftest import schema, schema_numeric_fields
from tantivy import Document, Index, SchemaBuilder, SnippetGenerator, Query, Occur
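
# The dir_index, ram_index, ram_index_numeric_fields, and spanish_index
# arguments used throughout are pytest fixtures, assumed to be provided by
# the same conftest.py that defines the imported schema helpers.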


class TestClass(object):
    def test_simple_search_in_dir(self, dir_index):
        _, index = dir_index
        query = index.parse_query("sea whale", ["title", "body"])

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1

    def test_simple_search_after_reuse(self, dir_index):
        index_dir, _ = dir_index
        index = Index(schema(), str(index_dir))
        query = index.parse_query("sea whale", ["title", "body"])

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1

    def test_simple_search_in_ram(self, ram_index):
        index = ram_index
        query = index.parse_query("sea whale", ["title", "body"])

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["The Old Man and the Sea"]

    def test_simple_search_in_spanish(self, spanish_index):
        index = spanish_index
        query = index.parse_query("vieja", ["title", "body"])

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        _, doc_address = result.hits[0]
        search_doc = index.searcher().doc(doc_address)
        assert search_doc["title"] == ["El viejo y el mar"]

    def test_and_query(self, ram_index):
        index = ram_index
        query = index.parse_query(
            "title:men AND body:summer", default_field_names=["title", "body"]
        )
        # look for an intersection of documents
        searcher = index.searcher()
        result = searcher.search(query, 10)

        # summer isn't present
        assert len(result.hits) == 0

        query = index.parse_query("title:men AND body:winter", ["title", "body"])
        result = searcher.search(query)

        assert len(result.hits) == 1

    def test_and_query_numeric_fields(self, ram_index_numeric_fields):
        index = ram_index_numeric_fields
        searcher = index.searcher()

        # 1 result
        float_query = index.parse_query("3.5", ["rating"])
        result = searcher.search(float_query)
        assert len(result.hits) == 1
        assert searcher.doc(result.hits[0][1])["rating"][0] == 3.5

        integer_query = index.parse_query("1", ["id"])
        result = searcher.search(integer_query)
        assert len(result.hits) == 1

        # 0 result
        integer_query = index.parse_query("10", ["id"])
        result = searcher.search(integer_query)
        assert len(result.hits) == 0

    def test_and_query_parser_default_fields(self, ram_index):
        query = ram_index.parse_query("winter", default_field_names=["title"])
        assert repr(query) == """Query(TermQuery(Term(field=0, type=Str, "winter")))"""

    def test_and_query_parser_default_fields_undefined(self, ram_index):
        query = ram_index.parse_query("winter")
        assert (
            repr(query)
            == """Query(BooleanQuery { subqueries: [(Should, TermQuery(Term(field=0, type=Str, "winter"))), (Should, TermQuery(Term(field=1, type=Str, "winter")))] })"""
        )
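
    # In the expected reprs here and below, field=0 and field=1 are the
    # positions of "title" and "body" in the test schema.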
    def test_parse_query_field_boosts(self, ram_index):
        query = ram_index.parse_query("winter", field_boosts={"title": 2.3})
        assert (
            repr(query)
            == """Query(BooleanQuery { subqueries: [(Should, Boost(query=TermQuery(Term(field=0, type=Str, "winter")), boost=2.3)), (Should, TermQuery(Term(field=1, type=Str, "winter")))] })"""
        )

    def test_parse_query_fuzzy_fields(self, ram_index):
        # each fuzzy_fields entry maps a field name to
        # (prefix, distance, transposition_cost_one)
        query = ram_index.parse_query("winter", fuzzy_fields={"title": (True, 1, False)})
        assert (
            repr(query)
            == """Query(BooleanQuery { subqueries: [(Should, FuzzyTermQuery { term: Term(field=0, type=Str, "winter"), distance: 1, transposition_cost_one: false, prefix: true }), (Should, TermQuery(Term(field=1, type=Str, "winter")))] })"""
        )

    def test_query_errors(self, ram_index):
        index = ram_index
        # no "bod" field
        with pytest.raises(ValueError):
            index.parse_query("bod:men", ["title", "body"])

    def test_query_lenient(self, ram_index_numeric_fields):
        from tantivy import query_parser_error

        index = ram_index_numeric_fields

        query, errors = index.parse_query_lenient("rating:3.5")
        assert len(errors) == 0
        assert repr(query) == """Query(TermQuery(Term(field=1, type=F64, 3.5)))"""

        _, errors = index.parse_query_lenient("bod:men")
        assert len(errors) == 1
        assert isinstance(errors[0], query_parser_error.FieldDoesNotExistError)

        query, errors = index.parse_query_lenient(
            "body:'hello' AND id:<3.5 OR rating:'hi'"
        )
        assert len(errors) == 2
        assert isinstance(errors[0], query_parser_error.ExpectedIntError)
        assert isinstance(errors[1], query_parser_error.ExpectedFloatError)
        assert (
            repr(query)
            == """Query(BooleanQuery { subqueries: [(Should, BooleanQuery { subqueries: [(Must, TermQuery(Term(field=3, type=Str, "hello")))] })] })"""
        )
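
    # order_by_field sorts hits by a fast field; the two tests below cover a
    # field declared with fast=True and one without.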
    def test_order_by_search(self):
        schema = (
            SchemaBuilder()
            .add_unsigned_field("order", fast=True)
            .add_text_field("title", stored=True)
            .build()
        )

        index = Index(schema)
        writer = index.writer()

        doc = Document()
        doc.add_unsigned("order", 0)
        doc.add_text("title", "Test title")

        writer.add_document(doc)

        doc = Document()
        doc.add_unsigned("order", 2)
        doc.add_text("title", "Final test title")
        writer.add_document(doc)

        doc = Document()
        doc.add_unsigned("order", 1)
        doc.add_text("title", "Another test title")

        writer.add_document(doc)

        writer.commit()
        index.reload()

        query = index.parse_query("test")

        searcher = index.searcher()

        # hits are sorted by the "order" fast field, descending by default
        result = searcher.search(query, 10, offset=2, order_by_field="order")

        assert len(result.hits) == 1

        result = searcher.search(query, 10, order_by_field="order")

        assert len(result.hits) == 3

        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Final test title"]

        _, doc_address = result.hits[1]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Another test title"]

        _, doc_address = result.hits[2]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Test title"]

        result = searcher.search(
            query, 10, order_by_field="order", order=tantivy.Order.Asc
        )

        assert len(result.hits) == 3

        _, doc_address = result.hits[2]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Final test title"]

        _, doc_address = result.hits[1]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Another test title"]

        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Test title"]

    def test_order_by_search_without_fast_field(self):
        schema = (
            SchemaBuilder()
            .add_unsigned_field("order")
            .add_text_field("title", stored=True)
            .build()
        )

        index = Index(schema)
        writer = index.writer()

        doc = Document()
        doc.add_unsigned("order", 0)
        doc.add_text("title", "Test title")

        # The document is never added to the writer or committed, so the
        # search below finds nothing.
        query = index.parse_query("test")

        searcher = index.searcher()
        result = searcher.search(query, 10, order_by_field="order")
        assert len(result.hits) == 0

    def test_with_merges(self):
        # This test is taken from tantivy's test suite:
        # https://github.com/quickwit-oss/tantivy/blob/42acd334f49d5ff7e4fe846b5c12198f24409b50/src/indexer/index_writer.rs#L1130
        schema = SchemaBuilder().add_text_field("text", stored=True).build()

        index = Index(schema)
        index.config_reader(reload_policy="Manual")

        writer = index.writer()

        for _ in range(100):
            doc = Document()
            doc.add_text("text", "a")

            writer.add_document(doc)

        writer.commit()

        for _ in range(100):
            doc = Document()
            doc.add_text("text", "a")

            writer.add_document(doc)

        # This should create 8 segments and trigger a merge.
        writer.commit()
        writer.wait_merging_threads()

        # Accessing the writer again should result in an error.
        with pytest.raises(RuntimeError):
            writer.wait_merging_threads()

        index.reload()

        query = index.parse_query("a")
        searcher = index.searcher()
        result = searcher.search(query, limit=500, count=True)
        assert result.count == 200

        assert searcher.num_segments < 8

    def test_doc_from_dict_numeric_validation(self):
        schema = (
            SchemaBuilder()
            .add_unsigned_field("unsigned")
            .add_integer_field("signed")
            .add_float_field("float")
            .build()
        )

        good = Document.from_dict(
            {"unsigned": 1000, "signed": -5, "float": 0.4},
            schema,
        )

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {"unsigned": -50, "signed": -5, "float": 0.4},
                schema,
            )

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {"unsigned": 1000, "signed": 50.4, "float": 0.4},
                schema,
            )

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {
                    "unsigned": 1000,
                    "signed": -5,
                    "float": "bad_string",
                },
                schema,
            )

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {
                    "unsigned": [1000, -50],
                    "signed": -5,
                    "float": 0.4,
                },
                schema,
            )

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {
                    "unsigned": 1000,
                    "signed": [-5, 150, -3.14],
                    "float": 0.4,
                },
                schema,
            )

    def test_doc_from_dict_bytes_validation(self):
        schema = SchemaBuilder().add_bytes_field("bytes").build()

        good = Document.from_dict({"bytes": b"hello"}, schema)
        good = Document.from_dict({"bytes": [[1, 2, 3], [4, 5, 6]]}, schema)
        good = Document.from_dict({"bytes": [1, 2, 3]}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"bytes": [1, 2, 256]}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"bytes": "hello"}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"bytes": [1024, "there"]}, schema)

    def test_doc_from_dict_ip_addr_validation(self):
        schema = SchemaBuilder().add_ip_addr_field("ip").build()

        good = Document.from_dict({"ip": "127.0.0.1"}, schema)
        good = Document.from_dict({"ip": "::1"}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"ip": 12309812348}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"ip": "256.100.0.1"}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {"ip": "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0:1234"}, schema
            )

        with pytest.raises(ValueError):
            bad = Document.from_dict(
                {"ip": "1234:5678:9ABC:DEF0:1234:5678:9ABC:GHIJ"}, schema
            )

    def test_doc_from_dict_json_validation(self):
        # Test implicit JSON
        good = Document.from_dict({"dict": {"hello": "world"}})

        schema = SchemaBuilder().add_json_field("json").build()

        good = Document.from_dict({"json": {}}, schema)
        good = Document.from_dict({"json": {"hello": "world"}}, schema)
        good = Document.from_dict(
            {"nested": {"hello": ["world", "!"]}, "numbers": [1, 2, 3]}, schema
        )

        list_of_jsons = [
            {"hello": "world"},
            {"nested": {"hello": ["world", "!"]}, "numbers": [1, 2, 3]},
        ]
        good = Document.from_dict({"json": list_of_jsons}, schema)

        good = Document.from_dict({"json": json.dumps(list_of_jsons[1])}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"json": 123}, schema)

        with pytest.raises(ValueError):
            bad = Document.from_dict({"json": "hello"}, schema)

    def test_search_result_eq(self, ram_index, spanish_index):
        eng_index = ram_index
        eng_query = eng_index.parse_query("sea whale", ["title", "body"])

        esp_index = spanish_index
        esp_query = esp_index.parse_query("vieja", ["title", "body"])

        eng_result1 = eng_index.searcher().search(eng_query, 10)
        eng_result2 = eng_index.searcher().search(eng_query, 10)
        esp_result = esp_index.searcher().search(esp_query, 10)

        assert eng_result1 == eng_result2
        assert eng_result1 != esp_result
        assert eng_result2 != esp_result

    def test_search_result_pickle(self, ram_index):
        index = ram_index
        query = index.parse_query("sea whale", ["title", "body"])

        orig = index.searcher().search(query, 10)
        pickled = pickle.loads(pickle.dumps(orig))

        assert orig == pickled

    def test_delete_all_documents(self, ram_index):
        index = ram_index
        writer = index.writer()
        writer.delete_all_documents()
        writer.commit()

        index.reload()
        query = index.parse_query("sea whale", ["title", "body"])
        result = index.searcher().search(query, 10)

        assert len(result.hits) == 0
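

# IndexWriter.delete_documents removes every document whose field matches the
# given term; the test below checks that both the field name and the term's
# type are validated.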
class TestUpdateClass(object):
    def test_delete_update(self, ram_index):
        query = ram_index.parse_query("Frankenstein", ["title"])
        result = ram_index.searcher().search(query, 10)
        assert len(result.hits) == 1

        writer = ram_index.writer()

        with pytest.raises(ValueError):
            writer.delete_documents("fake_field", "frankenstein")

        with pytest.raises(ValueError):
            writer.delete_documents("title", b"frankenstein")

        writer.delete_documents("title", "frankenstein")
        writer.commit()
        ram_index.reload()

        result = ram_index.searcher().search(query)
        assert len(result.hits) == 0


class TestFromDiskClass(object):
    def test_opens_from_dir_invalid_schema(self, dir_index):
        invalid_schema = SchemaBuilder().add_text_field("🐱").build()
        index_dir, _ = dir_index
        with pytest.raises(ValueError):
            Index(invalid_schema, str(index_dir), reuse=True)

    def test_opens_from_dir(self, dir_index):
        index_dir, _ = dir_index

        index = Index(schema(), str(index_dir), reuse=True)
        assert index.searcher().num_docs == 3

    def test_create_readers(self):
        # not sure what is the point of this test.
        idx = Index(schema())
        idx.config_reader("Manual", 4)
        assert idx.searcher().num_docs == 0
        # by default this is manual mode
        writer = idx.writer(30000000, 1)
        writer.add_document(Document(title="mytitle", body="mybody"))
        writer.commit()
        assert idx.searcher().num_docs == 0
        # Manual is the default setting.
        # In this case, changes are reflected only when
        # the index is manually reloaded.
        idx.reload()
        assert idx.searcher().num_docs == 1
        idx.config_reader("OnCommit", 4)
        writer.add_document(Document(title="mytitle2", body="mybody2"))
        writer.commit()
        import time

        for _ in range(50):
            # The index should be automatically reloaded.
            # Wait for at most 5s for it to happen.
            time.sleep(0.1)
            if idx.searcher().num_docs == 2:
                return
        assert False, "index was not reloaded automatically within 5s"


class TestSearcher(object):
    def test_searcher_repr(self, ram_index, ram_index_numeric_fields):
        assert repr(ram_index.searcher()) == "Searcher(num_docs=3, num_segments=1)"
        assert (
            repr(ram_index_numeric_fields.searcher())
            == "Searcher(num_docs=2, num_segments=1)"
        )


class TestDocument(object):
    def test_document(self):
        doc = tantivy.Document(name="Bill", reference=[1, 2])
        assert doc["reference"] == [1, 2]
        assert doc["name"] == ["Bill"]
        assert doc.get_first("name") == "Bill"
        assert doc.get_first("reference") == 1
        assert doc.to_dict() == {"name": ["Bill"], "reference": [1, 2]}

    def test_document_with_date(self):
        date = datetime.datetime(2019, 8, 12, 13, 0, 0)
        doc = tantivy.Document(name="Bill", date=date)
        assert doc["date"][0] == date

    def test_document_repr(self):
        doc = tantivy.Document(name="Bill", reference=[1, 2])
        assert repr(doc) == "Document(name=[Bill],reference=[1,2])"

    def test_document_repr_utf8(self):
        doc = tantivy.Document(name="野菜食べないとやばい", reference=[1, 2])
        assert repr(doc) == "Document(name=[野菜食べないとやばい],reference=[1,2])"

    def test_document_with_facet(self):
        doc = tantivy.Document()
        facet = tantivy.Facet.from_string("/europe/france")
        doc.add_facet("facet", facet)
        assert doc["facet"][0].to_path() == ["europe", "france"]
        doc = tantivy.Document()
        facet = tantivy.Facet.from_string("/asia\\/oceania/fiji")
        doc.add_facet("facet", facet)
        assert doc["facet"][0].to_path() == ["asia/oceania", "fiji"]
        assert doc["facet"][0].to_path_str() == "/asia\\/oceania/fiji"
        assert repr(doc["facet"][0]) == "Facet(/asia\\/oceania/fiji)"
        doc = tantivy.Document(facet=facet)
        assert doc["facet"][0].to_path() == ["asia/oceania", "fiji"]

    def test_document_eq(self):
        doc1 = tantivy.Document(name="Bill", reference=[1, 2])
        doc2 = tantivy.Document.from_dict({"name": "Bill", "reference": [1, 2]})
        doc3 = tantivy.Document(name="Bob", reference=[3, 4])

        assert doc1 == doc2
        assert doc1 != doc3
        assert doc2 != doc3

    def test_document_copy(self):
        doc1 = tantivy.Document(name="Bill", reference=[1, 2])
        doc2 = copy.copy(doc1)
        doc3 = copy.deepcopy(doc2)

        assert doc1 == doc2
        assert doc1 == doc3
        assert doc2 == doc3

    def test_document_pickle(self):
        orig = Document()
        orig.add_unsigned("unsigned", 1)
        orig.add_integer("integer", 5)
        orig.add_float("float", 1.0)
        orig.add_date("birth", datetime.datetime(2019, 8, 12, 13, 0, 5))
        orig.add_text("title", "hello world!")
        orig.add_json("json", '{"a": 1, "b": 2}')
        orig.add_bytes("bytes", b"abc")

        facet = tantivy.Facet.from_string("/europe/france")
        orig.add_facet("facet", facet)

        pickled = pickle.loads(pickle.dumps(orig))

        assert orig == pickled
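

# Document.add_json accepts either a raw JSON string or a Python dict;
# test_query_from_json_field below indexes one document each way.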
class TestJsonField:
    def test_query_from_json_field(self):
        schema = (
            SchemaBuilder()
            .add_json_field(
                "attributes",
                stored=True,
                tokenizer_name="default",
                index_option="position",
            )
            .build()
        )

        index = Index(schema)

        writer = index.writer()

        doc = Document()
        doc.add_json(
            "attributes",
            """{
                "order":1.1,
                "target": "submit-button",
                "cart": {"product_id": 103},
                "description": "the best vacuum cleaner ever"
            }""",
        )

        writer.add_document(doc)

        doc = Document()
        doc.add_json(
            "attributes",
            {
                "order": 1.2,
                "target": "submit-button",
                "cart": {"product_id": 133},
                "description": "das keyboard",
            },
        )

        writer.add_document(doc)

        writer.commit()
        index.reload()

        query = index.parse_query("target:submit-button", ["attributes"])
        result = index.searcher().search(query, 2)
        assert len(result.hits) == 2

        query = index.parse_query("target:submit", ["attributes"])
        result = index.searcher().search(query, 2)
        assert len(result.hits) == 2

        query = index.parse_query("order:1.1", ["attributes"])
        result = index.searcher().search(query, 2)
        assert len(result.hits) == 1

        # query = index.parse_query_for_attributes("cart.product_id:103")
        # result = index.searcher().search(query, 1)
        # assert len(result.hits) == 1

        # query = index.parse_query_for_attributes(
        #     "target:submit-button AND cart.product_id:133"
        # )
        # result = index.searcher().search(query, 2)
        # assert len(result.hits) == 1
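

# Every payload below is a bytes-like view over b"abc"; the test only checks
# that indexing accepts each form without raising.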
@pytest.mark.parametrize("bytes_kwarg", [True, False])
@pytest.mark.parametrize(
    "bytes_payload",
    [
        b"abc",
        bytearray(b"abc"),
        memoryview(b"abc"),
        BytesIO(b"abc").read(),
        BytesIO(b"abc").getbuffer(),
    ],
)
def test_bytes(bytes_kwarg, bytes_payload):
    schema = SchemaBuilder().add_bytes_field("embedding").build()
    index = Index(schema)
    writer = index.writer()

    if bytes_kwarg:
        doc = Document(id=1, embedding=bytes_payload)
    else:
        doc = Document(id=1)
        doc.add_bytes("embedding", bytes_payload)

    writer.add_document(doc)
    writer.commit()
    index.reload()
    # No assertions: the test passes as long as every payload type is
    # accepted without raising.


def test_schema_eq():
    schema1 = schema()
    schema2 = schema()
    schema3 = schema_numeric_fields()

    assert schema1 == schema2
    assert schema1 != schema3
    assert schema2 != schema3


def test_facet_eq():
    facet1 = tantivy.Facet.from_string("/europe/france")
    facet2 = tantivy.Facet.from_string("/europe/france")
    facet3 = tantivy.Facet.from_string("/europe/germany")

    assert facet1 == facet2
    assert facet1 != facet3
    assert facet2 != facet3


def test_schema_pickle():
    orig = (
        SchemaBuilder()
        .add_integer_field("id", stored=True, indexed=True)
        .add_unsigned_field("unsigned")
        .add_float_field("rating", stored=True, indexed=True)
        .add_text_field("body", stored=True)
        .add_date_field("date")
        .add_json_field("json")
        .add_bytes_field("bytes")
        .build()
    )

    pickled = pickle.loads(pickle.dumps(orig))

    assert orig == pickled


def test_facet_pickle():
    orig = tantivy.Facet.from_string("/europe/france")
    pickled = pickle.loads(pickle.dumps(orig))

    assert orig == pickled


def test_doc_address_pickle():
    orig = tantivy.DocAddress(42, 123)
    pickled = pickle.loads(pickle.dumps(orig))

    assert orig == pickled
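

# SnippetGenerator produces highlighted fragments of a stored field for the
# documents matched by a query.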
class TestSnippets(object):
    def test_document_snippet(self, dir_index):
        index_dir, _ = dir_index
        doc_schema = schema()
        index = Index(doc_schema, str(index_dir))
        query = index.parse_query("sea whale", ["title", "body"])
        searcher = index.searcher()
        result = searcher.search(query)
        assert len(result.hits) == 1

        snippet_generator = SnippetGenerator.create(
            searcher, query, doc_schema, "title"
        )

        for score, doc_address in result.hits:
            doc = searcher.doc(doc_address)
            snippet = snippet_generator.snippet_from_doc(doc)
            highlights = snippet.highlighted()
            assert len(highlights) == 1
            first = highlights[0]
            assert first.start == 20
            assert first.end == 23
            html_snippet = snippet.to_html()
            assert html_snippet == "The Old Man and the <b>Sea</b>"
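

# TestQuery exercises the programmatic Query constructors (term_query,
# fuzzy_term_query, boolean_query, and friends) as an alternative to
# Index.parse_query.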
class TestQuery(object):
    def test_term_query(self, ram_index):
        index = ram_index
        query = Query.term_query(index.schema, "title", "sea")

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["The Old Man and the Sea"]

    def test_all_query(self, ram_index):
        index = ram_index
        query = Query.all_query()

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 3

    def test_fuzzy_term_query(self, ram_index):
        index = ram_index
        query = Query.fuzzy_term_query(index.schema, "title", "ice")
        # the query "ice" should match "mice"
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Of Mice and Men"]

        query = Query.fuzzy_term_query(index.schema, "title", "mna")
        # the query "mna" should match "man" since the default transposition cost is 1.
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        titles = set()
        for _, doc_address in result.hits:
            titles.update(index.searcher().doc(doc_address)["title"])
        assert titles == {"The Old Man and the Sea"}

        query = Query.fuzzy_term_query(
            index.schema, "title", "mna", transposition_cost_one=False
        )
        # the query "mna" should not match any doc, since the default distance is 1
        # and the transposition cost is now 2.
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 0

        query = Query.fuzzy_term_query(
            index.schema, "title", "mna", distance=2, transposition_cost_one=False
        )
        # the query "mna" should match both "man" and "men" since the distance is set to 2.
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 2
        titles = set()
        for _, doc_address in result.hits:
            titles.update(index.searcher().doc(doc_address)["title"])
        assert titles == {"The Old Man and the Sea", "Of Mice and Men"}

        query = Query.fuzzy_term_query(index.schema, "title", "fraken")
        # the query "fraken" should not match any doc.
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 0

        query = Query.fuzzy_term_query(index.schema, "title", "fraken", prefix=True)
        # the query "fraken" should match "franken", the prefix of "frankenstein",
        # with edit distance 1.
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        titles = set()
        for _, doc_address in result.hits:
            titles.update(index.searcher().doc(doc_address)["title"])
        assert titles == {"Frankenstein", "The Modern Prometheus"}

    def test_boolean_query(self, ram_index):
        index = ram_index
        query1 = Query.fuzzy_term_query(index.schema, "title", "ice")
        query2 = Query.fuzzy_term_query(index.schema, "title", "mna")
        query = Query.boolean_query([
            (Occur.Must, query1),
            (Occur.Must, query2),
        ])

        # no document should match both queries
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 0

        query = Query.boolean_query([
            (Occur.Should, query1),
            (Occur.Should, query2),
        ])

        # two documents should match, one for each query
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 2

        titles = set()
        for _, doc_address in result.hits:
            titles.update(index.searcher().doc(doc_address)["title"])
        assert (
            "The Old Man and the Sea" in titles and
            "Of Mice and Men" in titles
        )

        query = Query.boolean_query([
            (Occur.MustNot, query1),
            (Occur.Must, query1),
        ])

        # MustNot takes precedence over Must
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 0

        query = Query.boolean_query((
            (Occur.Should, query1),
            (Occur.Should, query2),
        ))

        # any sequence of (Occur, Query) tuples is accepted, not just a list
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 2

        # test invalid queries
        with pytest.raises(
            ValueError, match="expected tuple of length 2, but got tuple of length 3"
        ):
            Query.boolean_query([
                (Occur.Must, Occur.Must, query1),
            ])

        # test swapping the order of the tuple
        with pytest.raises(
            TypeError, match=r"'Query' object cannot be converted to 'Occur'"
        ):
            Query.boolean_query([
                (query1, Occur.Must),
            ])

    def test_disjunction_max_query(self, ram_index):
        index = ram_index

        # query1 should match the doc: "The Old Man and the Sea"
        query1 = Query.term_query(index.schema, "title", "sea")
        # query2 should match the doc: "Of Mice and Men"
        query2 = Query.term_query(index.schema, "title", "mice")
        # the disjunction max query should match both docs.
        query = Query.disjunction_max_query([query1, query2])

        result = index.searcher().search(query, 10)
        assert len(result.hits) == 2

        # the disjunction max query also takes an optional tie_breaker parameter
        query = Query.disjunction_max_query([query1, query2], tie_breaker=0.5)
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 2

        with pytest.raises(
            TypeError, match=r"'str' object cannot be converted to 'Query'"
        ):
            query = Query.disjunction_max_query(
                [query1, "not a query"], tie_breaker=0.5
            )

    def test_boost_query(self, ram_index):
        index = ram_index
        query1 = Query.term_query(index.schema, "title", "sea")
        boosted_query = Query.boost_query(query1, 2.0)

        # Normal boost query
        assert (
            repr(boosted_query)
            == """Query(Boost(query=TermQuery(Term(field=0, type=Str, "sea")), boost=2))"""
        )

        query2 = Query.fuzzy_term_query(index.schema, "title", "ice")
        combined_query = Query.boolean_query([
            (Occur.Should, boosted_query),
            (Occur.Should, query2),
        ])
        boosted_query = Query.boost_query(combined_query, 2.0)

        # Boosted boolean query
        assert (
            repr(boosted_query)
            == """Query(Boost(query=BooleanQuery { subqueries: [(Should, Boost(query=TermQuery(Term(field=0, type=Str, "sea")), boost=2)), (Should, FuzzyTermQuery { term: Term(field=0, type=Str, "ice"), distance: 1, transposition_cost_one: true, prefix: false })] }, boost=2))"""
        )

        boosted_query = Query.boost_query(query1, 0.1)

        # Check for decimal boost values
        assert (
            repr(boosted_query)
            == """Query(Boost(query=TermQuery(Term(field=0, type=Str, "sea")), boost=0.1))"""
        )

        boosted_query = Query.boost_query(query1, 0.0)

        # Check for zero boost values
        assert (
            repr(boosted_query)
            == """Query(Boost(query=TermQuery(Term(field=0, type=Str, "sea")), boost=0))"""
        )
        result = index.searcher().search(boosted_query, 10)
        for _score, _ in result.hits:
            # the score should be 0.0
            assert _score == pytest.approx(0.0)

        boosted_query = Query.boost_query(Query.boost_query(query1, 0.1), 0.1)

        # Check for nested boost queries
        assert (
            repr(boosted_query)
            == """Query(Boost(query=Boost(query=TermQuery(Term(field=0, type=Str, "sea")), boost=0.1), boost=0.1))"""
        )
        result = index.searcher().search(boosted_query, 10)
        for _score, _ in result.hits:
            # the score should be very small; since the exact BM25 score is not
            # known in advance, we can only check the relative difference
            assert _score == pytest.approx(0.01, rel=1)

        boosted_query = Query.boost_query(query1, -0.1)

        # Check for negative boost values
        assert (
            repr(boosted_query)
            == """Query(Boost(query=TermQuery(Term(field=0, type=Str, "sea")), boost=-0.1))"""
        )

        result = index.searcher().search(boosted_query, 10)
        # Even with a negative boost, the query should still match the document
        assert len(result.hits) == 1
        titles = set()
        for _score, doc_address in result.hits:
            # the score should be negative
            assert _score < 0
            titles.update(index.searcher().doc(doc_address)["title"])
        assert titles == {"The Old Man and the Sea"}

        # wrong query type
        with pytest.raises(
            TypeError, match=r"'int' object cannot be converted to 'Query'"
        ):
            Query.boost_query(1, 0.1)

        # wrong boost type
        with pytest.raises(
            TypeError, match=r"argument 'boost': must be real number, not str"
        ):
            Query.boost_query(query1, "0.1")

        # missing boost argument
        with pytest.raises(
            TypeError,
            match=r"Query.boost_query\(\) missing 1 required positional argument: 'boost'",
        ):
            Query.boost_query(query1)

    def test_regex_query(self, ram_index):
        index = ram_index

        query = Query.regex_query(index.schema, "body", "fish")
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 1
        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["The Old Man and the Sea"]

        query = Query.regex_query(index.schema, "title", "(?:man|men)")
        result = index.searcher().search(query, 10)
        assert len(result.hits) == 2
        _, doc_address = result.hits[0]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["The Old Man and the Sea"]
        _, doc_address = result.hits[1]
        searched_doc = index.searcher().doc(doc_address)
        assert searched_doc["title"] == ["Of Mice and Men"]

        # unknown field in the schema
        with pytest.raises(
            ValueError, match="Field `unknown_field` is not defined in the schema."
        ):
            Query.regex_query(index.schema, "unknown_field", "fish")

        # invalid regex pattern
        with pytest.raises(
            ValueError, match=r"An invalid argument was passed: 'fish\('"
        ):
            Query.regex_query(index.schema, "body", "fish(")