|
| 1 | +from typing import Any, Dict, List, Optional, Set, Tuple, Union |
| 2 | + |
| 3 | +import nltk |
| 4 | +from nltk.corpus import stopwords as nltk_stopwords |
| 5 | +from redis.commands.search.aggregation import AggregateRequest, Desc |
| 6 | + |
| 7 | +from redisvl.query.filter import FilterExpression |
| 8 | +from redisvl.redis.utils import array_to_buffer |
| 9 | +from redisvl.utils.token_escaper import TokenEscaper |
| 10 | + |
| 11 | + |
# base class
class AggregationQuery(AggregateRequest):
    """Common base for all RedisVL aggregation query types.

    A thin wrapper over redis-py's ``AggregateRequest`` that accepts the
    raw query string used to seed the aggregation pipeline.
    """

    def __init__(self, query_string):
        """Initialize the aggregation with the given Redis query string."""
        super().__init__(query_string)
| 20 | + |
| 21 | + |
class HybridAggregationQuery(AggregationQuery):
    """
    HybridAggregationQuery combines text and vector search in Redis.
    It allows you to perform a hybrid search using both text and vector similarity.
    It scores documents based on a weighted combination of text and vector similarity.
    """

    # Alias under which the KNN distance is yielded in each result row.
    DISTANCE_ID: str = "vector_distance"
    # Name of the query parameter that carries the binary vector blob.
    VECTOR_PARAM: str = "vector"

    def __init__(
        self,
        text: str,
        text_field: str,
        vector: Union[bytes, List[float]],
        vector_field: str,
        text_scorer: str = "BM25STD",
        filter_expression: Optional[Union[str, FilterExpression]] = None,
        alpha: float = 0.7,
        dtype: str = "float32",
        num_results: int = 10,
        return_fields: Optional[List[str]] = None,
        stopwords: Optional[Union[str, Set[str]]] = "english",
        dialect: int = 4,
    ):
        """
        Instantiates a HybridAggregationQuery object.

        Args:
            text (str): The text to search for.
            text_field (str): The text field name to search in.
            vector (Union[bytes, List[float]]): The vector to perform vector similarity search.
            vector_field (str): The vector field name to search in.
            text_scorer (str, optional): The text scorer to use. Options are {TFIDF, TFIDF.DOCNORM,
                BM25, DISMAX, DOCSCORE, BM25STD}. Defaults to "BM25STD".
            filter_expression (Optional[Union[str, FilterExpression]], optional): The filter
                expression to use. May be a ``FilterExpression`` or a raw Redis filter string.
                Defaults to None.
            alpha (float, optional): The weight of the vector similarity. Documents will be scored
                as: hybrid_score = (alpha) * vector_score + (1-alpha) * text_score.
                Defaults to 0.7.
            dtype (str, optional): The data type of the vector. Defaults to "float32".
            num_results (int, optional): The number of results to return. Defaults to 10.
            return_fields (Optional[List[str]], optional): The fields to return. Defaults to None.
            stopwords (Optional[Union[str, Set[str]]], optional): The stopwords to remove from the
                provided text prior to search. If a string such as "english" or "german" is
                provided then a default set of stopwords for that language will be used. If a
                list, set, or tuple of strings is provided then those will be used as stopwords.
                Defaults to "english". If set to None then no stopwords will be removed.
            dialect (int, optional): The Redis dialect version. Defaults to 4.

        Raises:
            ValueError: If the text string is empty, or if the text string becomes empty after
                stopwords are removed.
            TypeError: If the stopwords are not a set, list, or tuple of strings.

        .. code-block:: python
            from redisvl.query.aggregate import HybridAggregationQuery
            from redisvl.index import SearchIndex

            index = SearchIndex("my_index")

            query = HybridAggregationQuery(
                text="example text",
                text_field="text_field",
                vector=[0.1, 0.2, 0.3],
                vector_field="vector_field",
                text_scorer="BM25STD",
                filter_expression=None,
                alpha=0.7,
                dtype="float32",
                num_results=10,
                return_fields=["field1", "field2"],
                stopwords="english",
                dialect=4,
            )

            results = index.aggregate_query(query)
        """
        if not text.strip():
            raise ValueError("text string cannot be empty")

        self._text = text
        self._text_field = text_field
        self._vector = vector
        self._vector_field = vector_field
        self._filter_expression = filter_expression
        self._alpha = alpha
        self._dtype = dtype
        self._num_results = num_results
        # Must run before _build_query_string(), which tokenizes self._text.
        self.set_stopwords(stopwords)

        query_string = self._build_query_string()
        super().__init__(query_string)

        self.scorer(text_scorer)
        self.add_scores()
        # Normalize the cosine distance (range [0, 2]) into a similarity in [0, 1];
        # @__score is the full-text score exposed by add_scores().
        self.apply(
            vector_similarity=f"(2 - @{self.DISTANCE_ID})/2", text_score="@__score"
        )
        self.apply(hybrid_score=f"{1-alpha}*@text_score + {alpha}*@vector_similarity")
        self.sort_by(Desc("@hybrid_score"), max=num_results)
        self.dialect(dialect)

        if return_fields:
            self.load(*return_fields)

    @property
    def params(self) -> Dict[str, Any]:
        """Return the parameters for the aggregation.

        Returns:
            Dict[str, Any]: The parameters for the aggregation.
        """
        if isinstance(self._vector, bytes):
            vector = self._vector
        else:
            # Serialize the float list into the binary blob Redis expects.
            vector = array_to_buffer(self._vector, dtype=self._dtype)

        return {self.VECTOR_PARAM: vector}

    @property
    def stopwords(self) -> Set[str]:
        """Return the stopwords used in the query.

        Returns:
            Set[str]: The stopwords used in the query.
        """
        # Return a copy so callers cannot mutate internal state.
        return self._stopwords.copy() if self._stopwords else set()

    def set_stopwords(self, stopwords: Optional[Union[str, Set[str]]] = "english"):
        """Set the stopwords to use in the query.

        Args:
            stopwords (Optional[Union[str, Set[str]]]): The stopwords to use. If a string
                such as "english" or "german" is provided then a default set of stopwords for
                that language will be used. If a list, set, or tuple of strings is provided
                then those will be used as stopwords. Defaults to "english". If set to None
                then no stopwords will be removed.

        Raises:
            TypeError: If the stopwords are not a set, list, or tuple of strings.
            ValueError: If the language corpus cannot be loaded from nltk.
        """
        if not stopwords:
            self._stopwords = set()
        elif isinstance(stopwords, str):
            try:
                # quiet=True: skip the download chatter (and the download itself when
                # the corpus is already cached locally).
                nltk.download("stopwords", quiet=True)
                self._stopwords = set(nltk_stopwords.words(stopwords))
            except Exception as e:
                raise ValueError(f"Error trying to load {stopwords} from nltk. {e}")
        elif isinstance(stopwords, (set, list, tuple)) and all(
            isinstance(word, str) for word in stopwords
        ):
            self._stopwords = set(stopwords)
        else:
            raise TypeError("stopwords must be a set, list, or tuple of strings")

    def tokenize_and_escape_query(self, user_query: str) -> str:
        """Convert a raw user query to a redis full text query joined by ORs.

        Args:
            user_query (str): The user query to tokenize and escape.

        Returns:
            str: The tokenized and escaped query string.

        Raises:
            ValueError: If the text string becomes empty after stopwords are removed.
        """
        escaper = TokenEscaper()

        # Strip surrounding punctuation/smart quotes and lowercase before escaping
        # Redis special characters.
        tokens = [
            escaper.escape(
                token.strip().strip(",").replace("“", "").replace("”", "").lower()
            )
            for token in user_query.split()
        ]
        tokenized = " | ".join(
            [token for token in tokens if token and token not in self._stopwords]
        )

        if not tokenized:
            raise ValueError("text string cannot be empty after removing stopwords")
        return tokenized

    def _build_query_string(self) -> str:
        """Build the full query string for text search with optional filtering."""
        if isinstance(self._filter_expression, FilterExpression):
            filter_expression = str(self._filter_expression)
        elif isinstance(self._filter_expression, str):
            # A raw filter string is accepted per the constructor signature;
            # previously it was silently dropped.
            filter_expression = self._filter_expression
        else:
            filter_expression = ""

        # base KNN query
        knn_query = f"KNN {self._num_results} @{self._vector_field} ${self.VECTOR_PARAM} AS {self.DISTANCE_ID}"

        # '~' makes the text clause optional so pure-vector matches still return.
        text = f"(~@{self._text_field}:({self.tokenize_and_escape_query(self._text)}))"

        if filter_expression and filter_expression != "*":
            text += f"({filter_expression})"

        return f"{text}=>[{knn_query}]"
0 commit comments