diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 495e89ca..025387ec 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -32,7 +32,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: "3.12"
+ python-version: "3.13"
- name: Install dependencies
run: |
python3 -m pip install nox
@@ -62,7 +62,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: "3.12"
+ python-version: "3.x"
- name: Install dependencies
run: |
python3 -m pip install nox
@@ -81,8 +81,9 @@ jobs:
"3.10",
"3.11",
"3.12",
+ "3.13",
]
- es-version: [8.0.0, 8.13.0]
+ es-version: [8.0.0, 8.15.0]
steps:
- name: Remove irrelevant software to free up disk space
@@ -104,11 +105,6 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- - name: Set up Python for Nox
- if: matrix.python-version != '3.12'
- uses: actions/setup-python@v4
- with:
- python-version: 3
- name: Install dependencies
run: |
python3 -m pip install nox
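
Note: the matrix values above are handed to `actions/setup-python` and then consumed by nox. For context, a noxfile consuming the same interpreter list might look like the minimal sketch below; the session name, extras name, and pytest invocation are illustrative assumptions, not taken from this diff.

import nox

# Interpreter list mirroring the visible part of the CI matrix
# (with 3.13 newly added by this change).
PYTHON_VERSIONS = ["3.10", "3.11", "3.12", "3.13"]

@nox.session(python=PYTHON_VERSIONS)
def test(session: nox.Session) -> None:
    """Install the package and run the test suite on each interpreter."""
    session.install(".[develop]")  # assumed extras name
    session.run("pytest")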
diff --git a/elasticsearch_dsl/response/__init__.py b/elasticsearch_dsl/response/__init__.py
index a711950f..eea1b87f 100644
--- a/elasticsearch_dsl/response/__init__.py
+++ b/elasticsearch_dsl/response/__init__.py
@@ -40,11 +40,18 @@
from ..search_base import Request, SearchBase
from ..update_by_query_base import UpdateByQueryBase
-__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta"]
+__all__ = [
+ "Response",
+ "AggResponse",
+ "UpdateByQueryResponse",
+ "Hit",
+ "HitMeta",
+ "AggregateResponseType",
+]
class Response(AttrDict[Any], Generic[_R]):
- """An Elasticsearch _search response.
+ """An Elasticsearch search response.
:arg took: (required)
:arg timed_out: (required)
@@ -195,21 +202,100 @@ def search_after(self) -> "SearchBase[_R]":
return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore
+AggregateResponseType = Union[
+ "types.CardinalityAggregate",
+ "types.HdrPercentilesAggregate",
+ "types.HdrPercentileRanksAggregate",
+ "types.TDigestPercentilesAggregate",
+ "types.TDigestPercentileRanksAggregate",
+ "types.PercentilesBucketAggregate",
+ "types.MedianAbsoluteDeviationAggregate",
+ "types.MinAggregate",
+ "types.MaxAggregate",
+ "types.SumAggregate",
+ "types.AvgAggregate",
+ "types.WeightedAvgAggregate",
+ "types.ValueCountAggregate",
+ "types.SimpleValueAggregate",
+ "types.DerivativeAggregate",
+ "types.BucketMetricValueAggregate",
+ "types.StatsAggregate",
+ "types.StatsBucketAggregate",
+ "types.ExtendedStatsAggregate",
+ "types.ExtendedStatsBucketAggregate",
+ "types.GeoBoundsAggregate",
+ "types.GeoCentroidAggregate",
+ "types.HistogramAggregate",
+ "types.DateHistogramAggregate",
+ "types.AutoDateHistogramAggregate",
+ "types.VariableWidthHistogramAggregate",
+ "types.StringTermsAggregate",
+ "types.LongTermsAggregate",
+ "types.DoubleTermsAggregate",
+ "types.UnmappedTermsAggregate",
+ "types.LongRareTermsAggregate",
+ "types.StringRareTermsAggregate",
+ "types.UnmappedRareTermsAggregate",
+ "types.MultiTermsAggregate",
+ "types.MissingAggregate",
+ "types.NestedAggregate",
+ "types.ReverseNestedAggregate",
+ "types.GlobalAggregate",
+ "types.FilterAggregate",
+ "types.ChildrenAggregate",
+ "types.ParentAggregate",
+ "types.SamplerAggregate",
+ "types.UnmappedSamplerAggregate",
+ "types.GeoHashGridAggregate",
+ "types.GeoTileGridAggregate",
+ "types.GeoHexGridAggregate",
+ "types.RangeAggregate",
+ "types.DateRangeAggregate",
+ "types.GeoDistanceAggregate",
+ "types.IpRangeAggregate",
+ "types.IpPrefixAggregate",
+ "types.FiltersAggregate",
+ "types.AdjacencyMatrixAggregate",
+ "types.SignificantLongTermsAggregate",
+ "types.SignificantStringTermsAggregate",
+ "types.UnmappedSignificantTermsAggregate",
+ "types.CompositeAggregate",
+ "types.FrequentItemSetsAggregate",
+ "types.TimeSeriesAggregate",
+ "types.ScriptedMetricAggregate",
+ "types.TopHitsAggregate",
+ "types.InferenceAggregate",
+ "types.StringStatsAggregate",
+ "types.BoxPlotAggregate",
+ "types.TopMetricsAggregate",
+ "types.TTestAggregate",
+ "types.RateAggregate",
+ "types.CumulativeCardinalityAggregate",
+ "types.MatrixStatsAggregate",
+ "types.GeoLineAggregate",
+]
+
+
class AggResponse(AttrDict[Any], Generic[_R]):
+ """An Elasticsearch aggregation response."""
+
_meta: Dict[str, Any]
def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]):
super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs})
super().__init__(data)
- def __getitem__(self, attr_name: str) -> Any:
+ def __getitem__(self, attr_name: str) -> AggregateResponseType:
if attr_name in self._meta["aggs"]:
# don't do self._meta['aggs'][attr_name] to avoid copying
agg = self._meta["aggs"].aggs[attr_name]
- return agg.result(self._meta["search"], self._d_[attr_name])
- return super().__getitem__(attr_name)
+ return cast(
+ AggregateResponseType,
+ agg.result(self._meta["search"], self._d_[attr_name]),
+ )
+ return super().__getitem__(attr_name) # type: ignore
- def __iter__(self) -> Iterator["Agg"]: # type: ignore[override]
+ def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override]
for name in self._meta["aggs"]:
yield self[name]
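
Note: the practical effect of the typed `__getitem__` above is that aggregation results are presented to type checkers as one of the `types.*Aggregate` classes in `AggregateResponseType` rather than a plain `Any`. A minimal usage sketch, assuming a configured default connection; the index and field names are hypothetical:

from elasticsearch_dsl import Search

s = Search(index="my-index")
s.aggs.bucket("per_tag", "terms", field="tags")
response = s.execute()

# Type checkers now see an AggregateResponseType member here
# (a StringTermsAggregate for a terms aggregation).
per_tag = response.aggregations.per_tag
for bucket in per_tag.buckets:
    print(bucket.key, bucket.doc_count)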
diff --git a/elasticsearch_dsl/types.py b/elasticsearch_dsl/types.py
index 64918ce3..a08c4c7f 100644
--- a/elasticsearch_dsl/types.py
+++ b/elasticsearch_dsl/types.py
@@ -26,10 +26,6 @@
PipeSeparatedFlags = str
-class Aggregation(AttrDict[Any]):
- pass
-
-
class AggregationRange(AttrDict[Any]):
"""
:arg from: Start of the range (inclusive).
@@ -147,27 +143,6 @@ def __init__(
super().__init__(kwargs)
-class BucketPathAggregation(Aggregation):
- """
- :arg buckets_path: Path to the buckets that contain one set of values
- to correlate.
- """
-
- buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
-
- def __init__(
- self,
- *,
- buckets_path: Union[
- str, Sequence[str], Mapping[str, str], DefaultType
- ] = DEFAULT,
- **kwargs: Any,
- ):
- if buckets_path is not DEFAULT:
- kwargs["buckets_path"] = buckets_path
- super().__init__(**kwargs)
-
-
class ChiSquareHeuristic(AttrDict[Any]):
"""
:arg background_is_superset: (required) Set to `false` if you defined
@@ -241,34 +216,7 @@ def __init__(
super().__init__(kwargs)
-class QueryBase(AttrDict[Any]):
- """
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
- """
-
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
-
- def __init__(
- self,
- *,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(kwargs)
-
-
-class CommonTermsQuery(QueryBase):
+class CommonTermsQuery(AttrDict[Any]):
"""
:arg query: (required)
:arg analyzer:
@@ -322,7 +270,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class CoordsGeoBounds(AttrDict[Any]):
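
Note: the recurring `super().__init__(**kwargs)` → `super().__init__(kwargs)` change in this file follows from the base class switch: with `QueryBase` removed, these classes derive directly from `AttrDict`, whose initializer takes the backing dict as a single positional argument rather than keyword arguments. A toy sketch of the distinction (a simplified stand-in, not the real `AttrDict`):

from typing import Any, Dict

class AttrDict:
    """Simplified stand-in: wraps the one dict it is given."""
    def __init__(self, d: Dict[str, Any]):
        self._d_ = d

class TermQuery(AttrDict):
    def __init__(self, *, value: Any = None, **kwargs: Any):
        if value is not None:
            kwargs["value"] = value
        super().__init__(kwargs)  # pass the dict itself, not **kwargs

q = TermQuery(value="python")
assert q._d_ == {"value": "python"}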
@@ -415,7 +363,12 @@ def __init__(
class EmptyObject(AttrDict[Any]):
- pass
+ """
+ For empty class assignments
+ """
+
+ def __init__(self, **kwargs: Any):
+ super().__init__(kwargs)
class EwmaModelSettings(AttrDict[Any]):
@@ -450,6 +403,9 @@ def __init__(self, *, max: Any = DEFAULT, min: Any = DEFAULT, **kwargs: Any):
class FieldAndFormat(AttrDict[Any]):
"""
+ A reference to a field with formatting instructions on how to return
+ the value
+
:arg field: (required) Wildcard pattern. The request returns values
for field names matching this pattern.
:arg format: Format in which the values are returned.
@@ -800,7 +756,7 @@ def __init__(
super().__init__(kwargs)
-class FuzzyQuery(QueryBase):
+class FuzzyQuery(AttrDict[Any]):
"""
:arg value: (required) Term you wish to find in the provided field.
:arg max_expansions: Maximum number of variations created. Defaults to
@@ -859,7 +815,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class GeoDistanceSort(AttrDict[Any]):
@@ -1084,171 +1040,7 @@ def __init__(
super().__init__(kwargs)
-class HighlightBase(AttrDict[Any]):
- """
- :arg type:
- :arg boundary_chars: A string that contains each boundary character.
- Defaults to `.,!? \t\n` if omitted.
- :arg boundary_max_scan: How far to scan for boundary characters.
- Defaults to `20` if omitted.
- :arg boundary_scanner: Specifies how to break the highlighted
- fragments: chars, sentence, or word. Only valid for the unified
- and fvh highlighters. Defaults to `sentence` for the `unified`
- highlighter. Defaults to `chars` for the `fvh` highlighter.
- :arg boundary_scanner_locale: Controls which locale is used to search
- for sentence and word boundaries. This parameter takes a form of a
- language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
- Defaults to `Locale.ROOT` if omitted.
- :arg force_source:
- :arg fragmenter: Specifies how text should be broken up in highlight
- snippets: `simple` or `span`. Only valid for the `plain`
- highlighter. Defaults to `span` if omitted.
- :arg fragment_size: The size of the highlighted fragment in
- characters. Defaults to `100` if omitted.
- :arg highlight_filter:
- :arg highlight_query: Highlight matches for a query other than the
- search query. This is especially useful if you use a rescore query
- because those are not taken into account by highlighting by
- default.
- :arg max_fragment_length:
- :arg max_analyzed_offset: If set to a non-negative value, highlighting
- stops at this defined maximum limit. The rest of the text is not
- processed, thus not highlighted and no error is returned The
- `max_analyzed_offset` query setting does not override the
- `index.highlight.max_analyzed_offset` setting, which prevails when
- it’s set to lower value than the query setting.
- :arg no_match_size: The amount of text you want to return from the
- beginning of the field if there are no matching fragments to
- highlight.
- :arg number_of_fragments: The maximum number of fragments to return.
- If the number of fragments is set to `0`, no fragments are
- returned. Instead, the entire field contents are highlighted and
- returned. This can be handy when you need to highlight short texts
- such as a title or address, but fragmentation is not required. If
- `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
- to `5` if omitted.
- :arg options:
- :arg order: Sorts highlighted fragments by score when set to `score`.
- By default, fragments will be output in the order they appear in
- the field (order: `none`). Setting this option to `score` will
- output the most relevant fragments first. Each highlighter applies
- its own logic to compute relevancy scores. Defaults to `none` if
- omitted.
- :arg phrase_limit: Controls the number of matching phrases in a
- document that are considered. Prevents the `fvh` highlighter from
- analyzing too many phrases and consuming too much memory. When
- using `matched_fields`, `phrase_limit` phrases per matched field
- are considered. Raising the limit increases query time and
- consumes more memory. Only supported by the `fvh` highlighter.
- Defaults to `256` if omitted.
- :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg require_field_match: By default, only fields that contains a
- query match are highlighted. Set to `false` to highlight all
- fields. Defaults to `True` if omitted.
- :arg tags_schema: Set to `styled` to use the built-in tag schema.
- """
-
- type: Union[Literal["plain", "fvh", "unified"], DefaultType]
- boundary_chars: Union[str, DefaultType]
- boundary_max_scan: Union[int, DefaultType]
- boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
- boundary_scanner_locale: Union[str, DefaultType]
- force_source: Union[bool, DefaultType]
- fragmenter: Union[Literal["simple", "span"], DefaultType]
- fragment_size: Union[int, DefaultType]
- highlight_filter: Union[bool, DefaultType]
- highlight_query: Union[Query, DefaultType]
- max_fragment_length: Union[int, DefaultType]
- max_analyzed_offset: Union[int, DefaultType]
- no_match_size: Union[int, DefaultType]
- number_of_fragments: Union[int, DefaultType]
- options: Union[Mapping[str, Any], DefaultType]
- order: Union[Literal["score"], DefaultType]
- phrase_limit: Union[int, DefaultType]
- post_tags: Union[Sequence[str], DefaultType]
- pre_tags: Union[Sequence[str], DefaultType]
- require_field_match: Union[bool, DefaultType]
- tags_schema: Union[Literal["styled"], DefaultType]
-
- def __init__(
- self,
- *,
- type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
- boundary_chars: Union[str, DefaultType] = DEFAULT,
- boundary_max_scan: Union[int, DefaultType] = DEFAULT,
- boundary_scanner: Union[
- Literal["chars", "sentence", "word"], DefaultType
- ] = DEFAULT,
- boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
- force_source: Union[bool, DefaultType] = DEFAULT,
- fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
- fragment_size: Union[int, DefaultType] = DEFAULT,
- highlight_filter: Union[bool, DefaultType] = DEFAULT,
- highlight_query: Union[Query, DefaultType] = DEFAULT,
- max_fragment_length: Union[int, DefaultType] = DEFAULT,
- max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
- no_match_size: Union[int, DefaultType] = DEFAULT,
- number_of_fragments: Union[int, DefaultType] = DEFAULT,
- options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["score"], DefaultType] = DEFAULT,
- phrase_limit: Union[int, DefaultType] = DEFAULT,
- post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- require_field_match: Union[bool, DefaultType] = DEFAULT,
- tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if type is not DEFAULT:
- kwargs["type"] = type
- if boundary_chars is not DEFAULT:
- kwargs["boundary_chars"] = boundary_chars
- if boundary_max_scan is not DEFAULT:
- kwargs["boundary_max_scan"] = boundary_max_scan
- if boundary_scanner is not DEFAULT:
- kwargs["boundary_scanner"] = boundary_scanner
- if boundary_scanner_locale is not DEFAULT:
- kwargs["boundary_scanner_locale"] = boundary_scanner_locale
- if force_source is not DEFAULT:
- kwargs["force_source"] = force_source
- if fragmenter is not DEFAULT:
- kwargs["fragmenter"] = fragmenter
- if fragment_size is not DEFAULT:
- kwargs["fragment_size"] = fragment_size
- if highlight_filter is not DEFAULT:
- kwargs["highlight_filter"] = highlight_filter
- if highlight_query is not DEFAULT:
- kwargs["highlight_query"] = highlight_query
- if max_fragment_length is not DEFAULT:
- kwargs["max_fragment_length"] = max_fragment_length
- if max_analyzed_offset is not DEFAULT:
- kwargs["max_analyzed_offset"] = max_analyzed_offset
- if no_match_size is not DEFAULT:
- kwargs["no_match_size"] = no_match_size
- if number_of_fragments is not DEFAULT:
- kwargs["number_of_fragments"] = number_of_fragments
- if options is not DEFAULT:
- kwargs["options"] = options
- if order is not DEFAULT:
- kwargs["order"] = order
- if phrase_limit is not DEFAULT:
- kwargs["phrase_limit"] = phrase_limit
- if post_tags is not DEFAULT:
- kwargs["post_tags"] = post_tags
- if pre_tags is not DEFAULT:
- kwargs["pre_tags"] = pre_tags
- if require_field_match is not DEFAULT:
- kwargs["require_field_match"] = require_field_match
- if tags_schema is not DEFAULT:
- kwargs["tags_schema"] = tags_schema
- super().__init__(kwargs)
-
-
-class Highlight(HighlightBase):
+class Highlight(AttrDict[Any]):
"""
:arg fields: (required)
:arg encoder:
@@ -1427,10 +1219,10 @@ def __init__(
kwargs["require_field_match"] = require_field_match
if tags_schema is not DEFAULT:
kwargs["tags_schema"] = tags_schema
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class HighlightField(HighlightBase):
+class HighlightField(AttrDict[Any]):
"""
:arg fragment_offset:
:arg matched_fields:
@@ -1614,7 +1406,7 @@ def __init__(
kwargs["require_field_match"] = require_field_match
if tags_schema is not DEFAULT:
kwargs["tags_schema"] = tags_schema
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class HoltLinearModelSettings(AttrDict[Any]):
@@ -2164,7 +1956,7 @@ def __init__(
super().__init__(kwargs)
-class IntervalsQuery(QueryBase):
+class IntervalsQuery(AttrDict[Any]):
"""
:arg all_of: Returns matches that span a combination of other rules.
:arg any_of: Returns intervals produced by any of its sub-rules.
@@ -2220,7 +2012,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class IntervalsWildcard(AttrDict[Any]):
@@ -2365,7 +2157,7 @@ def __init__(
super().__init__(kwargs)
-class MatchBoolPrefixQuery(QueryBase):
+class MatchBoolPrefixQuery(AttrDict[Any]):
"""
:arg query: (required) Terms you wish to find in the provided field.
The last term is used in a prefix query.
@@ -2451,10 +2243,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class MatchPhrasePrefixQuery(QueryBase):
+class MatchPhrasePrefixQuery(AttrDict[Any]):
"""
:arg query: (required) Text you wish to find in the provided field.
:arg analyzer: Analyzer used to convert text in the query value into
@@ -2509,10 +2301,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class MatchPhraseQuery(QueryBase):
+class MatchPhraseQuery(AttrDict[Any]):
"""
:arg query: (required) Query terms that are analyzed and turned into a
phrase query.
@@ -2561,10 +2353,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class MatchQuery(QueryBase):
+class MatchQuery(AttrDict[Any]):
"""
:arg query: (required) Text, number, boolean value or date you wish to
find in the provided field.
@@ -2668,94 +2460,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
-
-
-class PipelineAggregationBase(BucketPathAggregation):
- """
- :arg format: `DecimalFormat` pattern for the output value. If
- specified, the formatted value is returned in the aggregation’s
- `value_as_string` property.
- :arg gap_policy: Policy to apply when gaps are found in the data.
- Defaults to `skip` if omitted.
- :arg buckets_path: Path to the buckets that contain one set of values
- to correlate.
- """
-
- format: Union[str, DefaultType]
- gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
- buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
-
- def __init__(
- self,
- *,
- format: Union[str, DefaultType] = DEFAULT,
- gap_policy: Union[
- Literal["skip", "insert_zeros", "keep_values"], DefaultType
- ] = DEFAULT,
- buckets_path: Union[
- str, Sequence[str], Mapping[str, str], DefaultType
- ] = DEFAULT,
- **kwargs: Any,
- ):
- if format is not DEFAULT:
- kwargs["format"] = format
- if gap_policy is not DEFAULT:
- kwargs["gap_policy"] = gap_policy
- if buckets_path is not DEFAULT:
- kwargs["buckets_path"] = buckets_path
- super().__init__(**kwargs)
-
-
-class MovingAverageAggregationBase(PipelineAggregationBase):
- """
- :arg minimize:
- :arg predict:
- :arg window:
- :arg format: `DecimalFormat` pattern for the output value. If
- specified, the formatted value is returned in the aggregation’s
- `value_as_string` property.
- :arg gap_policy: Policy to apply when gaps are found in the data.
- Defaults to `skip` if omitted.
- :arg buckets_path: Path to the buckets that contain one set of values
- to correlate.
- """
-
- minimize: Union[bool, DefaultType]
- predict: Union[int, DefaultType]
- window: Union[int, DefaultType]
- format: Union[str, DefaultType]
- gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
- buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
-
- def __init__(
- self,
- *,
- minimize: Union[bool, DefaultType] = DEFAULT,
- predict: Union[int, DefaultType] = DEFAULT,
- window: Union[int, DefaultType] = DEFAULT,
- format: Union[str, DefaultType] = DEFAULT,
- gap_policy: Union[
- Literal["skip", "insert_zeros", "keep_values"], DefaultType
- ] = DEFAULT,
- buckets_path: Union[
- str, Sequence[str], Mapping[str, str], DefaultType
- ] = DEFAULT,
- **kwargs: Any,
- ):
- if minimize is not DEFAULT:
- kwargs["minimize"] = minimize
- if predict is not DEFAULT:
- kwargs["predict"] = predict
- if window is not DEFAULT:
- kwargs["window"] = window
- if format is not DEFAULT:
- kwargs["format"] = format
- if gap_policy is not DEFAULT:
- kwargs["gap_policy"] = gap_policy
- if buckets_path is not DEFAULT:
- kwargs["buckets_path"] = buckets_path
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class MultiTermLookup(AttrDict[Any]):
@@ -2869,7 +2574,7 @@ def __init__(
super().__init__(kwargs)
-class PrefixQuery(QueryBase):
+class PrefixQuery(AttrDict[Any]):
"""
:arg value: (required) Beginning characters of terms you wish to find
in the provided field.
@@ -2912,7 +2617,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class QueryVectorBuilder(AttrDict[Any]):
@@ -2933,15 +2638,11 @@ def __init__(
super().__init__(kwargs)
-class RankFeatureFunction(AttrDict[Any]):
- pass
-
-
-class RankFeatureFunctionLinear(RankFeatureFunction):
+class RankFeatureFunctionLinear(AttrDict[Any]):
pass
-class RankFeatureFunctionLogarithm(RankFeatureFunction):
+class RankFeatureFunctionLogarithm(AttrDict[Any]):
"""
:arg scaling_factor: (required) Configurable scaling factor.
"""
@@ -2953,10 +2654,10 @@ def __init__(
):
if scaling_factor is not DEFAULT:
kwargs["scaling_factor"] = scaling_factor
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class RankFeatureFunctionSaturation(RankFeatureFunction):
+class RankFeatureFunctionSaturation(AttrDict[Any]):
"""
:arg pivot: Configurable pivot value so that the result will be less
than 0.5.
@@ -2967,10 +2668,10 @@ class RankFeatureFunctionSaturation(RankFeatureFunction):
def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any):
if pivot is not DEFAULT:
kwargs["pivot"] = pivot
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class RankFeatureFunctionSigmoid(RankFeatureFunction):
+class RankFeatureFunctionSigmoid(AttrDict[Any]):
"""
:arg pivot: (required) Configurable pivot value so that the result
will be less than 0.5.
@@ -2991,10 +2692,10 @@ def __init__(
kwargs["pivot"] = pivot
if exponent is not DEFAULT:
kwargs["exponent"] = exponent
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class RegexpQuery(QueryBase):
+class RegexpQuery(AttrDict[Any]):
"""
:arg value: (required) Regular expression for terms you wish to find
in the provided field.
@@ -3048,7 +2749,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class RegressionInferenceOptions(AttrDict[Any]):
@@ -3334,7 +3035,7 @@ def __init__(
super().__init__(kwargs)
-class SpanContainingQuery(QueryBase):
+class SpanContainingQuery(AttrDict[Any]):
"""
:arg big: (required) Can be any span query. Matching spans from `big`
that contain matches from `little` are returned.
@@ -3370,10 +3071,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanFieldMaskingQuery(QueryBase):
+class SpanFieldMaskingQuery(AttrDict[Any]):
"""
:arg field: (required)
:arg query: (required)
@@ -3407,10 +3108,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanFirstQuery(QueryBase):
+class SpanFirstQuery(AttrDict[Any]):
"""
:arg end: (required) Controls the maximum end position permitted in a
match.
@@ -3445,10 +3146,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanMultiTermQuery(QueryBase):
+class SpanMultiTermQuery(AttrDict[Any]):
"""
:arg match: (required) Should be a multi term query (one of
`wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query).
@@ -3478,10 +3179,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanNearQuery(QueryBase):
+class SpanNearQuery(AttrDict[Any]):
"""
:arg clauses: (required) Array of one or more other span type queries.
:arg in_order: Controls whether matches are required to be in-order.
@@ -3523,10 +3224,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanNotQuery(QueryBase):
+class SpanNotQuery(AttrDict[Any]):
"""
:arg exclude: (required) Span query whose matches must not overlap
those returned.
@@ -3580,10 +3281,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanOrQuery(QueryBase):
+class SpanOrQuery(AttrDict[Any]):
"""
:arg clauses: (required) Array of one or more other span type queries.
:arg boost: Floating point number used to decrease or increase the
@@ -3614,7 +3315,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class SpanQuery(AttrDict[Any]):
@@ -3705,7 +3406,7 @@ def __init__(
super().__init__(kwargs)
-class SpanTermQuery(QueryBase):
+class SpanTermQuery(AttrDict[Any]):
"""
:arg value: (required)
:arg boost: Floating point number used to decrease or increase the
@@ -3734,10 +3435,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class SpanWithinQuery(QueryBase):
+class SpanWithinQuery(AttrDict[Any]):
"""
:arg big: (required) Can be any span query. Matching spans from
`little` that are enclosed within `big` are returned.
@@ -3773,7 +3474,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class TDigest(AttrDict[Any]):
@@ -3793,7 +3494,7 @@ def __init__(
super().__init__(kwargs)
-class TermQuery(QueryBase):
+class TermQuery(AttrDict[Any]):
"""
:arg value: (required) Term you wish to find in the provided field.
:arg case_insensitive: Allows ASCII case insensitive matching of the
@@ -3830,7 +3531,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class TermsLookup(AttrDict[Any]):
@@ -3889,7 +3590,7 @@ def __init__(
super().__init__(kwargs)
-class TermsSetQuery(QueryBase):
+class TermsSetQuery(AttrDict[Any]):
"""
:arg terms: (required) Array of terms you wish to find in the provided
field.
@@ -3941,7 +3642,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class TestPopulation(AttrDict[Any]):
@@ -3996,7 +3697,7 @@ def __init__(
super().__init__(kwargs)
-class TextExpansionQuery(QueryBase):
+class TextExpansionQuery(AttrDict[Any]):
"""
:arg model_id: (required) The text expansion NLP model to use
:arg model_text: (required) The query text
@@ -4037,7 +3738,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class TokenPruningConfig(AttrDict[Any]):
@@ -4222,7 +3923,7 @@ def __init__(
super().__init__(kwargs)
-class WeightedTokensQuery(QueryBase):
+class WeightedTokensQuery(AttrDict[Any]):
"""
:arg tokens: (required) The tokens representing this query
:arg pruning_config: Token pruning configurations
@@ -4258,10 +3959,10 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class WildcardQuery(QueryBase):
+class WildcardQuery(AttrDict[Any]):
"""
:arg case_insensitive: Allows case insensitive matching of the pattern
with the indexed field values when set to true. Default is false
@@ -4310,7 +4011,7 @@ def __init__(
kwargs["boost"] = boost
if _name is not DEFAULT:
kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
class WktGeoBounds(AttrDict[Any]):
@@ -4326,6 +4027,30 @@ def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
super().__init__(kwargs)
+class AdjacencyMatrixAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["AdjacencyMatrixBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "AdjacencyMatrixBucket"]:
+ return self.buckets # type: ignore
+
+
+class AdjacencyMatrixBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: str
+ doc_count: int
+
+
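+
Note: many aggregate classes added in this file follow the same shape as `AdjacencyMatrixAggregate`: `buckets` is declared as a list, matching the default response, and `buckets_as_dict` re-exposes the same attribute as a `Mapping` for requests made with `keyed: true`. A typing-oriented sketch; `agg` stands for any such aggregate pulled from a response:

# Default (non-keyed) response: buckets is a list of bucket objects.
for bucket in agg.buckets:
    print(bucket.key, bucket.doc_count)

# With `keyed: true` the same payload is a mapping keyed by bucket key;
# buckets_as_dict gives type checkers that Mapping view of `buckets`.
for key, bucket in agg.buckets_as_dict.items():
    print(key, bucket.doc_count)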
class AggregationBreakdown(AttrDict[Any]):
"""
:arg build_aggregation: (required)
@@ -4458,6 +4183,100 @@ class AggregationProfileDelegateDebugFilter(AttrDict[Any]):
segments_counted_in_constant_time: int
+class ArrayPercentilesItem(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg value: (required)
+ :arg value_as_string:
+ """
+
+ key: str
+ value: Union[float, None]
+ value_as_string: str
+
+
+class AutoDateHistogramAggregate(AttrDict[Any]):
+ """
+ :arg interval: (required)
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ interval: str
+ buckets: Sequence["DateHistogramBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]:
+ return self.buckets # type: ignore
+
+
+class AvgAggregate(AttrDict[Any]):
+ """
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class BoxPlotAggregate(AttrDict[Any]):
+ """
+ :arg min: (required)
+ :arg max: (required)
+ :arg q1: (required)
+ :arg q2: (required)
+ :arg q3: (required)
+ :arg lower: (required)
+ :arg upper: (required)
+ :arg min_as_string:
+ :arg max_as_string:
+ :arg q1_as_string:
+ :arg q2_as_string:
+ :arg q3_as_string:
+ :arg lower_as_string:
+ :arg upper_as_string:
+ :arg meta:
+ """
+
+ min: float
+ max: float
+ q1: float
+ q2: float
+ q3: float
+ lower: float
+ upper: float
+ min_as_string: str
+ max_as_string: str
+ q1_as_string: str
+ q2_as_string: str
+ q3_as_string: str
+ lower_as_string: str
+ upper_as_string: str
+ meta: Mapping[str, Any]
+
+
+class BucketMetricValueAggregate(AttrDict[Any]):
+ """
+ :arg keys: (required)
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ keys: Sequence[str] # type: ignore[assignment]
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
class BulkIndexByScrollFailure(AttrDict[Any]):
"""
:arg cause: (required)
@@ -4474,6 +4293,26 @@ class BulkIndexByScrollFailure(AttrDict[Any]):
type: str
+class CardinalityAggregate(AttrDict[Any]):
+ """
+ :arg value: (required)
+ :arg meta:
+ """
+
+ value: int
+ meta: Mapping[str, Any]
+
+
+class ChildrenAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
class ClusterDetails(AttrDict[Any]):
"""
:arg status: (required)
@@ -4526,19 +4365,7 @@ class Collector(AttrDict[Any]):
children: Sequence["Collector"]
-class SuggestBase(AttrDict[Any]):
- """
- :arg length: (required)
- :arg offset: (required)
- :arg text: (required)
- """
-
- length: int
- offset: int
- text: str
-
-
-class CompletionSuggest(SuggestBase):
+class CompletionSuggest(AttrDict[Any]):
"""
:arg options: (required)
:arg length: (required)
@@ -4585,6 +4412,108 @@ class CompletionSuggestOption(AttrDict[Any]):
score: float
+class CompositeAggregate(AttrDict[Any]):
+ """
+ :arg after_key:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ after_key: Mapping[str, Union[int, float, str, bool, None, Any]]
+ buckets: Sequence["CompositeBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "CompositeBucket"]:
+ return self.buckets # type: ignore
+
+
+class CompositeBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: Mapping[str, Union[int, float, str, bool, None, Any]]
+ doc_count: int
+
+
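+
Note: `CompositeAggregate.after_key` is what drives composite pagination. A sketch of the usual loop, assuming a default connection; the index, source field, and the `after` wiring are illustrative assumptions and the exact plumbing may differ:

from elasticsearch_dsl import Search

s = Search(index="my-index")
s.aggs.bucket(
    "comp", "composite", size=100,
    sources=[{"tag": {"terms": {"field": "tags"}}}],
)

while True:
    response = s.execute(ignore_cache=True)
    comp = response.aggregations.comp
    for bucket in comp.buckets:
        print(bucket.key["tag"], bucket.doc_count)
    after = getattr(comp, "after_key", None)
    if after is None:
        break  # last page has no after_key
    s.aggs["comp"].after = after.to_dict()  # request the next page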
+class CumulativeCardinalityAggregate(AttrDict[Any]):
+ """
+ Result of the `cumulative_cardinality` aggregation
+
+ :arg value: (required)
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: int
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class DateHistogramAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["DateHistogramBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]:
+ return self.buckets # type: ignore
+
+
+class DateHistogramBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ """
+
+ key: Any
+ doc_count: int
+ key_as_string: str
+
+
+class DateRangeAggregate(AttrDict[Any]):
+ """
+ Result of a `date_range` aggregation. Same format as for a `range`
+ aggregation: `from` and `to` in `buckets` are milliseconds since the
+ Epoch, represented as floating point numbers.
+
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["RangeBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+ return self.buckets # type: ignore
+
+
+class DerivativeAggregate(AttrDict[Any]):
+ """
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg normalized_value:
+ :arg normalized_value_as_string:
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ normalized_value: float
+ normalized_value_as_string: str
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
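+
Note: single-value metric aggregates in this file (`AvgAggregate`, `MinAggregate`, `MaxAggregate`, `DerivativeAggregate`, and friends) all type `value` as `Union[float, None]`, so a None check is warranted before arithmetic. A short sketch with a hypothetical aggregation name:

avg = response.aggregations.avg_price  # e.g. an `avg` aggregation
if avg.value is None:
    print("no documents matched; nothing to average")
else:
    print(f"average price: {avg.value:.2f}")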
class DfsKnnProfile(AttrDict[Any]):
"""
:arg query: (required)
@@ -4651,8 +4580,47 @@ class DfsStatisticsProfile(AttrDict[Any]):
children: Sequence["DfsStatisticsProfile"]
+class DoubleTermsAggregate(AttrDict[Any]):
+ """
+ Result of a `terms` aggregation when the field is some kind of decimal
+ number like a float, double, or distance.
+
+ :arg doc_count_error_upper_bound:
+ :arg sum_other_doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ doc_count_error_upper_bound: int
+ sum_other_doc_count: int
+ buckets: Sequence["DoubleTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "DoubleTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class DoubleTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ :arg doc_count_error_upper_bound:
+ """
+
+ key: float
+ doc_count: int
+ key_as_string: str
+ doc_count_error_upper_bound: int
+
+
class ErrorCause(AttrDict[Any]):
"""
+ Cause and details about a request failure. This class defines the
+ properties common to all error types. Additional details that depend
+ on the error type are also provided.
+
:arg type: (required) The type of error
:arg reason: A human-readable explanation of the error, in English
:arg stack_trace: The server stack trace. Present only if the
@@ -4670,6 +4638,138 @@ class ErrorCause(AttrDict[Any]):
suppressed: Sequence["ErrorCause"]
+class Explanation(AttrDict[Any]):
+ """
+ :arg description: (required)
+ :arg details: (required)
+ :arg value: (required)
+ """
+
+ description: str
+ details: Sequence["ExplanationDetail"]
+ value: float
+
+
+class ExplanationDetail(AttrDict[Any]):
+ """
+ :arg description: (required)
+ :arg value: (required)
+ :arg details:
+ """
+
+ description: str
+ value: float
+ details: Sequence["ExplanationDetail"]
+
+
+class ExtendedStatsAggregate(AttrDict[Any]):
+ """
+ :arg sum_of_squares: (required)
+ :arg variance: (required)
+ :arg variance_population: (required)
+ :arg variance_sampling: (required)
+ :arg std_deviation: (required)
+ :arg std_deviation_population: (required)
+ :arg std_deviation_sampling: (required)
+ :arg count: (required)
+ :arg min: (required)
+ :arg max: (required)
+ :arg avg: (required)
+ :arg sum: (required)
+ :arg std_deviation_bounds:
+ :arg sum_of_squares_as_string:
+ :arg variance_as_string:
+ :arg variance_population_as_string:
+ :arg variance_sampling_as_string:
+ :arg std_deviation_as_string:
+ :arg std_deviation_bounds_as_string:
+ :arg min_as_string:
+ :arg max_as_string:
+ :arg avg_as_string:
+ :arg sum_as_string:
+ :arg meta:
+ """
+
+ sum_of_squares: Union[float, None]
+ variance: Union[float, None]
+ variance_population: Union[float, None]
+ variance_sampling: Union[float, None]
+ std_deviation: Union[float, None]
+ std_deviation_population: Union[float, None]
+ std_deviation_sampling: Union[float, None]
+ count: int
+ min: Union[float, None]
+ max: Union[float, None]
+ avg: Union[float, None]
+ sum: float
+ std_deviation_bounds: "StandardDeviationBounds"
+ sum_of_squares_as_string: str
+ variance_as_string: str
+ variance_population_as_string: str
+ variance_sampling_as_string: str
+ std_deviation_as_string: str
+ std_deviation_bounds_as_string: "StandardDeviationBoundsAsString"
+ min_as_string: str
+ max_as_string: str
+ avg_as_string: str
+ sum_as_string: str
+ meta: Mapping[str, Any]
+
+
+class ExtendedStatsBucketAggregate(AttrDict[Any]):
+ """
+ :arg sum_of_squares: (required)
+ :arg variance: (required)
+ :arg variance_population: (required)
+ :arg variance_sampling: (required)
+ :arg std_deviation: (required)
+ :arg std_deviation_population: (required)
+ :arg std_deviation_sampling: (required)
+ :arg count: (required)
+ :arg min: (required)
+ :arg max: (required)
+ :arg avg: (required)
+ :arg sum: (required)
+ :arg std_deviation_bounds:
+ :arg sum_of_squares_as_string:
+ :arg variance_as_string:
+ :arg variance_population_as_string:
+ :arg variance_sampling_as_string:
+ :arg std_deviation_as_string:
+ :arg std_deviation_bounds_as_string:
+ :arg min_as_string:
+ :arg max_as_string:
+ :arg avg_as_string:
+ :arg sum_as_string:
+ :arg meta:
+ """
+
+ sum_of_squares: Union[float, None]
+ variance: Union[float, None]
+ variance_population: Union[float, None]
+ variance_sampling: Union[float, None]
+ std_deviation: Union[float, None]
+ std_deviation_population: Union[float, None]
+ std_deviation_sampling: Union[float, None]
+ count: int
+ min: Union[float, None]
+ max: Union[float, None]
+ avg: Union[float, None]
+ sum: float
+ std_deviation_bounds: "StandardDeviationBounds"
+ sum_of_squares_as_string: str
+ variance_as_string: str
+ variance_population_as_string: str
+ variance_sampling_as_string: str
+ std_deviation_as_string: str
+ std_deviation_bounds_as_string: "StandardDeviationBoundsAsString"
+ min_as_string: str
+ max_as_string: str
+ avg_as_string: str
+ sum_as_string: str
+ meta: Mapping[str, Any]
+
+
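+
Note: `std_deviation_bounds` on the two extended-stats aggregates above nests a `StandardDeviationBounds` object, so the bounds are read one level down. A reading sketch with a hypothetical aggregation name:

stats = response.aggregations.load_stats  # e.g. an `extended_stats` aggregation
print(stats.avg, stats.std_deviation)
print(stats.std_deviation_bounds.lower, stats.std_deviation_bounds.upper)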
class FetchProfile(AttrDict[Any]):
"""
:arg type: (required)
@@ -4720,146 +4820,803 @@ class FetchProfileDebug(AttrDict[Any]):
fast_path: int
-class KnnCollectorResult(AttrDict[Any]):
+class FilterAggregate(AttrDict[Any]):
"""
- :arg name: (required)
- :arg reason: (required)
- :arg time_in_nanos: (required)
- :arg time:
- :arg children:
+ :arg doc_count: (required)
+ :arg meta:
"""
- name: str
- reason: str
- time_in_nanos: Any
- time: Any
- children: Sequence["KnnCollectorResult"]
+ doc_count: int
+ meta: Mapping[str, Any]
-class KnnQueryProfileBreakdown(AttrDict[Any]):
+class FiltersAggregate(AttrDict[Any]):
"""
- :arg advance: (required)
- :arg advance_count: (required)
- :arg build_scorer: (required)
- :arg build_scorer_count: (required)
- :arg compute_max_score: (required)
- :arg compute_max_score_count: (required)
- :arg count_weight: (required)
- :arg count_weight_count: (required)
- :arg create_weight: (required)
- :arg create_weight_count: (required)
- :arg match: (required)
- :arg match_count: (required)
- :arg next_doc: (required)
- :arg next_doc_count: (required)
- :arg score: (required)
- :arg score_count: (required)
- :arg set_min_competitive_score: (required)
- :arg set_min_competitive_score_count: (required)
- :arg shallow_advance: (required)
- :arg shallow_advance_count: (required)
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
"""
- advance: int
- advance_count: int
- build_scorer: int
- build_scorer_count: int
- compute_max_score: int
- compute_max_score_count: int
- count_weight: int
- count_weight_count: int
- create_weight: int
- create_weight_count: int
- match: int
- match_count: int
- next_doc: int
- next_doc_count: int
- score: int
- score_count: int
- set_min_competitive_score: int
- set_min_competitive_score_count: int
- shallow_advance: int
- shallow_advance_count: int
+ buckets: Sequence["FiltersBucket"]
+ meta: Mapping[str, Any]
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "FiltersBucket"]:
+ return self.buckets # type: ignore
-class KnnQueryProfileResult(AttrDict[Any]):
+
+class FiltersBucket(AttrDict[Any]):
"""
- :arg type: (required)
- :arg description: (required)
- :arg time_in_nanos: (required)
- :arg breakdown: (required)
- :arg time:
- :arg debug:
- :arg children:
+ :arg doc_count: (required)
"""
- type: str
- description: str
- time_in_nanos: Any
- breakdown: "KnnQueryProfileBreakdown"
- time: Any
- debug: Mapping[str, Any]
- children: Sequence["KnnQueryProfileResult"]
+ doc_count: int
-class PhraseSuggest(SuggestBase):
+class FrequentItemSetsAggregate(AttrDict[Any]):
"""
- :arg options: (required)
- :arg length: (required)
- :arg offset: (required)
- :arg text: (required)
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
"""
- options: Sequence["PhraseSuggestOption"]
- length: int
- offset: int
- text: str
+ buckets: Sequence["FrequentItemSetsBucket"]
+ meta: Mapping[str, Any]
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "FrequentItemSetsBucket"]:
+ return self.buckets # type: ignore
-class PhraseSuggestOption(AttrDict[Any]):
+
+class FrequentItemSetsBucket(AttrDict[Any]):
"""
- :arg text: (required)
- :arg score: (required)
- :arg highlighted:
- :arg collate_match:
+ :arg key: (required)
+ :arg support: (required)
+ :arg doc_count: (required)
"""
- text: str
- score: float
- highlighted: str
- collate_match: bool
+ key: Mapping[str, Sequence[str]]
+ support: float
+ doc_count: int
-class Profile(AttrDict[Any]):
+class GeoBoundsAggregate(AttrDict[Any]):
"""
- :arg shards: (required)
+ :arg bounds:
+ :arg meta:
"""
- shards: Sequence["ShardProfile"]
+ bounds: Union[
+ "CoordsGeoBounds",
+ "TopLeftBottomRightGeoBounds",
+ "TopRightBottomLeftGeoBounds",
+ "WktGeoBounds",
+ ]
+ meta: Mapping[str, Any]
-class QueryBreakdown(AttrDict[Any]):
+class GeoCentroidAggregate(AttrDict[Any]):
"""
- :arg advance: (required)
- :arg advance_count: (required)
- :arg build_scorer: (required)
- :arg build_scorer_count: (required)
- :arg create_weight: (required)
- :arg create_weight_count: (required)
- :arg match: (required)
- :arg match_count: (required)
- :arg shallow_advance: (required)
- :arg shallow_advance_count: (required)
- :arg next_doc: (required)
- :arg next_doc_count: (required)
- :arg score: (required)
- :arg score_count: (required)
- :arg compute_max_score: (required)
- :arg compute_max_score_count: (required)
- :arg count_weight: (required)
- :arg count_weight_count: (required)
- :arg set_min_competitive_score: (required)
- :arg set_min_competitive_score_count: (required)
+ :arg count: (required)
+ :arg location:
+ :arg meta:
+ """
+
+ count: int
+ location: Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+ meta: Mapping[str, Any]
+
+
+class GeoDistanceAggregate(AttrDict[Any]):
+ """
+ Result of a `geo_distance` aggregation. The unit for `from` and `to`
+ is meters by default.
+
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["RangeBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+ return self.buckets # type: ignore
+
+
+class GeoHashGridAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["GeoHashGridBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "GeoHashGridBucket"]:
+ return self.buckets # type: ignore
+
+
+class GeoHashGridBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: str
+ doc_count: int
+
+
+class GeoHexGridAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["GeoHexGridBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "GeoHexGridBucket"]:
+ return self.buckets # type: ignore
+
+
+class GeoHexGridBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: str
+ doc_count: int
+
+
+class GeoLine(AttrDict[Any]):
+ """
+ A GeoJSON GeoLine.
+
+ :arg type: (required) Always `"LineString"`
+ :arg coordinates: (required) Array of `[lon, lat]` coordinates
+ """
+
+ type: str
+ coordinates: Sequence[Sequence[float]]
+
+
+class GeoLineAggregate(AttrDict[Any]):
+ """
+ :arg type: (required)
+ :arg geometry: (required)
+ :arg properties: (required)
+ :arg meta:
+ """
+
+ type: str
+ geometry: "GeoLine"
+ properties: Any
+ meta: Mapping[str, Any]
+
+
+class GeoTileGridAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["GeoTileGridBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "GeoTileGridBucket"]:
+ return self.buckets # type: ignore
+
+
+class GeoTileGridBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: str
+ doc_count: int
+
+
+class GlobalAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class HdrPercentileRanksAggregate(AttrDict[Any]):
+ """
+ :arg values: (required)
+ :arg meta:
+ """
+
+ values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ meta: Mapping[str, Any]
+
+
+class HdrPercentilesAggregate(AttrDict[Any]):
+ """
+ :arg values: (required)
+ :arg meta:
+ """
+
+ values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ meta: Mapping[str, Any]
+
+
+class HistogramAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["HistogramBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "HistogramBucket"]:
+ return self.buckets # type: ignore
+
+
+class HistogramBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ """
+
+ key: float
+ doc_count: int
+ key_as_string: str
+
+
+class Hit(AttrDict[Any]):
+ """
+ :arg index: (required)
+ :arg id:
+ :arg score:
+ :arg explanation:
+ :arg fields:
+ :arg highlight:
+ :arg inner_hits:
+ :arg matched_queries:
+ :arg nested:
+ :arg ignored:
+ :arg ignored_field_values:
+ :arg shard:
+ :arg node:
+ :arg routing:
+ :arg source:
+ :arg rank:
+ :arg seq_no:
+ :arg primary_term:
+ :arg version:
+ :arg sort:
+ """
+
+ index: str
+ id: str
+ score: Union[float, None]
+ explanation: "Explanation"
+ fields: Mapping[str, Any]
+ highlight: Mapping[str, Sequence[str]]
+ inner_hits: Mapping[str, "InnerHitsResult"]
+ matched_queries: Union[Sequence[str], Mapping[str, float]]
+ nested: "NestedIdentity"
+ ignored: Sequence[str]
+ ignored_field_values: Mapping[
+ str, Sequence[Union[int, float, str, bool, None, Any]]
+ ]
+ shard: str
+ node: str
+ routing: str
+ source: Any
+ rank: int
+ seq_no: int
+ primary_term: int
+ version: int
+ sort: Sequence[Union[int, float, str, bool, None, Any]]
+
+
+class HitsMetadata(AttrDict[Any]):
+ """
+ :arg hits: (required)
+ :arg total: Total hit count information, present only if
+ `track_total_hits` wasn't `false` in the search request.
+ :arg max_score:
+ """
+
+ hits: Sequence["Hit"]
+ total: Union["TotalHits", int]
+ max_score: Union[float, None]
+
+
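+
Note: `HitsMetadata.total` is typed as `Union["TotalHits", int]` because the server returns a plain integer when `rest_total_hits_as_int` is enabled. A defensive read (sketch):

total = response.hits.total
count = total if isinstance(total, int) else total.value
print(f"matched {count} documents")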
+class InferenceAggregate(AttrDict[Any]):
+ """
+ :arg value:
+ :arg feature_importance:
+ :arg top_classes:
+ :arg warning:
+ :arg meta:
+ """
+
+ value: Union[int, float, str, bool, None, Any]
+ feature_importance: Sequence["InferenceFeatureImportance"]
+ top_classes: Sequence["InferenceTopClassEntry"]
+ warning: str
+ meta: Mapping[str, Any]
+
+
+class InferenceClassImportance(AttrDict[Any]):
+ """
+ :arg class_name: (required)
+ :arg importance: (required)
+ """
+
+ class_name: str
+ importance: float
+
+
+class InferenceFeatureImportance(AttrDict[Any]):
+ """
+ :arg feature_name: (required)
+ :arg importance:
+ :arg classes:
+ """
+
+ feature_name: str
+ importance: float
+ classes: Sequence["InferenceClassImportance"]
+
+
+class InferenceTopClassEntry(AttrDict[Any]):
+ """
+ :arg class_name: (required)
+ :arg class_probability: (required)
+ :arg class_score: (required)
+ """
+
+ class_name: Union[int, float, str, bool, None, Any]
+ class_probability: float
+ class_score: float
+
+
+class InnerHitsResult(AttrDict[Any]):
+ """
+ :arg hits: (required)
+ """
+
+ hits: "HitsMetadata"
+
+
+class IpPrefixAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["IpPrefixBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "IpPrefixBucket"]:
+ return self.buckets # type: ignore
+
+
+class IpPrefixBucket(AttrDict[Any]):
+ """
+ :arg is_ipv6: (required)
+ :arg key: (required)
+ :arg prefix_length: (required)
+ :arg doc_count: (required)
+ :arg netmask:
+ """
+
+ is_ipv6: bool
+ key: str
+ prefix_length: int
+ doc_count: int
+ netmask: str
+
+
+class IpRangeAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["IpRangeBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "IpRangeBucket"]:
+ return self.buckets # type: ignore
+
+
+class IpRangeBucket(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg key:
+ :arg from:
+ :arg to:
+ """
+
+ doc_count: int
+ key: str
+ from_: str
+ to: str
+
+
+class KnnCollectorResult(AttrDict[Any]):
+ """
+ :arg name: (required)
+ :arg reason: (required)
+ :arg time_in_nanos: (required)
+ :arg time:
+ :arg children:
+ """
+
+ name: str
+ reason: str
+ time_in_nanos: Any
+ time: Any
+ children: Sequence["KnnCollectorResult"]
+
+
+class KnnQueryProfileBreakdown(AttrDict[Any]):
+ """
+ :arg advance: (required)
+ :arg advance_count: (required)
+ :arg build_scorer: (required)
+ :arg build_scorer_count: (required)
+ :arg compute_max_score: (required)
+ :arg compute_max_score_count: (required)
+ :arg count_weight: (required)
+ :arg count_weight_count: (required)
+ :arg create_weight: (required)
+ :arg create_weight_count: (required)
+ :arg match: (required)
+ :arg match_count: (required)
+ :arg next_doc: (required)
+ :arg next_doc_count: (required)
+ :arg score: (required)
+ :arg score_count: (required)
+ :arg set_min_competitive_score: (required)
+ :arg set_min_competitive_score_count: (required)
+ :arg shallow_advance: (required)
+ :arg shallow_advance_count: (required)
+ """
+
+ advance: int
+ advance_count: int
+ build_scorer: int
+ build_scorer_count: int
+ compute_max_score: int
+ compute_max_score_count: int
+ count_weight: int
+ count_weight_count: int
+ create_weight: int
+ create_weight_count: int
+ match: int
+ match_count: int
+ next_doc: int
+ next_doc_count: int
+ score: int
+ score_count: int
+ set_min_competitive_score: int
+ set_min_competitive_score_count: int
+ shallow_advance: int
+ shallow_advance_count: int
+
+
+class KnnQueryProfileResult(AttrDict[Any]):
+ """
+ :arg type: (required)
+ :arg description: (required)
+ :arg time_in_nanos: (required)
+ :arg breakdown: (required)
+ :arg time:
+ :arg debug:
+ :arg children:
+ """
+
+ type: str
+ description: str
+ time_in_nanos: Any
+ breakdown: "KnnQueryProfileBreakdown"
+ time: Any
+ debug: Mapping[str, Any]
+ children: Sequence["KnnQueryProfileResult"]
+
+
+class LongRareTermsAggregate(AttrDict[Any]):
+ """
+ Result of the `rare_terms` aggregation when the field is some kind of
+ whole number like an integer, long, or a date.
+
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["LongRareTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "LongRareTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class LongRareTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ """
+
+ key: int
+ doc_count: int
+ key_as_string: str
+
+
+class LongTermsAggregate(AttrDict[Any]):
+ """
+ Result of a `terms` aggregation when the field is some kind of whole
+ number like an integer, long, or a date.
+
+ :arg doc_count_error_upper_bound:
+ :arg sum_other_doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ doc_count_error_upper_bound: int
+ sum_other_doc_count: int
+ buckets: Sequence["LongTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "LongTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class LongTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ :arg doc_count_error_upper_bound:
+ """
+
+ key: int
+ doc_count: int
+ key_as_string: str
+ doc_count_error_upper_bound: int
+
+
+class MatrixStatsAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg fields:
+ :arg meta:
+ """
+
+ doc_count: int
+ fields: Sequence["MatrixStatsFields"]
+ meta: Mapping[str, Any]
+
+
+class MatrixStatsFields(AttrDict[Any]):
+ """
+ :arg name: (required)
+ :arg count: (required)
+ :arg mean: (required)
+ :arg variance: (required)
+ :arg skewness: (required)
+ :arg kurtosis: (required)
+ :arg covariance: (required)
+ :arg correlation: (required)
+ """
+
+ name: str
+ count: int
+ mean: float
+ variance: float
+ skewness: float
+ kurtosis: float
+ covariance: Mapping[str, float]
+ correlation: Mapping[str, float]
+
+
+class MaxAggregate(AttrDict[Any]):
+ """
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class MedianAbsoluteDeviationAggregate(AttrDict[Any]):
+ """
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class MinAggregate(AttrDict[Any]):
+ """
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class MissingAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class MultiTermsAggregate(AttrDict[Any]):
+ """
+ :arg doc_count_error_upper_bound:
+ :arg sum_other_doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ doc_count_error_upper_bound: int
+ sum_other_doc_count: int
+ buckets: Sequence["MultiTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "MultiTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class MultiTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ :arg doc_count_error_upper_bound:
+ """
+
+ key: Sequence[Union[int, float, str, bool, None, Any]]
+ doc_count: int
+ key_as_string: str
+ doc_count_error_upper_bound: int
+
+
+class NestedAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class NestedIdentity(AttrDict[Any]):
+ """
+ :arg field: (required)
+ :arg offset: (required)
+ :arg _nested:
+ """
+
+ field: str
+ offset: int
+ _nested: "NestedIdentity"
+
+
+class ParentAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class PercentilesBucketAggregate(AttrDict[Any]):
+ """
+ :arg values: (required)
+ :arg meta:
+ """
+
+ values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ meta: Mapping[str, Any]
+
+
+class PhraseSuggest(AttrDict[Any]):
+ """
+ :arg options: (required)
+ :arg length: (required)
+ :arg offset: (required)
+ :arg text: (required)
+ """
+
+ options: Sequence["PhraseSuggestOption"]
+ length: int
+ offset: int
+ text: str
+
+
+class PhraseSuggestOption(AttrDict[Any]):
+ """
+ :arg text: (required)
+ :arg score: (required)
+ :arg highlighted:
+ :arg collate_match:
+ """
+
+ text: str
+ score: float
+ highlighted: str
+ collate_match: bool
+
+
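# A hedged sketch of reading a suggester result with the suggest classes
# above; the suggester name "my-suggest" and the executed `response` are
# assumptions, not part of the generated module.
for entry in response.suggest["my-suggest"]:
    for option in entry.options:
        print(option.text, option.score)
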
+class Profile(AttrDict[Any]):
+ """
+ :arg shards: (required)
+ """
+
+ shards: Sequence["ShardProfile"]
+
+
+class QueryBreakdown(AttrDict[Any]):
+ """
+ :arg advance: (required)
+ :arg advance_count: (required)
+ :arg build_scorer: (required)
+ :arg build_scorer_count: (required)
+ :arg create_weight: (required)
+ :arg create_weight_count: (required)
+ :arg match: (required)
+ :arg match_count: (required)
+ :arg shallow_advance: (required)
+ :arg shallow_advance_count: (required)
+ :arg next_doc: (required)
+ :arg next_doc_count: (required)
+ :arg score: (required)
+ :arg score_count: (required)
+ :arg compute_max_score: (required)
+ :arg compute_max_score_count: (required)
+ :arg count_weight: (required)
+ :arg count_weight_count: (required)
+ :arg set_min_competitive_score: (required)
+ :arg set_min_competitive_score_count: (required)
"""
advance: int
@@ -4900,6 +5657,50 @@ class QueryProfile(AttrDict[Any]):
children: Sequence["QueryProfile"]
+class RangeAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["RangeBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+ return self.buckets # type: ignore
+
+
+class RangeBucket(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg from:
+ :arg to:
+ :arg from_as_string:
+ :arg to_as_string:
+ :arg key: The bucket key. Present if the aggregation is _not_ keyed.
+ """
+
+ doc_count: int
+ from_: float
+ to: float
+ from_as_string: str
+ to_as_string: str
+ key: str
+
+
+class RateAggregate(AttrDict[Any]):
+ """
+ :arg value: (required)
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: float
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
class Retries(AttrDict[Any]):
"""
:arg bulk: (required)
@@ -4910,6 +5711,36 @@ class Retries(AttrDict[Any]):
search: int
+class ReverseNestedAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class SamplerAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class ScriptedMetricAggregate(AttrDict[Any]):
+ """
+ :arg value: (required)
+ :arg meta:
+ """
+
+ value: Any
+ meta: Mapping[str, Any]
+
+
class SearchProfile(AttrDict[Any]):
"""
:arg collector: (required)
@@ -4980,7 +5811,311 @@ class ShardStatistics(AttrDict[Any]):
skipped: int
-class TermSuggest(SuggestBase):
+class SignificantLongTermsAggregate(AttrDict[Any]):
+ """
+ :arg bg_count:
+ :arg doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ bg_count: int
+ doc_count: int
+ buckets: Sequence["SignificantLongTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "SignificantLongTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class SignificantLongTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg score: (required)
+ :arg bg_count: (required)
+ :arg doc_count: (required)
+ :arg key_as_string:
+ """
+
+ key: int
+ score: float
+ bg_count: int
+ doc_count: int
+ key_as_string: str
+
+
+class SignificantStringTermsAggregate(AttrDict[Any]):
+ """
+ :arg bg_count:
+ :arg doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ bg_count: int
+ doc_count: int
+ buckets: Sequence["SignificantStringTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "SignificantStringTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class SignificantStringTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg score: (required)
+ :arg bg_count: (required)
+ :arg doc_count: (required)
+ """
+
+ key: str
+ score: float
+ bg_count: int
+ doc_count: int
+
+
+class SimpleValueAggregate(AttrDict[Any]):
+ """
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class StandardDeviationBounds(AttrDict[Any]):
+ """
+ :arg upper: (required)
+ :arg lower: (required)
+ :arg upper_population: (required)
+ :arg lower_population: (required)
+ :arg upper_sampling: (required)
+ :arg lower_sampling: (required)
+ """
+
+ upper: Union[float, None]
+ lower: Union[float, None]
+ upper_population: Union[float, None]
+ lower_population: Union[float, None]
+ upper_sampling: Union[float, None]
+ lower_sampling: Union[float, None]
+
+
+class StandardDeviationBoundsAsString(AttrDict[Any]):
+ """
+ :arg upper: (required)
+ :arg lower: (required)
+ :arg upper_population: (required)
+ :arg lower_population: (required)
+ :arg upper_sampling: (required)
+ :arg lower_sampling: (required)
+ """
+
+ upper: str
+ lower: str
+ upper_population: str
+ lower_population: str
+ upper_sampling: str
+ lower_sampling: str
+
+
+class StatsAggregate(AttrDict[Any]):
+ """
+ Statistics aggregation result. `min`, `max` and `avg` are missing if
+ there were no values to process (`count` is zero).
+
+ :arg count: (required)
+ :arg min: (required)
+ :arg max: (required)
+ :arg avg: (required)
+ :arg sum: (required)
+ :arg min_as_string:
+ :arg max_as_string:
+ :arg avg_as_string:
+ :arg sum_as_string:
+ :arg meta:
+ """
+
+ count: int
+ min: Union[float, None]
+ max: Union[float, None]
+ avg: Union[float, None]
+ sum: float
+ min_as_string: str
+ max_as_string: str
+ avg_as_string: str
+ sum_as_string: str
+ meta: Mapping[str, Any]
+
+
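# A small sketch tied to the StatsAggregate docstring above: when `count`
# is zero the optional statistics come back as None, so callers should
# guard on it. `response` and the aggregation name are assumptions.
from typing import cast

stats = cast(StatsAggregate, response.aggregations["size_stats"])
if stats.count:
    print(stats.min, stats.max, stats.avg, stats.sum)
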
+class StatsBucketAggregate(AttrDict[Any]):
+ """
+ :arg count: (required)
+ :arg min: (required)
+ :arg max: (required)
+ :arg avg: (required)
+ :arg sum: (required)
+ :arg min_as_string:
+ :arg max_as_string:
+ :arg avg_as_string:
+ :arg sum_as_string:
+ :arg meta:
+ """
+
+ count: int
+ min: Union[float, None]
+ max: Union[float, None]
+ avg: Union[float, None]
+ sum: float
+ min_as_string: str
+ max_as_string: str
+ avg_as_string: str
+ sum_as_string: str
+ meta: Mapping[str, Any]
+
+
+class StringRareTermsAggregate(AttrDict[Any]):
+ """
+ Result of the `rare_terms` aggregation when the field is a string.
+
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["StringRareTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "StringRareTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class StringRareTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: str
+ doc_count: int
+
+
+class StringStatsAggregate(AttrDict[Any]):
+ """
+ :arg count: (required)
+ :arg min_length: (required)
+ :arg max_length: (required)
+ :arg avg_length: (required)
+ :arg entropy: (required)
+ :arg distribution:
+ :arg min_length_as_string:
+ :arg max_length_as_string:
+ :arg avg_length_as_string:
+ :arg meta:
+ """
+
+ count: int
+ min_length: Union[int, None]
+ max_length: Union[int, None]
+ avg_length: Union[float, None]
+ entropy: Union[float, None]
+ distribution: Union[Mapping[str, float], None]
+ min_length_as_string: str
+ max_length_as_string: str
+ avg_length_as_string: str
+ meta: Mapping[str, Any]
+
+
+class StringTermsAggregate(AttrDict[Any]):
+ """
+ Result of a `terms` aggregation when the field is a string.
+
+ :arg doc_count_error_upper_bound:
+ :arg sum_other_doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ doc_count_error_upper_bound: int
+ sum_other_doc_count: int
+ buckets: Sequence["StringTermsBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "StringTermsBucket"]:
+ return self.buckets # type: ignore
+
+
+class StringTermsBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ :arg doc_count_error_upper_bound:
+ """
+
+ key: Union[int, float, str, bool, None, Any]
+ doc_count: int
+ doc_count_error_upper_bound: int
+
+
+class SumAggregate(AttrDict[Any]):
+ """
+ Sum aggregation result. `value` is always present and is zero if there
+ were no values to process.
+
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class TDigestPercentileRanksAggregate(AttrDict[Any]):
+ """
+ :arg values: (required)
+ :arg meta:
+ """
+
+ values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ meta: Mapping[str, Any]
+
+
+class TDigestPercentilesAggregate(AttrDict[Any]):
+ """
+ :arg values: (required)
+ :arg meta:
+ """
+
+ values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ meta: Mapping[str, Any]
+
+
+class TTestAggregate(AttrDict[Any]):
+ """
+ :arg value: (required)
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class TermSuggest(AttrDict[Any]):
"""
:arg options: (required)
:arg length: (required)
@@ -5008,3 +6143,203 @@ class TermSuggestOption(AttrDict[Any]):
freq: int
highlighted: str
collate_match: bool
+
+
+class TimeSeriesAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["TimeSeriesBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "TimeSeriesBucket"]:
+ return self.buckets # type: ignore
+
+
+class TimeSeriesBucket(AttrDict[Any]):
+ """
+ :arg key: (required)
+ :arg doc_count: (required)
+ """
+
+ key: Mapping[str, Union[int, float, str, bool, None, Any]]
+ doc_count: int
+
+
+class TopHitsAggregate(AttrDict[Any]):
+ """
+ :arg hits: (required)
+ :arg meta:
+ """
+
+ hits: "HitsMetadata"
+ meta: Mapping[str, Any]
+
+
+class TopMetrics(AttrDict[Any]):
+ """
+ :arg sort: (required)
+ :arg metrics: (required)
+ """
+
+ sort: Sequence[Union[Union[int, float, str, bool, None, Any], None]]
+ metrics: Mapping[str, Union[Union[int, float, str, bool, None, Any], None]]
+
+
+class TopMetricsAggregate(AttrDict[Any]):
+ """
+ :arg top: (required)
+ :arg meta:
+ """
+
+ top: Sequence["TopMetrics"]
+ meta: Mapping[str, Any]
+
+
+class TotalHits(AttrDict[Any]):
+ """
+ :arg relation: (required)
+ :arg value: (required)
+ """
+
+ relation: Literal["eq", "gte"]
+ value: int
+
+
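# A minimal reading of TotalHits, assuming `response` is any executed
# search (not part of the generated module): when hit counting is capped,
# `relation` is "gte" and `value` is only a lower bound on the matches.
total = response.hits.total
if total.relation == "eq":
    print(f"exactly {total.value} matches")
else:  # "gte": the true count is at least `value`
    print(f"at least {total.value} matches")
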
+class UnmappedRareTermsAggregate(AttrDict[Any]):
+ """
+ Result of a `rare_terms` aggregation when the field is unmapped.
+ `buckets` is always empty.
+
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence[Any]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, Any]:
+ return self.buckets # type: ignore
+
+
+class UnmappedSamplerAggregate(AttrDict[Any]):
+ """
+ :arg doc_count: (required)
+ :arg meta:
+ """
+
+ doc_count: int
+ meta: Mapping[str, Any]
+
+
+class UnmappedSignificantTermsAggregate(AttrDict[Any]):
+ """
+ Result of the `significant_terms` aggregation on an unmapped field.
+ `buckets` is always empty.
+
+ :arg bg_count:
+ :arg doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ bg_count: int
+ doc_count: int
+ buckets: Sequence[Any]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, Any]:
+ return self.buckets # type: ignore
+
+
+class UnmappedTermsAggregate(AttrDict[Any]):
+ """
+ Result of a `terms` aggregation when the field is unmapped. `buckets`
+ is always empty.
+
+ :arg doc_count_error_upper_bound:
+ :arg sum_other_doc_count:
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ doc_count_error_upper_bound: int
+ sum_other_doc_count: int
+ buckets: Sequence[Any]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, Any]:
+ return self.buckets # type: ignore
+
+
+class ValueCountAggregate(AttrDict[Any]):
+ """
+ Value count aggregation result. `value` is always present.
+
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
+
+
+class VariableWidthHistogramAggregate(AttrDict[Any]):
+ """
+ :arg buckets: (required) the aggregation buckets as a list
+ :arg meta:
+ """
+
+ buckets: Sequence["VariableWidthHistogramBucket"]
+ meta: Mapping[str, Any]
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, "VariableWidthHistogramBucket"]:
+ return self.buckets # type: ignore
+
+
+class VariableWidthHistogramBucket(AttrDict[Any]):
+ """
+ :arg min: (required)
+ :arg key: (required)
+ :arg max: (required)
+ :arg doc_count: (required)
+ :arg min_as_string:
+ :arg key_as_string:
+ :arg max_as_string:
+ """
+
+ min: float
+ key: float
+ max: float
+ doc_count: int
+ min_as_string: str
+ key_as_string: str
+ max_as_string: str
+
+
+class WeightedAvgAggregate(AttrDict[Any]):
+ """
+ Weighted average aggregation result. `value` is missing if the weight
+ was set to zero.
+
+ :arg value: (required) The metric value. A missing value generally
+ means that there was no data to aggregate, unless specified
+ otherwise.
+ :arg value_as_string:
+ :arg meta:
+ """
+
+ value: Union[float, None]
+ value_as_string: str
+ meta: Mapping[str, Any]
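# A hedged end-to-end sketch of the generated aggregate classes above,
# assuming a configured default connection and a "my-index" index with a
# numeric "bytes" field. `buckets` is typed for the list form; the
# generated `buckets_as_dict` property covers keyed aggregations such as
# `range` with `keyed=True`, whose buckets come back as a dict.
from typing import cast

from elasticsearch_dsl import Search
from elasticsearch_dsl.types import RangeAggregate

s = Search(index="my-index")
s.aggs.bucket(
    "sizes",
    "range",
    field="bytes",
    keyed=True,
    ranges=[{"key": "small", "to": 1024}, {"key": "large", "from": 1024}],
)
sizes = cast(RangeAggregate, s.execute().aggregations["sizes"])
for key, bucket in sizes.buckets_as_dict.items():
    print(key, bucket.doc_count)
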
diff --git a/elasticsearch_dsl/utils.py b/elasticsearch_dsl/utils.py
index e85a17e0..476778f8 100644
--- a/elasticsearch_dsl/utils.py
+++ b/elasticsearch_dsl/utils.py
@@ -28,6 +28,7 @@
Iterable,
Iterator,
List,
+ Mapping,
Optional,
Tuple,
Type,
@@ -48,6 +49,7 @@
from .field import Field
from .index_base import IndexBase
from .response import Hit # noqa: F401
+ from .types import Hit as HitBaseType
UsingType: TypeAlias = Union[str, "Elasticsearch"]
AsyncUsingType: TypeAlias = Union[str, "AsyncElasticsearch"]
@@ -468,7 +470,15 @@ def _clone(self) -> Self:
return c
-class HitMeta(AttrDict[Any]):
+if TYPE_CHECKING:
+ HitMetaBase = HitBaseType
+else:
+ HitMetaBase = AttrDict[Any]
+
+
+class HitMeta(HitMetaBase):
+ inner_hits: Mapping[str, Any]
+
def __init__(
self,
document: Dict[str, Any],
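# A short sketch of what the typed HitMeta enables, assuming `hit` is a
# search hit returned with inner hits: `inner_hits` is now declared as a
# Mapping, so item access type-checks, which is why the examples below
# switch from attribute access to ["..."] lookups.
for answer in hit.meta.inner_hits["answer"].hits:
    print(answer)
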
diff --git a/examples/async/composite_agg.py b/examples/async/composite_agg.py
index 5188ce4c..37c7bfcb 100644
--- a/examples/async/composite_agg.py
+++ b/examples/async/composite_agg.py
@@ -17,11 +17,12 @@
import asyncio
import os
-from typing import Any, AsyncIterator, Dict, Mapping, Sequence
+from typing import Any, AsyncIterator, Dict, Mapping, Sequence, cast
from elasticsearch.helpers import async_bulk
from elasticsearch_dsl import Agg, AsyncSearch, Response, aggs, async_connections
+from elasticsearch_dsl.types import CompositeAggregate
from tests.test_integration.test_data import DATA, GIT_INDEX
@@ -30,7 +31,7 @@ async def scan_aggs(
source_aggs: Sequence[Mapping[str, Agg]],
inner_aggs: Dict[str, Agg] = {},
size: int = 10,
-) -> AsyncIterator[Response]:
+) -> AsyncIterator[CompositeAggregate]:
"""
Helper function used to iterate over all possible bucket combinations of
``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
@@ -52,13 +53,13 @@ async def run_search(**kwargs: Any) -> Response:
return await s.execute()
response = await run_search()
- while response.aggregations.comp.buckets:
- for b in response.aggregations.comp.buckets:
- yield b
- if "after_key" in response.aggregations.comp:
- after = response.aggregations.comp.after_key
+ while response.aggregations["comp"].buckets:
+ for b in response.aggregations["comp"].buckets:
+ yield cast(CompositeAggregate, b)
+ if "after_key" in response.aggregations["comp"]:
+ after = response.aggregations["comp"].after_key
else:
- after = response.aggregations.comp.buckets[-1].key
+ after = response.aggregations["comp"].buckets[-1].key
response = await run_search(after=after)
diff --git a/examples/async/parent_child.py b/examples/async/parent_child.py
index 2a74d2d4..6668a77c 100644
--- a/examples/async/parent_child.py
+++ b/examples/async/parent_child.py
@@ -46,7 +46,6 @@
from elasticsearch_dsl import (
AsyncDocument,
- AsyncIndex,
AsyncSearch,
Date,
InnerDoc,
@@ -92,7 +91,6 @@ class Post(AsyncDocument):
# definitions here help type checkers understand additional arguments
# that are allowed in the constructor
_routing: str = mapped_field(default=None)
- _index: AsyncIndex = mapped_field(default=None)
_id: Optional[int] = mapped_field(default=None)
created: Optional[datetime] = mapped_field(default=None)
@@ -161,8 +159,6 @@ async def add_answer(
answer = Answer(
# required make sure the answer is stored in the same shard
_routing=self.meta.id,
- # since we don't have explicit index, ensure same index as self
- _index=self.meta.index,
# set up the parent/child mapping
question_answer={"name": "answer", "parent": self.meta.id},
# pass in the field values
@@ -190,7 +186,7 @@ async def get_answers(self) -> List[Any]:
elasticsearch.
"""
if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
- return cast(List[Any], self.meta.inner_hits.answer.hits)
+ return cast(List[Any], self.meta.inner_hits["answer"].hits)
return [a async for a in self.search_answers()]
async def save(self, **kwargs: Any) -> None: # type: ignore[override]
diff --git a/examples/async/sparse_vectors.py b/examples/async/sparse_vectors.py
index 458798a3..962ecda8 100644
--- a/examples/async/sparse_vectors.py
+++ b/examples/async/sparse_vectors.py
@@ -186,7 +186,7 @@ async def main() -> None:
)
print(f"Summary: {hit.summary}")
if args.show_inner_hits:
- for passage in hit.meta.inner_hits.passages:
+ for passage in hit.meta.inner_hits["passages"]:
print(f" - [Score: {passage.meta.score}] {passage.content!r}")
print("")
diff --git a/examples/async/vectors.py b/examples/async/vectors.py
index b58c184e..e17cbf3e 100644
--- a/examples/async/vectors.py
+++ b/examples/async/vectors.py
@@ -175,7 +175,7 @@ async def main() -> None:
)
print(f"Summary: {hit.summary}")
if args.show_inner_hits:
- for passage in hit.meta.inner_hits.passages:
+ for passage in hit.meta.inner_hits["passages"]:
print(f" - [Score: {passage.meta.score}] {passage.content!r}")
print("")
diff --git a/examples/composite_agg.py b/examples/composite_agg.py
index f93560ec..755c0a8c 100644
--- a/examples/composite_agg.py
+++ b/examples/composite_agg.py
@@ -16,11 +16,12 @@
# under the License.
import os
-from typing import Any, Dict, Iterator, Mapping, Sequence
+from typing import Any, Dict, Iterator, Mapping, Sequence, cast
from elasticsearch.helpers import bulk
from elasticsearch_dsl import Agg, Response, Search, aggs, connections
+from elasticsearch_dsl.types import CompositeAggregate
from tests.test_integration.test_data import DATA, GIT_INDEX
@@ -29,7 +30,7 @@ def scan_aggs(
source_aggs: Sequence[Mapping[str, Agg]],
inner_aggs: Dict[str, Agg] = {},
size: int = 10,
-) -> Iterator[Response]:
+) -> Iterator[CompositeAggregate]:
"""
Helper function used to iterate over all possible bucket combinations of
``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
@@ -51,13 +52,13 @@ def run_search(**kwargs: Any) -> Response:
return s.execute()
response = run_search()
- while response.aggregations.comp.buckets:
- for b in response.aggregations.comp.buckets:
- yield b
- if "after_key" in response.aggregations.comp:
- after = response.aggregations.comp.after_key
+ while response.aggregations["comp"].buckets:
+ for b in response.aggregations["comp"].buckets:
+ yield cast(CompositeAggregate, b)
+ if "after_key" in response.aggregations["comp"]:
+ after = response.aggregations["comp"].after_key
else:
- after = response.aggregations.comp.buckets[-1].key
+ after = response.aggregations["comp"].buckets[-1].key
response = run_search(after=after)
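# A possible invocation of the scan_aggs() helper above; its first
# parameter, the Search to execute, sits just outside this hunk, and the
# field names mirror this repository's git commit test data.
s = Search(index="git")
for b in scan_aggs(
    s,
    [{"files": aggs.Terms(field="files")}],
    {"first_seen": aggs.Min(field="committed_date")},
):
    print(b.key.to_dict(), b.doc_count)
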
diff --git a/examples/parent_child.py b/examples/parent_child.py
index 6d20dde2..5acbbd72 100644
--- a/examples/parent_child.py
+++ b/examples/parent_child.py
@@ -46,7 +46,6 @@
from elasticsearch_dsl import (
Date,
Document,
- Index,
InnerDoc,
Join,
Keyword,
@@ -91,7 +90,6 @@ class Post(Document):
# definitions here help type checkers understand additional arguments
# that are allowed in the constructor
_routing: str = mapped_field(default=None)
- _index: Index = mapped_field(default=None)
_id: Optional[int] = mapped_field(default=None)
created: Optional[datetime] = mapped_field(default=None)
@@ -160,8 +158,6 @@ def add_answer(
answer = Answer(
# required make sure the answer is stored in the same shard
_routing=self.meta.id,
- # since we don't have explicit index, ensure same index as self
- _index=self.meta.index,
# set up the parent/child mapping
question_answer={"name": "answer", "parent": self.meta.id},
# pass in the field values
@@ -189,7 +185,7 @@ def get_answers(self) -> List[Any]:
elasticsearch.
"""
if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
- return cast(List[Any], self.meta.inner_hits.answer.hits)
+ return cast(List[Any], self.meta.inner_hits["answer"].hits)
return [a for a in self.search_answers()]
def save(self, **kwargs: Any) -> None: # type: ignore[override]
diff --git a/examples/sparse_vectors.py b/examples/sparse_vectors.py
index ba853e8c..408e0df8 100644
--- a/examples/sparse_vectors.py
+++ b/examples/sparse_vectors.py
@@ -185,7 +185,7 @@ def main() -> None:
)
print(f"Summary: {hit.summary}")
if args.show_inner_hits:
- for passage in hit.meta.inner_hits.passages:
+ for passage in hit.meta.inner_hits["passages"]:
print(f" - [Score: {passage.meta.score}] {passage.content!r}")
print("")
diff --git a/examples/vectors.py b/examples/vectors.py
index ef1342ee..d6ecd6e2 100644
--- a/examples/vectors.py
+++ b/examples/vectors.py
@@ -174,7 +174,7 @@ def main() -> None:
)
print(f"Summary: {hit.summary}")
if args.show_inner_hits:
- for passage in hit.meta.inner_hits.passages:
+ for passage in hit.meta.inner_hits["passages"]:
print(f" - [Score: {passage.meta.score}] {passage.content!r}")
print("")
diff --git a/noxfile.py b/noxfile.py
index 98f8263f..b585b422 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -35,6 +35,7 @@
"3.10",
"3.11",
"3.12",
+ "3.13",
]
)
def test(session):
@@ -55,7 +56,7 @@ def test(session):
session.run("pytest", *argv)
-@nox.session(python="3.12")
+@nox.session(python="3.13")
def format(session):
session.install("black~=24.0", "isort", "unasync", "setuptools", ".[develop]")
session.run("python", "utils/run-unasync.py")
@@ -67,7 +68,7 @@ def format(session):
lint(session)
-@nox.session(python="3.12")
+@nox.session(python="3.13")
def lint(session):
session.install("flake8", "black~=24.0", "isort", "unasync", "setuptools")
session.run("black", "--check", "--target-version=py38", *SOURCE_FILES)
diff --git a/setup.py b/setup.py
index 5e36e6c7..d1444331 100644
--- a/setup.py
+++ b/setup.py
@@ -88,6 +88,7 @@
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
diff --git a/utils/generator.py b/utils/generator.py
index 39a3eaaf..653f943b 100644
--- a/utils/generator.py
+++ b/utils/generator.py
@@ -82,7 +82,7 @@ def add_dict_type(type_):
def add_seq_dict_type(type_):
- """Add List[Dict[str, Any]] to a Python type hint."""
+ """Add Sequence[Dict[str, Any]] to a Python type hint."""
if type_.startswith("Union["):
type_ = f"{type_[:-1]}, Sequence[Dict[str, Any]]]"
else:
@@ -99,6 +99,18 @@ def add_not_set(type_):
return type_
+def type_for_types_py(type_):
+ """Converts a type rendered in a generic way to the format needed in the
+ types.py module.
+ """
+ type_ = type_.replace('"DefaultType"', "DefaultType")
+ type_ = type_.replace('"InstrumentedField"', "InstrumentedField")
+ type_ = re.sub(r'"(function\.[a-zA-Z0-9_]+)"', r"\1", type_)
+ type_ = re.sub(r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', type_)
+ type_ = re.sub(r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", type_)
+ return type_
+
+
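# For illustration only (not part of the generator): the rules above keep
# types.* names quoted, since they are forward references within types.py
# itself, while DefaultType and the function./wrappers. helpers lose
# their quotes.
assert (
    type_for_types_py('Union["types.QueryBase", "DefaultType"]')
    == 'Union["QueryBase", DefaultType]'
)
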
class ElasticsearchSchema:
"""Operations related to the Elasticsearch schema."""
@@ -172,7 +184,10 @@ def get_python_type(self, schema_type, for_response=False):
elif type_name["name"] == "null":
return "None", None
elif type_name["name"] == "Field":
- return 'Union[str, "InstrumentedField"]', None
+ if for_response:
+ return "str", None
+ else:
+ return 'Union[str, "InstrumentedField"]', None
else:
# not an instance of a native type, so we get the type and try again
return self.get_python_type(
@@ -189,7 +204,10 @@ def get_python_type(self, schema_type, for_response=False):
type_name["namespace"] == "_types.aggregations"
and type_name["name"] == "Buckets"
):
- return "Dict[str, Query]", {"type": "query", "hash": True}
+ if for_response:
+ return "Union[Sequence[Any], Dict[str, Any]]", None
+ else:
+ return "Dict[str, Query]", {"type": "query", "hash": True}
elif (
type_name["namespace"] == "_types.aggregations"
and type_name["name"] == "CompositeAggregationSource"
@@ -343,9 +361,7 @@ def add_attribute(self, k, arg, for_types_py=False, for_response=False):
attributes.
"""
try:
- type_, param = schema.get_python_type(
- arg["type"], for_response=for_response
- )
+ type_, param = self.get_python_type(arg["type"], for_response=for_response)
except RuntimeError:
type_ = "Any"
param = None
@@ -357,11 +373,7 @@ def add_attribute(self, k, arg, for_types_py=False, for_response=False):
type_ = add_dict_type(type_) # interfaces can be given as dicts
type_ = add_not_set(type_)
if for_types_py:
- type_ = type_.replace('"DefaultType"', "DefaultType")
- type_ = type_.replace('"InstrumentedField"', "InstrumentedField")
- type_ = re.sub(r'"(function\.[a-zA-Z0-9_]+)"', r"\1", type_)
- type_ = re.sub(r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', type_)
- type_ = re.sub(r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", type_)
+ type_ = type_for_types_py(type_)
required = "(required) " if arg["required"] else ""
server_default = (
f" Defaults to `{arg['serverDefault']}` if omitted."
@@ -409,11 +421,11 @@ class representation.
):
# we do not support this behavior, so we ignore it
continue
- key_type, _ = schema.get_python_type(
+ key_type, _ = self.get_python_type(
behavior["generics"][0], for_response=for_response
)
if "InstrumentedField" in key_type:
- value_type, _ = schema.get_python_type(
+ value_type, _ = self.get_python_type(
behavior["generics"][1], for_response=for_response
)
if for_types_py:
@@ -502,7 +514,7 @@ def property_to_python_class(self, p):
"properties": [p],
}
else:
- type_ = schema.find_type(name, namespace)
+ type_ = self.find_type(name, namespace)
if p["name"] in AGG_TYPES:
k["parent"] = AGG_TYPES[p["name"]]
@@ -525,7 +537,7 @@ def property_to_python_class(self, p):
for arg in type_["properties"]:
self.add_attribute(k, arg)
if "inherits" in type_ and "type" in type_["inherits"]:
- type_ = schema.find_type(
+ type_ = self.find_type(
type_["inherits"]["type"]["name"],
type_["inherits"]["type"]["namespace"],
)
@@ -538,6 +550,7 @@ def property_to_python_class(self, p):
for other in type_["type"]["items"]:
other_class = self.interface_to_python_class(
other["type"]["name"],
+ other["type"]["namespace"],
for_types_py=False,
)
other_class["parent"] = k["name"]
@@ -553,9 +566,9 @@ def property_to_python_class(self, p):
)
elif kind == "dictionary_of":
- key_type, _ = schema.get_python_type(p["type"]["key"])
+ key_type, _ = self.get_python_type(p["type"]["key"])
if "InstrumentedField" in key_type:
- value_type, _ = schema.get_python_type(p["type"]["value"])
+ value_type, _ = self.get_python_type(p["type"]["value"])
if p["type"]["singleKey"]:
# special handling for single-key dicts with field key
k["args"] = [
@@ -625,6 +638,9 @@ def interface_to_python_class(
"required": bool,
"positional": bool,
],
+ "buckets_as_dict": "type" # optional, only present in aggregation response
+ # classes that have buckets that can have a list
+ # or dict representation
}
```
"""
@@ -636,43 +652,107 @@ def interface_to_python_class(
# but the location of the properties is different
type_ = type_["body"]
k = {"name": interface, "for_response": for_response, "args": []}
+ k["docstring"] = wrapped_doc(type_.get("description") or "")
self.add_behaviors(
type_, k, for_types_py=for_types_py, for_response=for_response
)
+ generics = []
while True:
for arg in type_["properties"]:
if interface == "ResponseBody" and arg["name"] == "hits":
k["args"].append(
{
"name": "hits",
- "type": "List[_R]",
+ "type": "Sequence[_R]",
"doc": [":arg hits: search results"],
"required": arg["required"],
}
)
elif interface == "ResponseBody" and arg["name"] == "aggregations":
+ # Aggregations are tricky because the DSL client uses a
+ # flexible representation that is difficult to generate
+ # from the schema.
+ # To handle this we let the generator do its work by calling
+ # `add_attribute()`, but then we set the generated attribute
+ # aside and replace it with the DSL's `AggResponse` class.
+ # The generated type is still used in the type hints of the
+ # variables and methods of this class.
+ self.add_attribute(
+ k, arg, for_types_py=for_types_py, for_response=for_response
+ )
+ k["aggregate_type"] = (
+ k["args"][-1]["type"]
+ .split("Mapping[str, ")[1]
+ .rsplit("]", 1)[0]
+ )
+ k["args"][-1] = {
+ "name": "aggregations",
+ "type": '"AggResponse[_R]"',
+ "doc": [":arg aggregations: aggregation results"],
+ "required": arg["required"],
+ }
+ elif (
+ "name" in type_
+ and type_["name"]["name"] == "MultiBucketAggregateBase"
+ and arg["name"] == "buckets"
+ ):
+ # Also during aggregation response generation, the "buckets"
+ # attribute that many aggregation responses have is very
+ # complex, supporting over a dozen different aggregation
+ # types via generics, each in array or object configurations.
+ # Typing this attribute proved very difficult. A solution
+ # that worked with mypy and pyright is to type "buckets"
+ # for the list form, and create a `buckets_as_dict`
+ # property that is typed appropriately for accessing the
+ # buckets in dictionary form.
+ # The generic type is assumed to be the first in the list,
+ # which is a simplification that should be improved when a
+ # more complete implementation of generics is added.
+ if generics[0]["type"]["name"] == "Void":
+ generic_type = "Any"
+ else:
+ _g = self.find_type(
+ generics[0]["type"]["name"],
+ generics[0]["type"]["namespace"],
+ )
+ generic_type, _ = self.get_python_type(
+ _g, for_response=for_response
+ )
+ generic_type = type_for_types_py(generic_type)
k["args"].append(
{
- "name": "aggregations",
- "type": '"AggResponse[_R]"',
- "doc": [":arg aggregations: aggregation results"],
- "required": arg["required"],
+ "name": arg["name"],
+ # for the type we only include the array form, since
+ # this client does not request the dict form
+ "type": f"Sequence[{generic_type}]",
+ "doc": [
+ ":arg buckets: (required) the aggregation buckets as a list"
+ ],
+ "required": True,
}
)
+ k["buckets_as_dict"] = generic_type
else:
- schema.add_attribute(
+ if interface == "Hit" and arg["name"].startswith("_"):
+ # Python DSL removes the underscore prefix from all the
+ # properties of the hit, so we do the same
+ arg["name"] = arg["name"][1:]
+
+ self.add_attribute(
k, arg, for_types_py=for_types_py, for_response=for_response
)
if "inherits" not in type_ or "type" not in type_["inherits"]:
break
- if "parent" not in k:
- k["parent"] = type_["inherits"]["type"]["name"]
- if type_["inherits"]["type"]["name"] not in self.interfaces:
- self.interfaces.append(type_["inherits"]["type"]["name"])
- if for_response:
- self.response_interfaces.append(type_["inherits"]["type"]["name"])
+ if "generics" in type_["inherits"]:
+ # Generics are only supported for certain specific cases at this
+ # time. Here we just save them so that they can be recalled later
+ # while traversing up the parent classes to find inherited
+ # attributes.
+ for generic_type in type_["inherits"]["generics"]:
+ generics.append(generic_type)
+
type_ = self.find_type(
type_["inherits"]["type"]["name"],
type_["inherits"]["type"]["namespace"],
@@ -755,16 +835,6 @@ def generate_types_py(schema, filename):
if k in classes_list:
continue
classes_list.append(k)
- parent = k.get("parent")
- parent_index = len(classes_list) - 1
- while parent:
- try:
- classes_list.index(classes[parent])
- break
- except ValueError:
- pass
- classes_list.insert(parent_index, classes[parent])
- parent = classes[parent].get("parent")
with open(filename, "wt") as f:
f.write(types_py.render(classes=classes_list))
diff --git a/utils/templates/response.__init__.py.tpl b/utils/templates/response.__init__.py.tpl
index a3812616..f9ae5c4e 100644
--- a/utils/templates/response.__init__.py.tpl
+++ b/utils/templates/response.__init__.py.tpl
@@ -40,7 +40,7 @@ if TYPE_CHECKING:
from ..update_by_query_base import UpdateByQueryBase
from .. import types
-__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta"]
+__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta", "AggregateResponseType"]
class Response(AttrDict[Any], Generic[_R]):
@@ -173,21 +173,25 @@ class Response(AttrDict[Any], Generic[_R]):
return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore
+AggregateResponseType = {{ response["aggregate_type"] }}
+
+
class AggResponse(AttrDict[Any], Generic[_R]):
+ """An Elasticsearch aggregation response."""
_meta: Dict[str, Any]
def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]):
super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs})
super().__init__(data)
- def __getitem__(self, attr_name: str) -> Any:
+ def __getitem__(self, attr_name: str) -> AggregateResponseType:
if attr_name in self._meta["aggs"]:
# don't do self._meta['aggs'][attr_name] to avoid copying
agg = self._meta["aggs"].aggs[attr_name]
- return agg.result(self._meta["search"], self._d_[attr_name])
- return super().__getitem__(attr_name)
+ return cast(AggregateResponseType, agg.result(self._meta["search"], self._d_[attr_name]))
+ return super().__getitem__(attr_name) # type: ignore
- def __iter__(self) -> Iterator["Agg"]: # type: ignore[override]
+ def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override]
for name in self._meta["aggs"]:
yield self[name]
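# With the template change above, response.aggregations["name"] is typed
# as AggregateResponseType, the union of all generated aggregate classes,
# instead of Any. A hedged sketch of narrowing it, where the aggregation
# name and the concrete class are assumptions:
from typing import cast

from elasticsearch_dsl.types import StringTermsAggregate

tags = cast(StringTermsAggregate, response.aggregations["tags"])
for bucket in tags.buckets:
    print(bucket.key, bucket.doc_count)
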
diff --git a/utils/templates/types.py.tpl b/utils/templates/types.py.tpl
index 49617f5f..0571e068 100644
--- a/utils/templates/types.py.tpl
+++ b/utils/templates/types.py.tpl
@@ -28,8 +28,16 @@ PipeSeparatedFlags = str
{% for k in classes %}
class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}):
- {% if k.args %}
+ {% if k.docstring or k.args %}
"""
+ {% for line in k.docstring %}
+ {{ line }}
+ {% endfor %}
+ {% if k.args %}
+ {% if k.docstring %}
+
+ {% endif %}
+ {% endif %}
{% for arg in k.args %}
{% for line in arg.doc %}
{{ line }}
@@ -86,6 +94,12 @@ class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}):
super().__init__(kwargs)
{% endif %}
{% endif %}
+ {% if k.buckets_as_dict %}
+
+ @property
+ def buckets_as_dict(self) -> Mapping[str, {{ k.buckets_as_dict }}]:
+ return self.buckets # type: ignore
+ {% endif %}
{% else %}
pass
{% endif %}