diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/404.html b/404.html new file mode 100644 index 000000000..7ea27df9c --- /dev/null +++ b/404.html @@ -0,0 +1,1956 @@ + + + +
+ + + + + + + + + + + + + + + + +Fastapi app creation.
+DEFAULT_STATUS_CODES
+
STAC_API_VERSION
+
class StacApi(
+ settings: stac_fastapi.types.config.ApiSettings,
+ client: Union[stac_fastapi.types.core.AsyncBaseCoreClient, stac_fastapi.types.core.BaseCoreClient],
+ extensions: List[stac_fastapi.types.extension.ApiExtension] = NOTHING,
+ exceptions: Dict[Type[Exception], int] = NOTHING,
+ app: fastapi.applications.FastAPI = NOTHING,
+ router: fastapi.routing.APIRouter = NOTHING,
+ title: str = NOTHING,
+ api_version: str = NOTHING,
+ stac_version: str = '1.0.0',
+ description: str = NOTHING,
+ search_get_request_model: Type[stac_fastapi.types.search.BaseSearchGetRequest] = <class 'stac_fastapi.types.search.BaseSearchGetRequest'>,
+ search_post_request_model: Type[stac_fastapi.types.search.BaseSearchPostRequest] = <class 'stac_fastapi.types.search.BaseSearchPostRequest'>,
+ collections_get_request_model: Type[stac_fastapi.types.search.APIRequest] = <class 'stac_fastapi.api.models.EmptyRequest'>,
+ collection_get_request_model: Type[stac_fastapi.types.search.APIRequest] = <class 'stac_fastapi.api.models.CollectionUri'>,
+ items_get_request_model: Type[stac_fastapi.types.search.APIRequest] = <class 'stac_fastapi.api.models.ItemCollectionUri'>,
+ item_get_request_model: Type[stac_fastapi.types.search.APIRequest] = <class 'stac_fastapi.api.models.ItemUri'>,
+ response_class: Type[starlette.responses.Response] = <class 'starlette.responses.JSONResponse'>,
+ middlewares: List[starlette.middleware.Middleware] = NOTHING,
+ route_dependencies: List[Tuple[List[stac_fastapi.api.routes.Scope], List[fastapi.params.Depends]]] = []
+)
+
StacApi factory.
+Factory for creating a STAC-compliant FastAPI application. After
+instantiation, the application is accessible from the StacApi.app
attribute.
Name | +Type | +Description | +Default | +
---|---|---|---|
settings | +None | +API settings and configuration, potentially using environment variables. See pydantic-docs.helpmanual.io/usage/settings/. |
+None | +
client | +None | +A subclass of stac_api.clients.BaseCoreClient. Defines the application logic which is injected into the API. |
+None | +
extensions | +None | +API extensions to include with the application. This may include official STAC extensions as well as third-party add ons. |
+None | +
exceptions | +None | +Defines a global mapping between exceptions and status codes, allowing configuration of response behavior on certain exceptions (fastapi.tiangolo.com/tutorial/handling-errors/#install-custom-exception-handlers). |
+None | +
app | +None | +The FastAPI application, defaults to a fresh application. | +None | +
route_dependencies | +None | +List of tuples of route scope dicts (e.g. {'path': '/collections', 'method': 'POST'}) and lists of dependencies (e.g. [Depends(oauth2_scheme)]). Applies specified dependencies to specified routes. This is useful for applying custom auth requirements to routes defined elsewhere in the application. |
+None | +
def add_health_check(
+ self
+)
+
Add a health check.
+def add_route_dependencies(
+ self,
+ scopes: List[stac_fastapi.api.routes.Scope],
+ dependencies=typing.List[fastapi.params.Depends]
+) -> None
+
Add custom dependencies to routes.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
scopes | +None | +list of scopes. Each scope should be a dict with a path and method property. |
+None | +
dependencies | +None | +list of FastAPI dependencies to apply to each scope. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
def customize_openapi(
+ self
+) -> Optional[Dict[str, Any]]
+
Customize openapi schema.
+def get_extension(
+ self,
+ extension: Type[stac_fastapi.types.extension.ApiExtension]
+) -> Optional[stac_fastapi.types.extension.ApiExtension]
+
Get an extension.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
extension | +None | +extension to check for. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The extension instance, if it exists. | +
def register_conformance_classes(
+ self
+)
+
Register conformance classes (GET /conformance).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_core(
+ self
+)
+
Register core STAC endpoints.
GET / + GET /conformance + GET /collections + GET /collections/{collection_id} + GET /collections/{collection_id}/items + GET /collections/{collection_id}/items/{item_id} + GET /search + POST /search
+Injects application logic (StacApi.client) into the API layer.
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_get_collection(
+ self
+)
+
Register get collection endpoint (GET /collections/{collection_id}).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_get_collections(
+ self
+)
+
Register get collections endpoint (GET /collections).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_get_item(
+ self
+)
+
Register get item endpoint (GET /collections/{collection_id}/items/{item_id}).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_get_item_collection(
+ self
+)
+
Register get item collection endpoint (GET /collections/{collection_id}/items).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_get_search(
+ self
+)
+
Register search endpoint (GET /search).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_landing_page(
+ self
+)
+
Register landing page (GET /).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_post_search(
+ self
+)
+
Register search endpoint (POST /search).
+Returns:
+Type | +Description | +
---|---|
None | +None | +
Application settings.
+class AddOns(
+ *args,
+ **kwds
+)
+
Enumeration of available third party add ons.
+bulk_transaction
+
name
+
value
+
class ApiExtensions(
+ *args,
+ **kwds
+)
+
Enumeration of available STAC API extensions.
+Ref: @stac-api-extensions
+aggregation
+
collection_search
+
fields
+
filter
+
free_text
+
name
+
query
+
sort
+
transaction
+
value
+
Error handling.
+DEFAULT_STATUS_CODES
+
logger
+
def add_exception_handlers(
+ app: fastapi.applications.FastAPI,
+ status_codes: Dict[Type[Exception], int]
+) -> None
+
Add exception handlers to the FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +the FastAPI application. | +None | +
status_codes | +None | +mapping between exceptions and status codes. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
def exception_handler_factory(
+ status_code: int
+) -> Callable
+
Create a FastAPI exception handler for a particular status code.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
status_code | +None | +HTTP status code. | +None | +
Returns:
+Type | +Description | +
---|---|
callable | +an exception handler. | +
class ErrorResponse(
+ /,
+ *args,
+ **kwargs
+)
+
A JSON error response returned by the API.
+The STAC API spec expects that code
and description
are both present in
+the payload.
Name | +Type | +Description | +Default | +
---|---|---|---|
code | +None | +A code representing the error, semantics are up to implementor. | +None | +
description | +None | +A description of the error. | +None | +
def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+ + + + + + + + + + + + + +Api middleware.
+HTTPS_PORT
+
HTTP_PORT
+
class CORSMiddleware(
+ app: Callable[[MutableMapping[str, Any], Callable[[], Awaitable[MutableMapping[str, Any]]], Callable[[MutableMapping[str, Any]], Awaitable[NoneType]]], Awaitable[NoneType]],
+ allow_origins: Sequence[str] = ('*',),
+ allow_methods: Sequence[str] = ('OPTIONS', 'POST', 'GET'),
+ allow_headers: Sequence[str] = ('Content-Type',),
+ allow_credentials: bool = False,
+ allow_origin_regex: Optional[str] = None,
+ expose_headers: Sequence[str] = (),
+ max_age: int = 600
+)
+
Subclass of Starlette's standard CORS middleware with default values set to those
+recommended by the STAC API spec.
+ +def allow_explicit_origin(
+ headers: 'MutableHeaders',
+ origin: 'str'
+) -> 'None'
+
def is_allowed_origin(
+ self,
+ origin: 'str'
+) -> 'bool'
+
def preflight_response(
+ self,
+ request_headers: 'Headers'
+) -> 'Response'
+
def send(
+ self,
+ message: 'Message',
+ send: 'Send',
+ request_headers: 'Headers'
+) -> 'None'
+
def simple_response(
+ self,
+ scope: 'Scope',
+ receive: 'Receive',
+ send: 'Send',
+ request_headers: 'Headers'
+) -> 'None'
+
class ProxyHeaderMiddleware(
+ app: Callable[[MutableMapping[str, Any], Callable[[], Awaitable[MutableMapping[str, Any]]], Callable[[MutableMapping[str, Any]], Awaitable[NoneType]]], Awaitable[NoneType]]
+)
+
Account for forwarding headers when deriving base URL.
+Prioritise standard Forwarded header, look for non-standard X-Forwarded-* if missing. +Default to what can be derived from the URL if no headers provided. Middleware updates +the host header that is interpreted by starlette when deriving Request.base_url.
+ + + + + + + + + + + + + +Api request/response models.
+def create_get_request_model(
+ extensions: Optional[List[stac_fastapi.types.extension.ApiExtension]],
+ base_model: stac_fastapi.types.search.BaseSearchGetRequest = <class 'stac_fastapi.types.search.BaseSearchGetRequest'>
+) -> stac_fastapi.types.search.APIRequest
+
Wrap create_request_model to create the GET request model.
+def create_post_request_model(
+ extensions: Optional[List[stac_fastapi.types.extension.ApiExtension]],
+ base_model: stac_fastapi.types.search.BaseSearchPostRequest = <class 'stac_fastapi.types.search.BaseSearchPostRequest'>
+) -> Type[pydantic.main.BaseModel]
+
Wrap create_request_model to create the POST request model.
+def create_request_model(
+ model_name='SearchGetRequest',
+ base_model: Union[Type[pydantic.main.BaseModel], stac_fastapi.types.search.APIRequest] = <class 'stac_fastapi.types.search.BaseSearchGetRequest'>,
+ extensions: Optional[List[stac_fastapi.types.extension.ApiExtension]] = None,
+ mixins: Union[List[pydantic.main.BaseModel], List[stac_fastapi.types.search.APIRequest], NoneType] = None,
+ request_type: Optional[str] = 'GET'
+) -> Union[Type[pydantic.main.BaseModel], stac_fastapi.types.search.APIRequest]
+
Create a pydantic model for validating request bodies.
+class CollectionUri(
+ collection_id: typing.Annotated[str, Path(PydanticUndefined)]
+)
+
Get or delete collection.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class EmptyRequest(
+
+)
+
Empty request.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class GeoJSONResponse(
+ content: 'typing.Any',
+ status_code: 'int' = 200,
+ headers: 'typing.Mapping[str, str] | None' = None,
+ media_type: 'str | None' = None,
+ background: 'BackgroundTask | None' = None
+)
+
JSON with custom, vendor content-type.
+charset
+
media_type
+
headers
+
def delete_cookie(
+ self,
+ key: 'str',
+ path: 'str' = '/',
+ domain: 'str | None' = None,
+ secure: 'bool' = False,
+ httponly: 'bool' = False,
+ samesite: "typing.Literal['lax', 'strict', 'none'] | None" = 'lax'
+) -> 'None'
+
def init_headers(
+ self,
+ headers: 'typing.Mapping[str, str] | None' = None
+) -> 'None'
+
def render(
+ self,
+ content: 'typing.Any'
+) -> 'bytes'
+
def set_cookie(
+ self,
+ key: 'str',
+ value: 'str' = '',
+ max_age: 'int | None' = None,
+ expires: 'datetime | str | int | None' = None,
+ path: 'str | None' = '/',
+ domain: 'str | None' = None,
+ secure: 'bool' = False,
+ httponly: 'bool' = False,
+ samesite: "typing.Literal['lax', 'strict', 'none'] | None" = 'lax'
+) -> 'None'
+
class ItemCollectionUri(
+ collection_id: typing.Annotated[str, Path(PydanticUndefined)],
+ limit: Annotated[Optional[Annotated[int, Gt(gt=0), AfterValidator(func=<function crop at 0x7f85cf1b1ee0>)]], Query(PydanticUndefined)] = 10,
+ bbox: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ datetime: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Get item collection.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class ItemUri(
+ collection_id: typing.Annotated[str, Path(PydanticUndefined)],
+ item_id: typing.Annotated[str, Path(PydanticUndefined)]
+)
+
Get or delete item.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class JSONSchemaResponse(
+ content: 'typing.Any',
+ status_code: 'int' = 200,
+ headers: 'typing.Mapping[str, str] | None' = None,
+ media_type: 'str | None' = None,
+ background: 'BackgroundTask | None' = None
+)
+
JSON with custom, vendor content-type.
+charset
+
media_type
+
headers
+
def delete_cookie(
+ self,
+ key: 'str',
+ path: 'str' = '/',
+ domain: 'str | None' = None,
+ secure: 'bool' = False,
+ httponly: 'bool' = False,
+ samesite: "typing.Literal['lax', 'strict', 'none'] | None" = 'lax'
+) -> 'None'
+
def init_headers(
+ self,
+ headers: 'typing.Mapping[str, str] | None' = None
+) -> 'None'
+
def render(
+ self,
+ content: 'typing.Any'
+) -> 'bytes'
+
def set_cookie(
+ self,
+ key: 'str',
+ value: 'str' = '',
+ max_age: 'int | None' = None,
+ expires: 'datetime | str | int | None' = None,
+ path: 'str | None' = '/',
+ domain: 'str | None' = None,
+ secure: 'bool' = False,
+ httponly: 'bool' = False,
+ samesite: "typing.Literal['lax', 'strict', 'none'] | None" = 'lax'
+) -> 'None'
+
openapi.
+def update_openapi(
+ app: fastapi.applications.FastAPI
+) -> fastapi.applications.FastAPI
+
Update OpenAPI response content-type.
+This function modifies the openapi route to comply with the STAC API spec's required +content-type response header.
+ + + + + + + + + + + + + +Route factories.
+HTTP_204_NO_CONTENT
+
def add_route_dependencies(
+ routes: List[starlette.routing.BaseRoute],
+ scopes: List[stac_fastapi.api.routes.Scope],
+ dependencies=typing.List[fastapi.params.Depends]
+) -> None
+
Add dependencies to routes.
+Allows a developer to add dependencies to a route after the route has been +defined.
+"*" can be used for path or method to match all allowed routes.
+Returns:
+Type | +Description | +
---|---|
None | +None | +
def create_async_endpoint(
+ func: Callable,
+ request_model: Union[Type[stac_fastapi.types.search.APIRequest], Type[pydantic.main.BaseModel], Dict]
+)
+
Wrap a function in a coroutine which may be used to create a FastAPI endpoint.
+Synchronous functions are executed asynchronously using a background thread.
+def sync_to_async(
+ func
+)
+
Run synchronous function asynchronously in a background thread.
+class Scope(
+ /,
+ *args,
+ **kwargs
+)
+
More strict version of Starlette's Scope.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+ + + + + + + + + + + + + +Library version.
+ + + + + + + + + + + + + +Aggregation Extension.
+class AggregationConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the Aggregation extension.
+See +stac-api-extensions/aggregation
+AGGREGATION
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(args, *kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+class AggregationExtension(
+ schema_href: Optional[str] = None,
+ client: Union[stac_fastapi.extensions.core.aggregation.client.AsyncBaseAggregationClient, stac_fastapi.extensions.core.aggregation.client.BaseAggregationClient] = NOTHING,
+ conformance_classes: List[str] = [<AggregationConformanceClasses.AGGREGATION: 'https://api.stacspec.org/v0.3.0/aggregation'>],
+ router: fastapi.routing.APIRouter = NOTHING
+)
+
Aggregation Extension.
+The purpose of the Aggregation Extension is to provide an endpoint similar to +the Search endpoint (/search), but which will provide aggregated information +on matching Items rather than the Items themselves. This is highly influenced +by the Elasticsearch and OpenSearch aggregation endpoint, but with a more +regular structure for responses.
+The Aggregation extension adds several endpoints which allow the retrieval of +available aggregation fields and aggregation buckets based on a search query: + GET /aggregations + POST /aggregations + GET /collections/{collection_id}/aggregations + POST /collections/{collection_id}/aggregations + GET /aggregate + POST /aggregate + GET /collections/{collection_id}/aggregate + POST /collections/{collection_id}/aggregate
+github.com/stac-api-extensions/aggregation/blob/main/README.md
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +None | +Conformance classes provided by the extension | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Aggregation extensions clients.
+class AsyncBaseAggregationClient(
+
+)
+
Defines an async pattern for implementing the STAC aggregation extension.
+def aggregate(
+ self,
+ collection_id: Optional[str] = None,
+ aggregations: Union[str, List[str], NoneType] = None,
+ collections: Optional[List[str]] = None,
+ ids: Optional[List[str]] = None,
+ bbox: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]], NoneType] = None,
+ intersects: Optional[Annotated[Union[geojson_pydantic.geometries.Point, geojson_pydantic.geometries.MultiPoint, geojson_pydantic.geometries.LineString, geojson_pydantic.geometries.MultiLineString, geojson_pydantic.geometries.Polygon, geojson_pydantic.geometries.MultiPolygon, geojson_pydantic.geometries.GeometryCollection], FieldInfo(annotation=NoneType, required=True, discriminator='type')]] = None,
+ datetime: Union[datetime.datetime, Tuple[datetime.datetime, datetime.datetime], Tuple[datetime.datetime, NoneType], Tuple[NoneType, datetime.datetime], NoneType] = None,
+ limit: Optional[int] = 10,
+ **kwargs
+) -> stac_fastapi.extensions.core.aggregation.types.AggregationCollection
+
Return the aggregation buckets for a given search result
+def get_aggregations(
+ self,
+ collection_id: Optional[str] = None,
+ **kwargs
+) -> stac_fastapi.extensions.core.aggregation.types.AggregationCollection
+
Get the aggregations available for the given collection_id.
+If collection_id is None, returns the available aggregations over all +collections.
+class BaseAggregationClient(
+
+)
+
Defines a pattern for implementing the STAC aggregation extension.
+def aggregate(
+ self,
+ collection_id: Optional[str] = None,
+ **kwargs
+) -> stac_fastapi.extensions.core.aggregation.types.AggregationCollection
+
Return the aggregation buckets for a given search result
+def get_aggregations(
+ self,
+ collection_id: Optional[str] = None,
+ **kwargs
+) -> stac_fastapi.extensions.core.aggregation.types.AggregationCollection
+
Get the aggregations available for the given collection_id.
+If collection_id is None, returns the available aggregations over all +collections.
+ + + + + + + + + + + + + +Aggregation extension module.
+class AggregationExtension(
+ schema_href: Optional[str] = None,
+ client: Union[stac_fastapi.extensions.core.aggregation.client.AsyncBaseAggregationClient, stac_fastapi.extensions.core.aggregation.client.BaseAggregationClient] = NOTHING,
+ conformance_classes: List[str] = [<AggregationConformanceClasses.AGGREGATION: 'https://api.stacspec.org/v0.3.0/aggregation'>],
+ router: fastapi.routing.APIRouter = NOTHING
+)
+
Aggregation Extension.
+The purpose of the Aggregation Extension is to provide an endpoint similar to +the Search endpoint (/search), but which will provide aggregated information +on matching Items rather than the Items themselves. This is highly influenced +by the Elasticsearch and OpenSearch aggregation endpoint, but with a more +regular structure for responses.
+The Aggregation extension adds several endpoints which allow the retrieval of +available aggregation fields and aggregation buckets based on a search query: + GET /aggregations + POST /aggregations + GET /collections/{collection_id}/aggregations + POST /collections/{collection_id}/aggregations + GET /aggregate + POST /aggregate + GET /collections/{collection_id}/aggregate + POST /collections/{collection_id}/aggregate
+github.com/stac-api-extensions/aggregation/blob/main/README.md
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +None | +Conformance classes provided by the extension | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Request model for the Aggregation extension.
+class AggregationExtensionGetRequest(
+ collections: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ ids: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ bbox: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ intersects: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ datetime: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ limit: Annotated[Optional[Annotated[int, Gt(gt=0), AfterValidator(func=<function crop at 0x7f85cf1b1ee0>)]], Query(PydanticUndefined)] = 10,
+ aggregations: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Aggregation Extension GET request model.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class AggregationExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Aggregation Extension POST request model.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
def validate_bbox(
+ v: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
+) -> Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
+
def validate_datetime(
+ value: str
+) -> str
+
def validate_spatial(
+ values: Dict[str, Any]
+) -> Dict[str, Any]
+
end_date
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+spatial_filter
+
Return a geojson-pydantic object representing the spatial filter for the search request.
+Check for both because the bbox
and intersects
parameters are mutually exclusive.
start_date
+
def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self: 'BaseModel',
+ context: 'Any',
+ /
+) -> 'None'
+
We need to both initialize private attributes and call the user-defined model_post_init
+method.
+ + + + + + + + + + + + + +Aggregation Extension types.
+class Aggregation(
+ /,
+ *args,
+ **kwargs
+)
+
A STAC aggregation.
+buckets
+
overflow
+
value
+
def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class AggregationCollection(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Item Aggregation Collection.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class Bucket(
+ /,
+ *args,
+ **kwargs
+)
+
A STAC aggregation bucket.
+frequency
+
to
+
def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+ + + + + + + + + + + + + +collection-search extensions clients.
+class AsyncBaseCollectionSearchClient(
+
+)
+
Defines a pattern for implementing the STAC collection-search POST extension.
+def post_all_collections(
+ self,
+ search_request: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Get all available collections.
+Called with POST /collections
.
Returns:
+Type | +Description | +
---|---|
None | +A list of collections. | +
class BaseCollectionSearchClient(
+
+)
+
Defines a pattern for implementing the STAC collection-search POST extension.
+def post_all_collections(
+ self,
+ search_request: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Get all available collections.
+Called with POST /collections
.
Returns:
+Type | +Description | +
---|---|
None | +A list of collections. | +
Collection-Search extension.
+class CollectionSearchExtension(
+ GET: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest'>,
+ conformance_classes: List[str] = [<ConformanceClasses.COLLECTIONSEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search'>, <ConformanceClasses.BASIS: 'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/simple-query'>],
+ schema_href: Optional[str] = None
+)
+
Collection-Search Extension.
+The Collection-Search extension adds functionality to the GET - /collections
+endpoint which allows the caller to include or exclude specific fields from the API
+response.
+Registering this extension with the application has the added effect of
+removing the ItemCollection
response model from the /search
endpoint, as
+the Fields extension allows the API to return potentially invalid responses
+by excluding fields which are required by the STAC spec, such as geometry.
stac-api-extensions/collection-search
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def from_extensions(
+ extensions: List[stac_fastapi.types.extension.ApiExtension],
+ schema_href: Optional[str] = None
+) -> 'CollectionSearchExtension'
+
Create CollectionSearchExtension object from extensions.
+def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +fastapi.FastAPI | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class CollectionSearchPostExtension(
+ client: Union[stac_fastapi.extensions.core.collection_search.client.AsyncBaseCollectionSearchClient, stac_fastapi.extensions.core.collection_search.client.BaseCollectionSearchClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ conformance_classes: List[str] = [<ConformanceClasses.COLLECTIONSEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search'>, <ConformanceClasses.BASIS: 'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/simple-query'>],
+ schema_href: Optional[str] = None,
+ router: fastapi.routing.APIRouter = NOTHING,
+ GET: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest'>,
+ POST: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest'>
+)
+
Collection-Search Extension.
+Extends the collection-search extension with an additional +POST - /collections endpoint
+NOTE: the POST - /collections endpoint can be conflicting with the +POST /collections endpoint registered for the Transaction extension.
+stac-api-extensions/collection-search
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def from_extensions(
+ extensions: List[stac_fastapi.types.extension.ApiExtension],
+ *,
+ client: Union[stac_fastapi.extensions.core.collection_search.client.AsyncBaseCollectionSearchClient, stac_fastapi.extensions.core.collection_search.client.BaseCollectionSearchClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ schema_href: Optional[str] = None,
+ router: Optional[fastapi.routing.APIRouter] = None
+) -> 'CollectionSearchPostExtension'
+
Create CollectionSearchPostExtension object from extensions.
+def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class ConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the Collection-Search extension.
+See +stac-api-extensions/collection-search
+BASIS
+
COLLECTIONSEARCH
+
FIELDS
+
FILTER
+
FREETEXT
+
QUERY
+
SORT
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(args, *kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+ + + + + + + + + + + + + +Collection-Search extension module.
+class CollectionSearchExtension(
+ GET: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest'>,
+ conformance_classes: List[str] = [<ConformanceClasses.COLLECTIONSEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search'>, <ConformanceClasses.BASIS: 'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/simple-query'>],
+ schema_href: Optional[str] = None
+)
+
Collection-Search Extension.
+The Collection-Search extension adds functionality to the GET - /collections
+endpoint which allows the caller to include or exclude specific fields from the API
+response.
+Registering this extension with the application has the added effect of
+removing the ItemCollection
response model from the /search
endpoint, as
+the Fields extension allows the API to return potentially invalid responses
+by excluding fields which are required by the STAC spec, such as geometry.
stac-api-extensions/collection-search
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def from_extensions(
+ extensions: List[stac_fastapi.types.extension.ApiExtension],
+ schema_href: Optional[str] = None
+) -> 'CollectionSearchExtension'
+
Create CollectionSearchExtension object from extensions.
+def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +fastapi.FastAPI | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class CollectionSearchPostExtension(
+ client: Union[stac_fastapi.extensions.core.collection_search.client.AsyncBaseCollectionSearchClient, stac_fastapi.extensions.core.collection_search.client.BaseCollectionSearchClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ conformance_classes: List[str] = [<ConformanceClasses.COLLECTIONSEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search'>, <ConformanceClasses.BASIS: 'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/simple-query'>],
+ schema_href: Optional[str] = None,
+ router: fastapi.routing.APIRouter = NOTHING,
+ GET: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest'>,
+ POST: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest'>
+)
+
Collection-Search Extension.
+Extends the collection-search extension with an additional +POST - /collections endpoint
+NOTE: the POST - /collections endpoint can be conflicting with the +POST /collections endpoint registered for the Transaction extension.
+stac-api-extensions/collection-search
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def from_extensions(
+ extensions: List[stac_fastapi.types.extension.ApiExtension],
+ *,
+ client: Union[stac_fastapi.extensions.core.collection_search.client.AsyncBaseCollectionSearchClient, stac_fastapi.extensions.core.collection_search.client.BaseCollectionSearchClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ schema_href: Optional[str] = None,
+ router: Optional[fastapi.routing.APIRouter] = None
+) -> 'CollectionSearchPostExtension'
+
Create CollectionSearchPostExtension object from extensions.
+def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class ConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the Collection-Search extension.
+See +stac-api-extensions/collection-search
+BASIS
+
COLLECTIONSEARCH
+
FIELDS
+
FILTER
+
FREETEXT
+
QUERY
+
SORT
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(args, *kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+ + + + + + + + + + + + + +Request models for the Collection-Search extension.
+class BaseCollectionSearchGetRequest(
+ bbox: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ datetime: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ limit: Annotated[Optional[Annotated[int, Gt(gt=0), AfterValidator(func=<function crop at 0x7f85cf1b1ee0>)]], Query(PydanticUndefined)] = 10
+)
+
Basic additional Collection-Search parameters for the GET request.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class BaseCollectionSearchPostRequest(
+ /,
+ **data: 'Any'
+)
+
Collection-Search POST model.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
def validate_bbox(
+ v: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
+) -> Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
+
Validate bbox.
+def validate_datetime(
+ value: str
+) -> str
+
Validate datetime.
+end_date
+
end date.
+model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+start_date
+
start date.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self: 'BaseModel',
+ context: 'Any',
+ /
+) -> 'None'
+
This function is meant to behave like a BaseModel method to initialise private attributes.
+It takes context as an argument since that's what pydantic-core passes when calling it.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
self | +None | +The BaseModel instance. | +None | +
context | +None | +The context. | +None | +
Fields extension.
+class FieldsExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Fields Extension.
+The Fields extension adds functionality to the /search
endpoint which
+allows the caller to include or exclude specific fields from the API response.
+Registering this extension with the application has the added effect of
+removing the ItemCollection
response model from the /search
endpoint, as
+the Fields extension allows the API to return potentially invalid responses
+by excluding fields which are required by the STAC spec, such as geometry.
Name | +Type | +Description | +Default | +
---|---|---|---|
default_includes | +set | +defines the default set of included fields. | +None | +
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +fastapi.FastAPI | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Fields extension module.
+class FieldsExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Fields Extension.
+The Fields extension adds functionality to the /search
endpoint which
+allows the caller to include or exclude specific fields from the API response.
+Registering this extension with the application has the added effect of
+removing the ItemCollection
response model from the /search
endpoint, as
+the Fields extension allows the API to return potentially invalid responses
+by excluding fields which are required by the STAC spec, such as geometry.
Name | +Type | +Description | +Default | +
---|---|---|---|
default_includes | +set | +defines the default set of included fields. | +None | +
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +fastapi.FastAPI | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Request models for the fields extension.
+class FieldsExtensionGetRequest(
+ fields: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Additional fields for the GET request.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class FieldsExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Additional fields and schema for the POST request.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class PostFieldsExtension(
+ /,
+ **data: 'Any'
+)
+
FieldsExtension.
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +set of fields to include. | +None | +
exclude | +None | +set of fields to exclude. | +None | +
model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required. If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run. If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +Filter extensions clients.
+class AsyncBaseFiltersClient(
+
+)
+
Defines a pattern for implementing the STAC filter extension.
+def get_queryables(
+ self,
+ collection_id: Optional[str] = None,
+ **kwargs
+) -> Dict[str, Any]
+
Get the queryables available for the given collection_id.
+If collection_id is None, returns the intersection of all queryables over all +collections.
+This base implementation returns a blank queryable schema. This is not allowed +under OGC CQL but it is allowed by the STAC API Filter Extension +github.com/radiantearth/stac-api-spec/tree/master/fragments/filter#queryables
+class BaseFiltersClient(
+
+)
+
Defines a pattern for implementing the STAC filter extension.
+def get_queryables(
+ self,
+ collection_id: Optional[str] = None,
+ **kwargs
+) -> Dict[str, Any]
+
Get the queryables available for the given collection_id.
+If collection_id is None, returns the intersection of all queryables over all +collections.
+This base implementation returns a blank queryable schema. This is not allowed +under OGC CQL but it is allowed by the STAC API Filter Extension +stac-api-extensions/filter#queryables
+ + + + + + + + + + + + + +Filter Extension.
+class FilterConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the Filter extension.
+See +stac-api-extensions/filter
+ACCENT_CASE_INSENSITIVE_COMPARISON
+
ADVANCED_COMPARISON_OPERATORS
+
ARITHMETIC
+
ARRAYS
+
BASIC_CQL2
+
BASIC_SPATIAL_OPERATORS
+
CQL2_JSON
+
CQL2_TEXT
+
FEATURES_FILTER
+
FILTER
+
FUNCTIONS
+
ITEM_SEARCH_FILTER
+
PROPERTY_PROPERTY
+
SPATIAL_OPERATORS
+
TEMPORAL_OPERATORS
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(args, *kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+class FilterExtension(
+ schema_href: Optional[str] = None,
+ client: Union[stac_fastapi.extensions.core.filter.client.AsyncBaseFiltersClient, stac_fastapi.extensions.core.filter.client.BaseFiltersClient] = NOTHING,
+ conformance_classes: List[str] = [<FilterConformanceClasses.FILTER: 'http://www.opengis.net/spec/ogcapi-features-3/1.0/conf/filter'>, <FilterConformanceClasses.FEATURES_FILTER: 'http://www.opengis.net/spec/ogcapi-features-3/1.0/conf/features-filter'>, <FilterConformanceClasses.ITEM_SEARCH_FILTER: 'https://api.stacspec.org/v1.0.0-rc.2/item-search#filter'>, <FilterConformanceClasses.BASIC_CQL2: 'http://www.opengis.net/spec/cql2/1.0/conf/basic-cql2'>, <FilterConformanceClasses.CQL2_JSON: 'http://www.opengis.net/spec/cql2/1.0/conf/cql2-json'>, <FilterConformanceClasses.CQL2_TEXT: 'http://www.opengis.net/spec/cql2/1.0/conf/cql2-text'>],
+ router: fastapi.routing.APIRouter = NOTHING,
+ response_class: Type[starlette.responses.Response] = <class 'stac_fastapi.api.models.JSONSchemaResponse'>
+)
+
Filter Extension.
The filter extension adds several endpoints which allow the retrieval of +queryables and provides an expressive mechanism for searching based on Item properties.
+Name | +Type | +Description | +Default | +
---|---|---|---|
client | +None | +Queryables endpoint logic | +None | +
conformance_classes | +None | +Conformance classes provided by the extension | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Filter extension module.
+class FilterExtension(
+ schema_href: Optional[str] = None,
+ client: Union[stac_fastapi.extensions.core.filter.client.AsyncBaseFiltersClient, stac_fastapi.extensions.core.filter.client.BaseFiltersClient] = NOTHING,
+ conformance_classes: List[str] = [<FilterConformanceClasses.FILTER: 'http://www.opengis.net/spec/ogcapi-features-3/1.0/conf/filter'>, <FilterConformanceClasses.FEATURES_FILTER: 'http://www.opengis.net/spec/ogcapi-features-3/1.0/conf/features-filter'>, <FilterConformanceClasses.ITEM_SEARCH_FILTER: 'https://api.stacspec.org/v1.0.0-rc.2/item-search#filter'>, <FilterConformanceClasses.BASIC_CQL2: 'http://www.opengis.net/spec/cql2/1.0/conf/basic-cql2'>, <FilterConformanceClasses.CQL2_JSON: 'http://www.opengis.net/spec/cql2/1.0/conf/cql2-json'>, <FilterConformanceClasses.CQL2_TEXT: 'http://www.opengis.net/spec/cql2/1.0/conf/cql2-text'>],
+ router: fastapi.routing.APIRouter = NOTHING,
+ response_class: Type[starlette.responses.Response] = <class 'stac_fastapi.api.models.JSONSchemaResponse'>
+)
+
Filter Extension.
The filter extension adds several endpoints which allow the retrieval of +queryables and provides an expressive mechanism for searching based on Item properties.
+Name | +Type | +Description | +Default | +
---|---|---|---|
client | +None | +Queryables endpoint logic | +None | +
conformance_classes | +None | +Conformance classes provided by the extension | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Filter extension request models.
+FilterLang
+
class FilterExtensionGetRequest(
+ filter: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ filter_crs: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ filter_lang: Annotated[Optional[Literal['cql-json', 'cql2-json', 'cql2-text']], Query(PydanticUndefined)] = 'cql2-text'
+)
+
Filter extension GET request model.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class FilterExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Filter extension POST request model.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute. Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass of GenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic class Model with 2 type variables and a concrete model Model[str, int] , the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required. If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run. If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +Free-text extension.
+class FreeTextAdvancedExtension(
+ conformance_classes: List[str] = [<FreeTextConformanceClasses.SEARCH_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/item-search#advanced-free-text'>, <FreeTextConformanceClasses.COLLECTIONS_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search#advanced-free-text'>, <FreeTextConformanceClasses.ITEMS_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/ogcapi-features#advanced-free-text'>],
+ schema_href: Optional[str] = None
+)
+
Free-text Extension.
+The Free-text extension adds an additional q
parameter to /search
requests which
+allows the caller to perform free-text queries against STAC metadata.
stac-api-extensions/freetext-search?tab=readme-ov-file#advanced
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class FreeTextConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the Free-Text extension.
+See stac-api-extensions/freetext-search
+COLLECTIONS
+
COLLECTIONS_ADVANCED
+
ITEMS
+
ITEMS_ADVANCED
+
SEARCH
+
SEARCH_ADVANCED
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(*args, **kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+class FreeTextExtension(
+ conformance_classes: List[str] = [<FreeTextConformanceClasses.SEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/item-search#free-text'>, <FreeTextConformanceClasses.COLLECTIONS: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search#free-text'>, <FreeTextConformanceClasses.ITEMS: 'https://api.stacspec.org/v1.0.0-rc.1/ogcapi-features#free-text'>],
+ schema_href: Optional[str] = None
+)
+
Free-text Extension.
+The Free-text extension adds an additional q
parameter to /search
requests which
+allows the caller to perform free-text queries against STAC metadata.
stac-api-extensions/freetext-search?tab=readme-ov-file#basic
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Query extension module.
+class FreeTextAdvancedExtension(
+ conformance_classes: List[str] = [<FreeTextConformanceClasses.SEARCH_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/item-search#advanced-free-text'>, <FreeTextConformanceClasses.COLLECTIONS_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search#advanced-free-text'>, <FreeTextConformanceClasses.ITEMS_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/ogcapi-features#advanced-free-text'>],
+ schema_href: Optional[str] = None
+)
+
Free-text Extension.
+The Free-text extension adds an additional q
parameter to /search
requests which
+allows the caller to perform free-text queries against STAC metadata.
stac-api-extensions/freetext-search?tab=readme-ov-file#advanced
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class FreeTextConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the Free-Text extension.
+See stac-api-extensions/freetext-search
+COLLECTIONS
+
COLLECTIONS_ADVANCED
+
ITEMS
+
ITEMS_ADVANCED
+
SEARCH
+
SEARCH_ADVANCED
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(*args, **kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
The table must implement lookup/indexing via __getitem__, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+class FreeTextExtension(
+ conformance_classes: List[str] = [<FreeTextConformanceClasses.SEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/item-search#free-text'>, <FreeTextConformanceClasses.COLLECTIONS: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search#free-text'>, <FreeTextConformanceClasses.ITEMS: 'https://api.stacspec.org/v1.0.0-rc.1/ogcapi-features#free-text'>],
+ schema_href: Optional[str] = None
+)
+
Free-text Extension.
+The Free-text extension adds an additional q
parameter to /search
requests which
+allows the caller to perform free-text queries against STAC metadata.
stac-api-extensions/freetext-search?tab=readme-ov-file#basic
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension's method.
+The model can differ based on HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Request model for the Free-text extension.
+class FreeTextAdvancedExtensionGetRequest(
+ q: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Free-text Extension GET request model.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class FreeTextAdvancedExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Free-text Extension POST request model.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class FreeTextExtensionGetRequest(
+ q: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Free-text Extension GET request model.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class FreeTextExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Free-text Extension POST request model.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +stac_api.extensions.core module.
+class AggregationExtension(
+ schema_href: Optional[str] = None,
+ client: Union[stac_fastapi.extensions.core.aggregation.client.AsyncBaseAggregationClient, stac_fastapi.extensions.core.aggregation.client.BaseAggregationClient] = NOTHING,
+ conformance_classes: List[str] = [<AggregationConformanceClasses.AGGREGATION: 'https://api.stacspec.org/v0.3.0/aggregation'>],
+ router: fastapi.routing.APIRouter = NOTHING
+)
+
Aggregation Extension.
+The purpose of the Aggregation Extension is to provide an endpoint similar to +the Search endpoint (/search), but which will provide aggregated information +on matching Items rather than the Items themselves. This is highly influenced +by the Elasticsearch and OpenSearch aggregation endpoint, but with a more +regular structure for responses.
The Aggregation extension adds several endpoints which allow the retrieval of +available aggregation fields and aggregation buckets based on a search query: + GET /aggregations + POST /aggregations + GET /collections/{collection_id}/aggregations + POST /collections/{collection_id}/aggregations + GET /aggregate + POST /aggregate + GET /collections/{collection_id}/aggregate + POST /collections/{collection_id}/aggregate
+github.com/stac-api-extensions/aggregation/blob/main/README.md
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +None | +Conformance classes provided by the extension | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class CollectionSearchExtension(
+ GET: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest'>,
+ conformance_classes: List[str] = [<ConformanceClasses.COLLECTIONSEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search'>, <ConformanceClasses.BASIS: 'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/simple-query'>],
+ schema_href: Optional[str] = None
+)
+
Collection-Search Extension.
+The Collection-Search extension adds functionality to the GET - /collections
+endpoint which allows the caller to include or exclude specific fields from the API
+response.
+Registering this extension with the application has the added effect of
+removing the ItemCollection
response model from the /search
endpoint, as
+the Fields extension allows the API to return potentially invalid responses
+by excluding fields which are required by the STAC spec, such as geometry.
stac-api-extensions/collection-search
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def from_extensions(
+ extensions: List[stac_fastapi.types.extension.ApiExtension],
+ schema_href: Optional[str] = None
+) -> 'CollectionSearchExtension'
+
Create CollectionSearchExtension object from extensions.
+def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +fastapi.FastAPI | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class CollectionSearchPostExtension(
+ client: Union[stac_fastapi.extensions.core.collection_search.client.AsyncBaseCollectionSearchClient, stac_fastapi.extensions.core.collection_search.client.BaseCollectionSearchClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ conformance_classes: List[str] = [<ConformanceClasses.COLLECTIONSEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search'>, <ConformanceClasses.BASIS: 'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/simple-query'>],
+ schema_href: Optional[str] = None,
+ router: fastapi.routing.APIRouter = NOTHING,
+ GET: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchGetRequest'>,
+ POST: stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest = <class 'stac_fastapi.extensions.core.collection_search.request.BaseCollectionSearchPostRequest'>
+)
+
Collection-Search Extension.
Extends the collection-search extension with an additional +POST - /collections endpoint
+NOTE: the POST - /collections endpoint can be conflicting with the +POST /collections endpoint registered for the Transaction extension.
+stac-api-extensions/collection-search
+Name | +Type | +Description | +Default | +
---|---|---|---|
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def from_extensions(
+ extensions: List[stac_fastapi.types.extension.ApiExtension],
+ *,
+ client: Union[stac_fastapi.extensions.core.collection_search.client.AsyncBaseCollectionSearchClient, stac_fastapi.extensions.core.collection_search.client.BaseCollectionSearchClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ schema_href: Optional[str] = None,
+ router: Optional[fastapi.routing.APIRouter] = None
+) -> 'CollectionSearchPostExtension'
+
Create CollectionSearchPostExtension object from extensions.
+def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class FieldsExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Fields Extension.
+The Fields extension adds functionality to the /search
endpoint which
+allows the caller to include or exclude specific fields from the API response.
+Registering this extension with the application has the added effect of
+removing the ItemCollection
response model from the /search
endpoint, as
+the Fields extension allows the API to return potentially invalid responses
+by excluding fields which are required by the STAC spec, such as geometry.
Name | +Type | +Description | +Default | +
---|---|---|---|
default_includes | +set | +defines the default set of included fields. | +None | +
conformance_classes | +list | +Defines the list of conformance classes for the extension |
+None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +fastapi.FastAPI | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class FilterExtension(
+ schema_href: Optional[str] = None,
+ client: Union[stac_fastapi.extensions.core.filter.client.AsyncBaseFiltersClient, stac_fastapi.extensions.core.filter.client.BaseFiltersClient] = NOTHING,
+ conformance_classes: List[str] = [<FilterConformanceClasses.FILTER: 'http://www.opengis.net/spec/ogcapi-features-3/1.0/conf/filter'>, <FilterConformanceClasses.FEATURES_FILTER: 'http://www.opengis.net/spec/ogcapi-features-3/1.0/conf/features-filter'>, <FilterConformanceClasses.ITEM_SEARCH_FILTER: 'https://api.stacspec.org/v1.0.0-rc.2/item-search#filter'>, <FilterConformanceClasses.BASIC_CQL2: 'http://www.opengis.net/spec/cql2/1.0/conf/basic-cql2'>, <FilterConformanceClasses.CQL2_JSON: 'http://www.opengis.net/spec/cql2/1.0/conf/cql2-json'>, <FilterConformanceClasses.CQL2_TEXT: 'http://www.opengis.net/spec/cql2/1.0/conf/cql2-text'>],
+ router: fastapi.routing.APIRouter = NOTHING,
+ response_class: Type[starlette.responses.Response] = <class 'stac_fastapi.api.models.JSONSchemaResponse'>
+)
+
Filter Extension.
+The filter extension adds several endpoints which allow the retrieval of +queryables and provides an expressive mechanism for searching based on Item
+Name | +Type | +Description | +Default | +
---|---|---|---|
client | +None | +Queryables endpoint logic | +None | +
conformance_classes | +None | +Conformance classes provided by the extension | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class FreeTextAdvancedExtension(
+ conformance_classes: List[str] = [<FreeTextConformanceClasses.SEARCH_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/item-search#advanced-free-text'>, <FreeTextConformanceClasses.COLLECTIONS_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search#advanced-free-text'>, <FreeTextConformanceClasses.ITEMS_ADVANCED: 'https://api.stacspec.org/v1.0.0-rc.1/ogcapi-features#advanced-free-text'>],
+ schema_href: Optional[str] = None
+)
+
Free-text Extension.
+The Free-text extension adds an additional q
parameter to /search
requests which
+allows the caller to perform free-text queries against STAC metadata.
stac-api-extensions/freetext-search?tab=readme-ov-file#advanced
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class FreeTextExtension(
+ conformance_classes: List[str] = [<FreeTextConformanceClasses.SEARCH: 'https://api.stacspec.org/v1.0.0-rc.1/item-search#free-text'>, <FreeTextConformanceClasses.COLLECTIONS: 'https://api.stacspec.org/v1.0.0-rc.1/collection-search#free-text'>, <FreeTextConformanceClasses.ITEMS: 'https://api.stacspec.org/v1.0.0-rc.1/ogcapi-features#free-text'>],
+ schema_href: Optional[str] = None
+)
+
Free-text Extension.
+The Free-text extension adds an additional q
parameter to /search
requests which
+allows the caller to perform free-text queries against STAC metadata.
stac-api-extensions/freetext-search?tab=readme-ov-file#basic
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class OffsetPaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Offset Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class PaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Token Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class QueryExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Query Extension.
+The Query extension adds an additional query
parameter to /search
requests which
+allows the caller to perform queries against item metadata (ex. find all images with
+cloud cover less than 15%).
+stac-api-extensions/query
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class SortExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Sort Extension.
+The Sort extension adds the sortby
parameter to the /search
endpoint, allowing the
+caller to specify the sort order of the returned items.
+stac-api-extensions/sort
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class TokenPaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Token Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class TransactionExtension(
+ client: Union[stac_fastapi.types.core.AsyncBaseTransactionsClient, stac_fastapi.types.core.BaseTransactionsClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None,
+ router: fastapi.routing.APIRouter = NOTHING,
+ response_class: Type[starlette.responses.Response] = <class 'starlette.responses.JSONResponse'>
+)
+
Transaction Extension.
+The transaction extension adds several endpoints which allow the creation, +deletion, and updating of items and collections: + POST /collections + PUT /collections/{collection_id} + DELETE /collections/{collection_id} + POST /collections/{collection_id}/items + PUT /collections/{collection_id}/items + DELETE /collections/{collection_id}/items
+stac-api-extensions/transaction +stac-api-extensions/collection-transaction
+Name | +Type | +Description | +Default | +
---|---|---|---|
client | +None | +CRUD application logic | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_create_collection(
+ self
+)
+
Register create collection endpoint (POST /collections).
+def register_create_item(
+ self
+)
+
Register create item endpoint (POST /collections/{collection_id}/items).
+def register_delete_collection(
+ self
+)
+
Register delete collection endpoint (DELETE /collections/{collection_id}).
+def register_delete_item(
+ self
+)
+
Register delete item endpoint (DELETE
+/collections/{collection_id}/items/{item_id}).
+def register_patch_collection(
+ self
+)
+
Register patch collection endpoint (PATCH /collections/{collection_id}).
+def register_patch_item(
+ self
+)
+
Register patch item endpoint (PATCH
+/collections/{collection_id}/items/{item_id}).
+def register_update_collection(
+ self
+)
+
Register update collection endpoint (PUT /collections/{collection_id}).
+def register_update_item(
+ self
+)
+
Register update item endpoint (PUT
+/collections/{collection_id}/items/{item_id}).
+ + + + + + + + + + + + + +Pagination classes as extensions.
+class OffsetPaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Offset Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class PaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Token Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class TokenPaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Token Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Pagination API extension.
+class OffsetPaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Offset Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Pagination API extension.
+class PaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Page-based Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Pagination extension request models.
+class GETOffsetPagination(
+ offset: Annotated[Optional[int], Query(PydanticUndefined)] = None
+)
+
Offset pagination for GET requests.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class GETPagination(
+ page: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Page based pagination for GET requests.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class GETTokenPagination(
+ token: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Token pagination for GET requests.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class POSTOffsetPagination(
+ /,
+ **data: 'Any'
+)
+
Offset pagination model for POST requests.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class POSTPagination(
+ /,
+ **data: 'Any'
+)
+
Page based pagination for POST requests.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class POSTTokenPagination(
+ /,
+ **data: 'Any'
+)
+
Token pagination model for POST requests.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +Token pagination API extension.
+class TokenPaginationExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Token Pagination.
+Though not strictly an extension, the chosen pagination will modify the form of the +request object. By making pagination an extension class, we can use +create_request_model to dynamically add the correct pagination parameter to the +request model for OpenAPI generation.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Query extension module.
+class QueryExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Query Extension.
+The Query extension adds an additional query
parameter to /search
requests which
+allows the caller to perform queries against item metadata (ex. find all images with
+cloud cover less than 15%).
+stac-api-extensions/query
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Query extension.
+class QueryExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Query Extension.
+The Query extension adds an additional query
parameter to /search
requests which
+allows the caller to perform queries against item metadata (ex. find all images with
+cloud cover less than 15%).
+stac-api-extensions/query
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Request model for the Query extension.
+class QueryExtensionGetRequest(
+ query: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Query Extension GET request model.
+def kwargs(
+ self
+) -> Dict
+
Transform API request params into a format which matches the signature of the
+endpoint.
+class QueryExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Query Extension POST request model.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +Sort extension module.
+class SortExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Sort Extension.
+The Sort extension adds the sortby
parameter to the /search
endpoint, allowing the
+caller to specify the sort order of the returned items.
+stac-api-extensions/sort
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Request model for the Sort Extension.
+class SortExtensionGetRequest(
+ sortby: Annotated[Optional[str], Query(PydanticUndefined)] = None
+)
+
Sortby Parameter for GET requests.
+def kwargs(
+ self
+) -> Dict
+
Transform API request params into a format which matches the signature of the
+endpoint.
+class SortExtensionPostRequest(
+ /,
+ **data: 'Any'
+)
+
Sortby parameter for POST requests.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass of GenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic class Model with 2 type variables and a concrete model Model[str, int] , the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +Sort extension.
+class SortExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Sort Extension.
+The Sort extension adds the sortby
parameter to the /search
endpoint, allowing the
+caller to specify the sort order of the returned items.
+stac-api-extensions/sort
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Transaction extension.
+class PostItem(
+ collection_id: typing.Annotated[str, Path(PydanticUndefined)],
+ item: Annotated[Union[stac_pydantic.item.Item, stac_pydantic.item_collection.ItemCollection], Body(PydanticUndefined)] = None
+)
+
Create Item.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class PutCollection(
+ collection_id: typing.Annotated[str, Path(PydanticUndefined)],
+ collection: typing.Annotated[stac_pydantic.collection.Collection, Body(PydanticUndefined)] = None
+)
+
Update Collection.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class PutItem(
+ collection_id: typing.Annotated[str, Path(PydanticUndefined)],
+ item_id: typing.Annotated[str, Path(PydanticUndefined)],
+ item: typing.Annotated[stac_pydantic.item.Item, Body(PydanticUndefined)] = None
+)
+
Update Item.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class TransactionExtension(
+ client: Union[stac_fastapi.types.core.AsyncBaseTransactionsClient, stac_fastapi.types.core.BaseTransactionsClient],
+ settings: stac_fastapi.types.config.ApiSettings,
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None,
+ router: fastapi.routing.APIRouter = NOTHING,
+ response_class: Type[starlette.responses.Response] = <class 'starlette.responses.JSONResponse'>
+)
+
Transaction Extension.
The transaction extension adds several endpoints which allow the creation, +deletion, and updating of items and collections: + POST /collections + PUT /collections/{collection_id} + DELETE /collections/{collection_id} + POST /collections/{collection_id}/items + PUT /collections/{collection_id}/items/{item_id} + DELETE /collections/{collection_id}/items/{item_id}
+stac-api-extensions/transaction +stac-api-extensions/collection-transaction
+Name | +Type | +Description | +Default | +
---|---|---|---|
client | +None | +CRUD application logic | +None | +
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
def register_create_collection(
+ self
+)
+
Register create collection endpoint (POST /collections).
+def register_create_item(
+ self
+)
+
Register create item endpoint (POST /collections/{collection_id}/items).
+def register_delete_collection(
+ self
+)
+
Register delete collection endpoint (DELETE /collections/{collection_id}).
+def register_delete_item(
+ self
+)
+
Register delete item endpoint (DELETE
+/collections/{collection_id}/items/{item_id}).
+def register_patch_collection(
+ self
+)
+
Register patch collection endpoint (PATCH /collections/{collection_id}).
+def register_patch_item(
+ self
+)
+
Register patch item endpoint (PATCH
+/collections/{collection_id}/items/{item_id}).
+def register_update_collection(
+ self
+)
+
Register update collection endpoint (PUT /collections/{collection_id}).
+def register_update_item(
+ self
+)
+
Register update item endpoint (PUT
+/collections/{collection_id}/items/{item_id}).
+ + + + + + + + + + + + + +Bulk transactions extension.
+class AsyncBaseBulkTransactionsClient(
+
+)
+
BulkTransactionsClient.
+def bulk_item_insert(
+ self,
+ items: stac_fastapi.extensions.third_party.bulk_transactions.Items,
+ **kwargs
+) -> str
+
Bulk creation of items.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
items | +None | +list of items. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +Message indicating the status of the insert. | +
class BaseBulkTransactionsClient(
+
+)
+
BulkTransactionsClient.
+def bulk_item_insert(
+ self,
+ items: stac_fastapi.extensions.third_party.bulk_transactions.Items,
+ chunk_size: Optional[int] = None,
+ **kwargs
+) -> str
+
Bulk creation of items.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
items | +None | +list of items. | +None | +
chunk_size | +None | +number of items processed at a time. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +Message indicating the status of the insert. | +
class BulkTransactionExtension(
+ client: Union[stac_fastapi.extensions.third_party.bulk_transactions.AsyncBaseBulkTransactionsClient, stac_fastapi.extensions.third_party.bulk_transactions.BaseBulkTransactionsClient],
+ conformance_classes: List[str] = [],
+ schema_href: Optional[str] = None
+)
+
Bulk Transaction Extension.
+Bulk Transaction extension adds the POST
+/collections/{collection_id}/bulk_items
endpoint to the application for
+efficient bulk insertion of items. The input to this is an object with an
+attribute "items", that has a value that is an object with a group of
+attributes that are the ids of each Item, and the value is the Item entity.
Optionally, clients can specify a "method" attribute that is either "insert" +or "upsert". If "insert", then the items will be inserted if they do not +exist, and an error will be returned if they do. If "upsert", then the items +will be inserted if they do not exist, and updated if they do. This defaults +to "insert".
+{
+ "items": {
+ "id1": { "type": "Feature", ... },
+ "id2": { "type": "Feature", ... },
+ "id3": { "type": "Feature", ... }
+ },
+ "method": "insert"
+}
+
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
class BulkTransactionMethod(
+ *args,
+ **kwds
+)
+
Bulk Transaction Methods.
+INSERT
+
UPSERT
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
S.format(*args, **kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+class Items(
+ /,
+ **data: 'Any'
+)
+
A group of STAC Item objects, in the form of a dictionary from Item.id -> Item.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + + + + + + + +stac_api.extensions.third_party module.
+class BulkTransactionExtension(
+ client: Union[stac_fastapi.extensions.third_party.bulk_transactions.AsyncBaseBulkTransactionsClient, stac_fastapi.extensions.third_party.bulk_transactions.BaseBulkTransactionsClient],
+ conformance_classes: List[str] = [],
+ schema_href: Optional[str] = None
+)
+
Bulk Transaction Extension.
+Bulk Transaction extension adds the POST
+/collections/{collection_id}/bulk_items
endpoint to the application for
+efficient bulk insertion of items. The input is an object with an
+attribute "items" whose value is an object mapping each Item's id to
+the corresponding Item entity.
Optionally, clients can specify a "method" attribute that is either "insert" +or "upsert". If "insert", then the items will be inserted if they do not +exist, and an error will be returned if they do. If "upsert", then the items +will be inserted if they do not exist, and updated if they do. This defaults +to "insert".
+{
+ "items": {
+ "id1": { "type": "Feature", ... },
+ "id2": { "type": "Feature", ... },
+ "id3": { "type": "Feature", ... }
+ },
+ "method": "insert"
+}
+
GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension method.
+The model can differ based on the HTTP verb.
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Library version.
+ + + + + + + + + + + + + +stac_fastapi.types.config module.
+class ApiSettings(
+ __pydantic_self__,
+ _case_sensitive: 'bool | None' = None,
+ _nested_model_default_partial_update: 'bool | None' = None,
+ _env_prefix: 'str | None' = None,
+ _env_file: 'DotenvType | None' = PosixPath('.'),
+ _env_file_encoding: 'str | None' = None,
+ _env_ignore_empty: 'bool | None' = None,
+ _env_nested_delimiter: 'str | None' = None,
+ _env_parse_none_str: 'str | None' = None,
+ _env_parse_enums: 'bool | None' = None,
+ _cli_prog_name: 'str | None' = None,
+ _cli_parse_args: 'bool | list[str] | tuple[str, ...] | None' = None,
+ _cli_settings_source: 'CliSettingsSource[Any] | None' = None,
+ _cli_parse_none_str: 'str | None' = None,
+ _cli_hide_none_type: 'bool | None' = None,
+ _cli_avoid_json: 'bool | None' = None,
+ _cli_enforce_required: 'bool | None' = None,
+ _cli_use_class_docs_for_groups: 'bool | None' = None,
+ _cli_exit_on_error: 'bool | None' = None,
+ _cli_prefix: 'str | None' = None,
+ _cli_implicit_flags: 'bool | None' = None,
+ _secrets_dir: 'PathType | None' = None,
+ **values: 'Any'
+)
+
ApiSettings.
+Defines api configuration, potentially through environment variables. +See pydantic-docs.helpmanual.io/usage/settings/.
+Name | +Type | +Description | +Default | +
---|---|---|---|
environment | +None | +name of the environment (ex. dev/prod). | +None | +
debug | +None | +toggles debug mode. | +None | +
forbidden_fields | +None | +set of fields defined by STAC but not included in the database. | +None | +
indexed_fields | +None | +set of fields which are usually in item.properties but are indexedas distinct columns in the database. |
+None | +
model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def settings_customise_sources(
+ settings_cls: 'type[BaseSettings]',
+ init_settings: 'PydanticBaseSettingsSource',
+ env_settings: 'PydanticBaseSettingsSource',
+ dotenv_settings: 'PydanticBaseSettingsSource',
+ file_secret_settings: 'PydanticBaseSettingsSource'
+) -> 'tuple[PydanticBaseSettingsSource, ...]'
+
Define the sources and their order for loading the settings values.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
settings_cls | +None | +The Settings class. | +None | +
init_settings | +None | +The InitSettingsSource instance. |
+None | +
env_settings | +None | +The EnvSettingsSource instance. |
+None | +
dotenv_settings | +None | +The DotEnvSettingsSource instance. |
+None | +
file_secret_settings | +None | +The SecretsSettingsSource instance. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +A tuple containing the sources and their order for loading the settings values. | +
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class Settings(
+ /,
+ *args,
+ **kwargs
+)
+
Holds the global instance of settings.
+def get(
+
+) -> stac_fastapi.types.config.ApiSettings
+
Get the settings.
+If they have not yet been set, throws an exception.
+def set(
+ base_settings: stac_fastapi.types.config.ApiSettings
+)
+
Set the global settings.
+ + + + + + + + + + + + + +Conformance Classes.
+BASE_CONFORMANCE_CLASSES
+
class OAFConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for OGC API - Features.
+CORE
+
GEOJSON
+
OPEN_API
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
+S.format(*args, **kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+class STACConformanceClasses(
+ *args,
+ **kwds
+)
+
Conformance classes for the STAC API spec.
+COLLECTIONS
+
CORE
+
ITEM_SEARCH
+
OGC_API_FEAT
+
name
+
value
+
def maketrans(
+ ...
+)
+
Return a translation table usable for str.translate().
+If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.
+def capitalize(
+ self,
+ /
+)
+
Return a capitalized version of the string.
+More specifically, make the first character have upper case and the rest lower +case.
+def casefold(
+ self,
+ /
+)
+
Return a version of the string suitable for caseless comparisons.
+def center(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a centered string of length width.
+Padding is done using the specified fill character (default is a space).
+def count(
+ ...
+)
+
S.count(sub[, start[, end]]) -> int
+Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.
+def encode(
+ self,
+ /,
+ encoding='utf-8',
+ errors='strict'
+)
+
Encode the string using the codec registered for encoding.
+encoding + The encoding in which to encode the string. +errors + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors.
+def endswith(
+ ...
+)
+
S.endswith(suffix[, start[, end]]) -> bool
+Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.
+def expandtabs(
+ self,
+ /,
+ tabsize=8
+)
+
Return a copy where all tab characters are expanded using spaces.
+If tabsize is not given, a tab size of 8 characters is assumed.
+def find(
+ ...
+)
+
S.find(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def format(
+ ...
+)
+
+S.format(*args, **kwargs) -> str
+Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces ('{' and '}').
+def format_map(
+ ...
+)
+
S.format_map(mapping) -> str
+Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces ('{' and '}').
+def index(
+ ...
+)
+
S.index(sub[, start[, end]]) -> int
+Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def isalnum(
+ self,
+ /
+)
+
Return True if the string is an alpha-numeric string, False otherwise.
+A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.
+def isalpha(
+ self,
+ /
+)
+
Return True if the string is an alphabetic string, False otherwise.
+A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.
+def isascii(
+ self,
+ /
+)
+
Return True if all characters in the string are ASCII, False otherwise.
+ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.
+def isdecimal(
+ self,
+ /
+)
+
Return True if the string is a decimal string, False otherwise.
+A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.
+def isdigit(
+ self,
+ /
+)
+
Return True if the string is a digit string, False otherwise.
+A string is a digit string if all characters in the string are digits and there +is at least one character in the string.
+def isidentifier(
+ self,
+ /
+)
+
Return True if the string is a valid Python identifier, False otherwise.
+Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as "def" or "class".
+def islower(
+ self,
+ /
+)
+
Return True if the string is a lowercase string, False otherwise.
+A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.
+def isnumeric(
+ self,
+ /
+)
+
Return True if the string is a numeric string, False otherwise.
+A string is numeric if all characters in the string are numeric and there is at +least one character in the string.
+def isprintable(
+ self,
+ /
+)
+
Return True if the string is printable, False otherwise.
+A string is printable if all of its characters are considered printable in +repr() or if it is empty.
+def isspace(
+ self,
+ /
+)
+
Return True if the string is a whitespace string, False otherwise.
+A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.
+def istitle(
+ self,
+ /
+)
+
Return True if the string is a title-cased string, False otherwise.
+In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.
+def isupper(
+ self,
+ /
+)
+
Return True if the string is an uppercase string, False otherwise.
+A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.
+def join(
+ self,
+ iterable,
+ /
+)
+
Concatenate any number of strings.
+The string whose method is called is inserted in between each given string. +The result is returned as a new string.
+Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
+def ljust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a left-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def lower(
+ self,
+ /
+)
+
Return a copy of the string converted to lowercase.
+def lstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def partition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.
+def removeprefix(
+ self,
+ prefix,
+ /
+)
+
Return a str with the given prefix string removed if present.
+If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.
+def removesuffix(
+ self,
+ suffix,
+ /
+)
+
Return a str with the given suffix string removed if present.
+If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.
+def replace(
+ self,
+ old,
+ new,
+ count=-1,
+ /
+)
+
Return a copy with all occurrences of substring old replaced by new.
+count + Maximum number of occurrences to replace. + -1 (the default value) means replace all occurrences.
+If the optional argument count is given, only the first count occurrences are +replaced.
+def rfind(
+ ...
+)
+
S.rfind(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Return -1 on failure.
+def rindex(
+ ...
+)
+
S.rindex(sub[, start[, end]]) -> int
+Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.
+Raises ValueError when the substring is not found.
+def rjust(
+ self,
+ width,
+ fillchar=' ',
+ /
+)
+
Return a right-justified string of length width.
+Padding is done using the specified fill character (default is a space).
+def rpartition(
+ self,
+ sep,
+ /
+)
+
Partition the string into three parts using the given separator.
+This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.
+If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.
+def rsplit(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the end of the string and works to the front.
+def rstrip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def split(
+ self,
+ /,
+ sep=None,
+ maxsplit=-1
+)
+
Return a list of the substrings in the string, using sep as the separator string.
+sep + The separator used to split the string.
+When set to None (the default value), will split on any whitespace
+character (including \n \r \t \f and spaces) and will discard
+empty strings from the result.
+
maxsplit + Maximum number of splits. + -1 (the default value) means no limit.
+Splitting starts at the front of the string and works to the end.
+Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.
+def splitlines(
+ self,
+ /,
+ keepends=False
+)
+
Return a list of the lines in the string, breaking at line boundaries.
+Line breaks are not included in the resulting list unless keepends is given and +true.
+def startswith(
+ ...
+)
+
S.startswith(prefix[, start[, end]]) -> bool
+Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.
+def strip(
+ self,
+ chars=None,
+ /
+)
+
Return a copy of the string with leading and trailing whitespace removed.
+If chars is given and not None, remove characters in chars instead.
+def swapcase(
+ self,
+ /
+)
+
Convert uppercase characters to lowercase and lowercase characters to uppercase.
+def title(
+ self,
+ /
+)
+
Return a version of the string where each word is titlecased.
+More specifically, words start with uppercased characters and all remaining +cased characters have lower case.
+def translate(
+ self,
+ table,
+ /
+)
+
Replace each character in the string using the given translation table.
+table + Translation table, which must be a mapping of Unicode ordinals to + Unicode ordinals, strings, or None.
+The table must implement lookup/indexing via getitem, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.
+def upper(
+ self,
+ /
+)
+
Return a copy of the string converted to uppercase.
+def zfill(
+ self,
+ width,
+ /
+)
+
Pad a numeric string with zeros on the left, to fill a field of the given width.
+The string is never truncated.
+ + + + + + + + + + + + + +Base clients.
+NumType
+
StacType
+
class AsyncBaseCoreClient(
+ stac_version: str = '1.0.0',
+ landing_page_id: str = 'stac-fastapi',
+ title: str = 'stac-fastapi',
+ description: str = 'stac-fastapi',
+ base_conformance_classes: List[str] = NOTHING,
+ extensions: List[stac_fastapi.types.extension.ApiExtension] = NOTHING,
+ post_request_model=<class 'stac_fastapi.types.search.BaseSearchPostRequest'>
+)
+
Defines a pattern for implementing STAC api core endpoints.
+Name | +Type | +Description | +Default | +
---|---|---|---|
extensions | +None | +list of registered api extensions. | +None | +
post_request_model
+
def all_collections(
+ self,
+ **kwargs
+) -> stac_fastapi.types.stac.Collections
+
Get all available collections.
+Called with GET /collections
.
Returns:
+Type | +Description | +
---|---|
None | +A list of collections. | +
def conformance(
+ self,
+ **kwargs
+) -> stac_fastapi.types.stac.Conformance
+
Conformance classes.
+Called with GET /conformance
.
Returns:
+Type | +Description | +
---|---|
None | +Conformance classes which the server conforms to. | +
def conformance_classes(
+ self
+) -> List[str]
+
Generate conformance classes by adding extension conformance to base
+conformance classes.
+def extension_is_enabled(
+ self,
+ extension: str
+) -> bool
+
Check if an api extension is enabled.
+def get_collection(
+ self,
+ collection_id: str,
+ **kwargs
+) -> stac_fastapi.types.stac.Collection
+
Get collection by id.
+Called with GET /collections/{collection_id}
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +Id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +Collection. | +
def get_item(
+ self,
+ item_id: str,
+ collection_id: str,
+ **kwargs
+) -> stac_fastapi.types.stac.Item
+
Get item by id.
+Called with GET /collections/{collection_id}/items/{item_id}
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item_id | +None | +Id of the item. | +None | +
collection_id | +None | +Id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +Item. | +
def get_search(
+ self,
+ collections: Optional[List[str]] = None,
+ ids: Optional[List[str]] = None,
+ bbox: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]], NoneType] = None,
+ intersects: Optional[Annotated[Union[geojson_pydantic.geometries.Point, geojson_pydantic.geometries.MultiPoint, geojson_pydantic.geometries.LineString, geojson_pydantic.geometries.MultiLineString, geojson_pydantic.geometries.Polygon, geojson_pydantic.geometries.MultiPolygon, geojson_pydantic.geometries.GeometryCollection], FieldInfo(annotation=NoneType, required=True, discriminator='type')]] = None,
+ datetime: Union[datetime.datetime, Tuple[datetime.datetime, datetime.datetime], Tuple[datetime.datetime, NoneType], Tuple[NoneType, datetime.datetime], NoneType] = None,
+ limit: Optional[int] = 10,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Cross catalog search (GET).
+Called with GET /search
.
Returns:
+Type | +Description | +
---|---|
None | +ItemCollection containing items which match the search criteria. | +
def item_collection(
+ self,
+ collection_id: str,
+ bbox: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]], NoneType] = None,
+ datetime: Union[datetime.datetime, Tuple[datetime.datetime, datetime.datetime], Tuple[datetime.datetime, NoneType], Tuple[NoneType, datetime.datetime], NoneType] = None,
+ limit: int = 10,
+ token: str = None,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Get all items from a specific collection.
+Called with GET /collections/{collection_id}/items
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +id of the collection. | +None | +
limit | +None | +number of items to return. | +None | +
token | +None | +pagination token. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +An ItemCollection. | +
def landing_page(
+ self,
+ **kwargs
+) -> stac_fastapi.types.stac.LandingPage
+
Landing page.
+Called with GET /
.
Returns:
+Type | +Description | +
---|---|
None | +API landing page, serving as an entry point to the API. | +
def post_search(
+ self,
+ search_request: stac_fastapi.types.search.BaseSearchPostRequest,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Cross catalog search (POST).
+Called with POST /search
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
search_request | +None | +search request parameters. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +ItemCollection containing items which match the search criteria. | +
class AsyncBaseTransactionsClient(
+
+)
+
Defines a pattern for implementing the STAC transaction extension.
+def create_collection(
+ self,
+ collection: stac_pydantic.collection.Collection,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Collection, starlette.responses.Response, NoneType]
+
Create a new collection.
+Called with POST /collections
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection | +None | +the collection | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The collection that was created. | +
def create_item(
+ self,
+ collection_id: str,
+ item: Union[stac_pydantic.item.Item, stac_pydantic.item_collection.ItemCollection],
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Item, starlette.responses.Response, NoneType]
+
Create a new item.
+Called with POST /collections/{collection_id}/items
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item | +None | +the item or item collection | +None | +
collection_id | +None | +the id of the collection from the resource path | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The item that was created or None if item collection. | +
def delete_collection(
+ self,
+ collection_id: str,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Collection, starlette.responses.Response, NoneType]
+
Delete a collection.
+Called with DELETE /collections/{collection_id}
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The deleted collection. | +
def delete_item(
+ self,
+ item_id: str,
+ collection_id: str,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Item, starlette.responses.Response, NoneType]
+
Delete an item from a collection.
+Called with DELETE /collections/{collection_id}/items/{item_id}
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item_id | +None | +id of the item. | +None | +
collection_id | +None | +id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The deleted item. | +
def update_collection(
+ self,
+ collection_id: str,
+ collection: stac_pydantic.collection.Collection,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Collection, starlette.responses.Response, NoneType]
+
Perform a complete update on an existing collection.
+Called with PUT /collections/{collection_id}
. It is expected that this collection
+already exists. The update should do a diff against the saved collection and
+perform any necessary updates. Partial updates are not supported by the
+transactions extension.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +id of the existing collection to be updated | +None | +
collection | +None | +the updated collection (must be complete) | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The updated collection. | +
def update_item(
+ self,
+ collection_id: str,
+ item_id: str,
+ item: stac_pydantic.item.Item,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Item, starlette.responses.Response, NoneType]
+
Perform a complete update on an existing item.
+Called with PUT /collections/{collection_id}/items/{item_id}
. It is expected
+that this item already exists. The update should do a diff against the
+saved item and perform any necessary updates. Partial updates are not
+supported by the transactions extension.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item | +None | +the item (must be complete) | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The updated item. | +
class BaseCoreClient(
+ stac_version: str = '1.0.0',
+ landing_page_id: str = 'stac-fastapi',
+ title: str = 'stac-fastapi',
+ description: str = 'stac-fastapi',
+ base_conformance_classes: List[str] = NOTHING,
+ extensions: List[stac_fastapi.types.extension.ApiExtension] = NOTHING,
+ post_request_model=<class 'stac_fastapi.types.search.BaseSearchPostRequest'>
+)
+
Defines a pattern for implementing STAC api core endpoints.
+Name | +Type | +Description | +Default | +
---|---|---|---|
extensions | +None | +list of registered api extensions. | +None | +
post_request_model
+
def all_collections(
+ self,
+ **kwargs
+) -> stac_fastapi.types.stac.Collections
+
Get all available collections.
+Called with GET /collections
.
Returns:
+Type | +Description | +
---|---|
None | +A list of collections. | +
def conformance(
+ self,
+ **kwargs
+) -> stac_fastapi.types.stac.Conformance
+
Conformance classes.
+Called with GET /conformance
.
Returns:
+Type | +Description | +
---|---|
None | +Conformance classes which the server conforms to. | +
def conformance_classes(
+ self
+) -> List[str]
+
Generate conformance classes by adding extension conformance to base
+conformance classes.
+def extension_is_enabled(
+ self,
+ extension: str
+) -> bool
+
Check if an api extension is enabled.
+def get_collection(
+ self,
+ collection_id: str,
+ **kwargs
+) -> stac_fastapi.types.stac.Collection
+
Get collection by id.
+Called with GET /collections/{collection_id}
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +Id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +Collection. | +
def get_item(
+ self,
+ item_id: str,
+ collection_id: str,
+ **kwargs
+) -> stac_fastapi.types.stac.Item
+
Get item by id.
+Called with GET /collections/{collection_id}/items/{item_id}
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item_id | +None | +Id of the item. | +None | +
collection_id | +None | +Id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +Item. | +
def get_search(
+ self,
+ collections: Optional[List[str]] = None,
+ ids: Optional[List[str]] = None,
+ bbox: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]], NoneType] = None,
+ intersects: Optional[Annotated[Union[geojson_pydantic.geometries.Point, geojson_pydantic.geometries.MultiPoint, geojson_pydantic.geometries.LineString, geojson_pydantic.geometries.MultiLineString, geojson_pydantic.geometries.Polygon, geojson_pydantic.geometries.MultiPolygon, geojson_pydantic.geometries.GeometryCollection], FieldInfo(annotation=NoneType, required=True, discriminator='type')]] = None,
+ datetime: Union[datetime.datetime, Tuple[datetime.datetime, datetime.datetime], Tuple[datetime.datetime, NoneType], Tuple[NoneType, datetime.datetime], NoneType] = None,
+ limit: Optional[int] = 10,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Cross catalog search (GET).
+Called with GET /search
.
Returns:
+Type | +Description | +
---|---|
None | +ItemCollection containing items which match the search criteria. | +
def item_collection(
+ self,
+ collection_id: str,
+ bbox: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]], NoneType] = None,
+ datetime: Union[datetime.datetime, Tuple[datetime.datetime, datetime.datetime], Tuple[datetime.datetime, NoneType], Tuple[NoneType, datetime.datetime], NoneType] = None,
+ limit: int = 10,
+ token: str = None,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Get all items from a specific collection.
+Called with GET /collections/{collection_id}/items
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +id of the collection. | +None | +
limit | +None | +number of items to return. | +None | +
token | +None | +pagination token. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +An ItemCollection. | +
def landing_page(
+ self,
+ **kwargs
+) -> stac_fastapi.types.stac.LandingPage
+
Landing page.
+Called with GET /
.
Returns:
+Type | +Description | +
---|---|
None | +API landing page, serving as an entry point to the API. | +
def list_conformance_classes(
+ self
+)
+
Return a list of conformance classes, including implemented extensions.
+def post_search(
+ self,
+ search_request: stac_fastapi.types.search.BaseSearchPostRequest,
+ **kwargs
+) -> stac_fastapi.types.stac.ItemCollection
+
Cross catalog search (POST).
+Called with POST /search
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
search_request | +None | +search request parameters. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +ItemCollection containing items which match the search criteria. | +
class BaseTransactionsClient(
+
+)
+
Defines a pattern for implementing the STAC API Transaction Extension.
+def create_collection(
+ self,
+ collection: stac_pydantic.collection.Collection,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Collection, starlette.responses.Response, NoneType]
+
Create a new collection.
+Called with POST /collections
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection | +None | +the collection | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The collection that was created. | +
def create_item(
+ self,
+ collection_id: str,
+ item: Union[stac_pydantic.item.Item, stac_pydantic.item_collection.ItemCollection],
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Item, starlette.responses.Response, NoneType]
+
Create a new item.
+Called with POST /collections/{collection_id}/items
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item | +None | +the item or item collection | +None | +
collection_id | +None | +the id of the collection from the resource path | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The item that was created or None if item collection. | +
def delete_collection(
+ self,
+ collection_id: str,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Collection, starlette.responses.Response, NoneType]
+
Delete a collection.
+Called with DELETE /collections/{collection_id}
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The deleted collection. | +
def delete_item(
+ self,
+ item_id: str,
+ collection_id: str,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Item, starlette.responses.Response, NoneType]
+
Delete an item from a collection.
+Called with DELETE /collections/{collection_id}/items/{item_id}
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item_id | +None | +id of the item. | +None | +
collection_id | +None | +id of the collection. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The deleted item. | +
def update_collection(
+ self,
+ collection_id: str,
+ collection: stac_pydantic.collection.Collection,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Collection, starlette.responses.Response, NoneType]
+
Perform a complete update on an existing collection.
+Called with PUT /collections/{collection_id}
. It is expected that this
+collection already exists. The update should do a diff against the saved
+collection and perform any necessary updates. Partial updates are not
+supported by the transactions extension.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
collection_id | +None | +id of the existing collection to be updated | +None | +
collection | +None | +the updated collection (must be complete) | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The updated collection. | +
def update_item(
+ self,
+ collection_id: str,
+ item_id: str,
+ item: stac_pydantic.item.Item,
+ **kwargs
+) -> Union[stac_fastapi.types.stac.Item, starlette.responses.Response, NoneType]
+
Perform a complete update on an existing item.
+Called with PUT /collections/{collection_id}/items/{item_id}
. It is expected
+that this item already exists. The update should do a diff against the
+saved item and perform any necessary updates. Partial updates are not
+supported by the transactions extension.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
item | +None | +the item (must be complete) | +None | +
collection_id | +None | +the id of the collection from the resource path | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The updated item. | +
class LandingPageMixin(
+ stac_version: str = '1.0.0',
+ landing_page_id: str = 'stac-fastapi',
+ title: str = 'stac-fastapi',
+ description: str = 'stac-fastapi'
+)
+
Create a STAC landing page (GET /).
+stac_fastapi.types.errors module.
+class ConflictError(
+ /,
+ *args,
+ **kwargs
+)
+
Database conflict.
+args
+
def add_note(
+ ...
+)
+
Exception.add_note(note) --
+add a note to the exception
+def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.__traceback__ to tb and return self.
+class DatabaseError(
+ /,
+ *args,
+ **kwargs
+)
+
Generic database errors.
+args
+
def add_note(
+ ...
+)
+
Exception.add_note(note) --
+add a note to the exception
+def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.__traceback__ to tb and return self.
+class ForeignKeyError(
+ /,
+ *args,
+ **kwargs
+)
+
Foreign key error (collection does not exist).
+args
+
def add_note(
+ ...
+)
+
Exception.add_note(note) --
+add a note to the exception
+def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.__traceback__ to tb and return self.
+class InvalidQueryParameter(
+ /,
+ *args,
+ **kwargs
+)
+
Error for unknown or invalid query parameters.
+Used to capture errors that should respond according to +docs.opengeospatial.org/is/17-069r3/17-069r3.html#query_parameters
+args
+
def add_note(
+ ...
+)
+
Exception.add_note(note) --
+add a note to the exception
+def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.__traceback__ to tb and return self.
+class NotFoundError(
+ /,
+ *args,
+ **kwargs
+)
+
Resource not found.
+args
+
def add_note(
+ ...
+)
+
Exception.add_note(note) --
+add a note to the exception
+def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.__traceback__ to tb and return self.
+class StacApiError(
+ /,
+ *args,
+ **kwargs
+)
+
Generic API error.
+args
+
def add_note(
+ ...
+)
+
Exception.add_note(note) --
+add a note to the exception
+def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.__traceback__ to tb and return self.
+ + + + + + + + + + + + + +Base api extension.
+class ApiExtension(
+ conformance_classes: List[str] = NOTHING,
+ schema_href: Optional[str] = None
+)
+
Abstract base class for defining API extensions.
+GET
+
POST
+
def get_request_model(
+ self,
+ verb: Optional[str] = 'GET'
+) -> Optional[pydantic.main.BaseModel]
+
Return the request model for the extension.method.
+The model can differ based on HTTP verb
+def register(
+ self,
+ app: fastapi.applications.FastAPI
+) -> None
+
Register the extension with a FastAPI application.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
app | +None | +target FastAPI application. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +None | +
Backend submodule.
+Link helpers.
+INFERRED_LINK_RELS
+
def filter_links(
+ links: List[Dict]
+) -> List[Dict]
+
Remove inferred links.
+def resolve_links(
+ links: list,
+ base_url: str
+) -> List[Dict]
+
Convert relative links to absolute links.
+class BaseLinks(
+ collection_id: str,
+ base_url: str
+)
+
Create inferred links common to collections and items.
+def root(
+ self
+) -> Dict[str, Any]
+
Return the catalog root.
+class CollectionLinks(
+ collection_id: str,
+ base_url: str
+)
+
Create inferred links specific to collections.
+def create_links(
+ self
+) -> List[Dict[str, Any]]
+
Return all inferred links.
+def items(
+ self
+) -> Dict[str, Any]
+
Create the items
link.
def parent(
+ self
+) -> Dict[str, Any]
+
Create the parent
link.
def root(
+ self
+) -> Dict[str, Any]
+
Return the catalog root.
+def self(
+ self
+) -> Dict[str, Any]
+
Create the self
link.
class ItemLinks(
+ collection_id: str,
+ base_url: str,
+ item_id: str
+)
+
Create inferred links specific to items.
+def collection(
+ self
+) -> Dict[str, Any]
+
Create the collection
link.
def create_links(
+ self
+) -> List[Dict[str, Any]]
+
Return all inferred links.
+def parent(
+ self
+) -> Dict[str, Any]
+
Create the parent
link.
def root(
+ self
+) -> Dict[str, Any]
+
Return the catalog root.
+def self(
+ self
+) -> Dict[str, Any]
+
Create the self
link.
rfc3339.
+DateTimeType
+
RFC33339_PATTERN
+
def datetime_to_str(
+ dt: datetime.datetime,
+ timespec: str = 'auto'
+) -> str
+
Converts a :class:datetime.datetime
instance to an ISO8601 string in the
RFC 3339, section 5.6
+<https://datatracker.ietf.org/doc/html/rfc3339#section-5.6>
__ format required by
+the :stac-spec:STAC Spec <master/item-spec/common-metadata.md#date-and-time>
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
dt | +None | +The datetime to convert. | +None | +
timespec | +None | +An optional argument that specifies the number of additional terms of the time to include. Valid options are 'auto', 'hours', 'minutes', 'seconds', 'milliseconds' and 'microseconds'. The default value is 'auto'. |
+None | +
Returns:
+Type | +Description | +
---|---|
str | +The ISO8601 (RFC 3339) formatted string representing the datetime. | +
def now_in_utc(
+
+) -> datetime.datetime
+
Return a datetime value of now with the UTC timezone applied.
+def now_to_rfc3339_str(
+
+) -> str
+
Return an RFC 3339 string representing now.
+def parse_single_date(
+ date_str: str
+) -> datetime.datetime
+
Parse a single RFC3339 date string into a datetime object.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
date_str | +str | +A string representing the date in RFC3339 format. | +None | +
Returns:
+Type | +Description | +
---|---|
datetime | +A datetime object parsed from the date_str. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If the date_str is empty or contains the placeholder '..'. | +
def rfc3339_str_to_datetime(
+ s: str
+) -> datetime.datetime
+
Convert a string conforming to RFC 3339 to a :class:datetime.datetime
.
Uses :meth:iso8601.parse_date
under the hood.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
s | +str | +The string to convert to :class:datetime.datetime . |
+None | +
Returns:
+Type | +Description | +
---|---|
str | +The datetime represented by the ISO8601 (RFC 3339) formatted string. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If the string is not a valid RFC 3339 string. | +
def str_to_interval(
+ interval: Optional[str]
+) -> Union[datetime.datetime, Tuple[datetime.datetime, datetime.datetime], Tuple[datetime.datetime, NoneType], Tuple[NoneType, datetime.datetime], NoneType]
+
Extract a single datetime object or a tuple of datetime objects from an
+interval string defined by the OGC API. The interval can either be a +single datetime or a range with start and end datetime.
+Args: + interval (Optional[str]): The interval string to convert to datetime objects, + or None if no datetime is specified.
+Returns: + Optional[DateTimeType]: A single datetime.datetime object, a tuple of + datetime.datetime objects, or None if input is None.
+Raises: + HTTPException: If the string is not valid for various reasons such as being empty, + having more than one slash, or if date formats are invalid.
+ + + + + + + + + + + + + +stac_fastapi.types.search module.
+Limit
+
NumType
+
def crop(
+ v: typing.Annotated[int, Gt(gt=0)]
+) -> typing.Annotated[int, Gt(gt=0)]
+
Crop value to 10,000.
+def str2bbox(
+ x: str
+) -> Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]], NoneType]
+
Convert string to BBox based on , delimiter.
+def str2list(
+ x: str
+) -> Optional[List[str]]
+
Convert string to list based on , delimiter.
+class APIRequest(
+
+)
+
Generic API Request base class.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class BaseSearchGetRequest(
+ collections: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ ids: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ bbox: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ intersects: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ datetime: Annotated[Optional[str], Query(PydanticUndefined)] = None,
+ limit: Annotated[Optional[Annotated[int, Gt(gt=0), AfterValidator(func=<function crop at 0x7f85cf1b1ee0>)]], Query(PydanticUndefined)] = 10
+)
+
Base arguments for GET Request.
+def kwargs(
+ self
+) -> Dict
+
Transform api request params into format which matches the signature of the
+endpoint.
+class BaseSearchPostRequest(
+ /,
+ **data: 'Any'
+)
+
Base arguments for POST Request.
+model_computed_fields
+
model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
def from_orm(
+ obj: 'Any'
+) -> 'Self'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Self'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
Note
+model_construct()
generally respects the model_config.extra
setting on the provided model.
+That is, if model_config.extra == 'allow'
, then all extra passed values are added to the model instance's __dict__
+and __pydantic_extra__
fields. If model_config.extra == 'ignore'
(the default), then all extra passed values are ignored.
+Because no validation is performed with a call to model_construct()
, having model_config.extra == 'forbid'
does not result in
+an error if extra values are passed, but they will be ignored.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +A set of field names that were originally explicitly set during instantiation. If provided, this is directly used for the [ model_fields_set ][pydantic.BaseModel.model_fields_set] attribute.Otherwise, the field names from the values argument will be used. |
+None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/json/#json-parsing
+Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If json_data is not a JSON string or the object could not be validated. |
+
def model_validate_strings(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ context: 'Any | None' = None
+) -> 'Self'
+
Validate the given object with string data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object containing string data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Self'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: 'DeprecatedParseProtocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Self'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Self'
+
def validate_bbox(
+ v: Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
+) -> Union[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]], Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
+
def validate_datetime(
+ value: str
+) -> str
+
def validate_spatial(
+ values: Dict[str, Any]
+) -> Dict[str, Any]
+
end_date
+
model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been explicitly set on this model instance.
+spatial_filter
+
Return a geojson-pydantic object representing the spatial filter for the search request.
+Check for both because the bbox
and intersects
parameters are mutually exclusive.
start_date
+
def copy(
+ self,
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. | +None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. | +None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. | +None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep-copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self,
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Self'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal['json', 'python'] | str" = 'python',
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. |
+None | +
include | +None | +A set of fields to include in the output. | +None | +
exclude | +None | +A set of fields to exclude from the output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx | None' = None,
+ exclude: 'IncEx | None' = None,
+ context: 'Any | None' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: "bool | Literal['none', 'warn', 'error']" = True,
+ serialize_as_any: 'bool' = False
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.9/concepts/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. | +None | +
context | +None | +Additional context to pass to the serializer. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +If True, dumped values should be valid as input for non-idempotent types such as Json[T]. | +None | +
warnings | +None | +How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [ PydanticSerializationError ][pydantic_core.PydanticSerializationError]. |
+None | +
serialize_as_any | +None | +Whether to serialize fields with duck-typing serialization behavior. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self: 'BaseModel',
+ context: 'Any',
+ /
+) -> 'None'
+
We need to both initialize private attributes and call the user-defined model_post_init
+method.
+ + + + + + + + + + + + + +STAC types.
+NumType
+
class Catalog(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Catalog.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class Collection(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Collection.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class Collections(
+ /,
+ *args,
+ **kwargs
+)
+
All collections endpoint.
+github.com/radiantearth/stac-api-spec/tree/master/collections
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class Conformance(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Conformance Classes.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class Item(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Item.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class ItemCollection(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Item Collection.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+class LandingPage(
+ /,
+ *args,
+ **kwargs
+)
+
STAC Landing Page.
+def clear(
+ ...
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ ...
+)
+
D.copy() -> a shallow copy of D
+def fromkeys(
+ iterable,
+ value=None,
+ /
+)
+
Create a new dictionary with keys from iterable and values set to value.
+def get(
+ self,
+ key,
+ default=None,
+ /
+)
+
Return the value for key if key is in the dictionary, else default.
+def items(
+ ...
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ ...
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ ...
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If the key is not found, return the default if given; otherwise, +raise a KeyError.
+def popitem(
+ self,
+ /
+)
+
Remove and return a (key, value) pair as a 2-tuple.
+Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.
+def setdefault(
+ self,
+ key,
+ default=None,
+ /
+)
+
Insert key with a value of default if key is not in the dictionary.
+Return the value for key if key is in the dictionary, else default.
+def update(
+ ...
+)
+
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
+If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]
+def values(
+ ...
+)
+
D.values() -> an object providing a view on D's values
+ + + + + + + + + + + + + +Library version.
+ + + + + + + + + + + + + +