Skip to content

Commit

Permalink
[NEAT-516] Edge prop issues (#714)
Browse files Browse the repository at this point in the history
* refactor: store entire error not just the message

* fix: store entire error

* build: changelog

* feat: fallback one by one

* build: changelog

* fix: force actual means force
  • Loading branch information
doctrino authored Nov 7, 2024
1 parent f4a6c4a commit ba0d508
Show file tree
Hide file tree
Showing 4 changed files with 72 additions and 15 deletions.
8 changes: 5 additions & 3 deletions cognite/neat/_rules/exporters/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,11 @@ def _repr_html_(cls) -> str:
class CDFExporter(BaseExporter[T_VerifiedRules, T_Export]):
@abstractmethod
def export_to_cdf_iterable(
self, rules: T_VerifiedRules, client: CogniteClient, dry_run: bool = False
self, rules: T_VerifiedRules, client: CogniteClient, dry_run: bool = False, fallback_one_by_one: bool = False
) -> Iterable[UploadResult]:
raise NotImplementedError

def export_to_cdf(self, rules: T_VerifiedRules, client: CogniteClient, dry_run: bool = False) -> UploadResultList:
return UploadResultList(self.export_to_cdf_iterable(rules, client, dry_run))
def export_to_cdf(
self, rules: T_VerifiedRules, client: CogniteClient, dry_run: bool = False, fallback_one_by_one: bool = False
) -> UploadResultList:
return UploadResultList(self.export_to_cdf_iterable(rules, client, dry_run, fallback_one_by_one))
55 changes: 46 additions & 9 deletions cognite/neat/_rules/exporters/_rules2dms.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,15 +162,15 @@ def delete_from_cdf(self, rules: DMSRules, client: CogniteClient, dry_run: bool
)

def export_to_cdf_iterable(
self, rules: DMSRules, client: CogniteClient, dry_run: bool = False
self, rules: DMSRules, client: CogniteClient, dry_run: bool = False, fallback_one_by_one: bool = False
) -> Iterable[UploadResult]:
to_export = self._prepare_exporters(rules, client)

redeploy_data_model = False
for items, loader in to_export:
# The conversion from DMS to GraphQL does not seem to be triggered even if the views
# are changed. This is a workaround to force the conversion.
is_redeploying = loader is DataModelingLoader and redeploy_data_model
is_redeploying = (loader is DataModelingLoader and redeploy_data_model) or self.existing_handling == "force"

to_create, to_delete, to_update, unchanged = self._categorize_items_for_upload(
loader, items, is_redeploying
Expand All @@ -183,8 +183,10 @@ def export_to_cdf_iterable(
created: set[Hashable] = set()
skipped: set[Hashable] = set()
changed: set[Hashable] = set()
deleted: set[Hashable] = set()
failed_created: set[Hashable] = set()
failed_changed: set[Hashable] = set()
failed_deleted: set[Hashable] = set()
error_messages: list[str] = []
if dry_run:
if self.existing_handling in ["update", "force"]:
Expand All @@ -200,27 +202,60 @@ def export_to_cdf_iterable(
try:
loader.delete(to_delete)
except CogniteAPIError as e:
error_messages.append(f"Failed delete: {e.message}")
if fallback_one_by_one:
for item in to_delete:
try:
loader.delete([item])
except CogniteAPIError as item_e:
failed_deleted.add(loader.get_id(item))
error_messages.append(f"Failed delete: {item_e!s}")
else:
deleted.add(loader.get_id(item))
else:
error_messages.append(f"Failed delete: {e!s}")
failed_deleted.update(loader.get_id(item) for item in e.failed + e.unknown)
else:
deleted.update(loader.get_id(item) for item in to_delete)

if isinstance(loader, DataModelingLoader):
to_create = loader.sort_by_dependencies(to_create)

try:
loader.create(to_create)
except CogniteAPIError as e:
failed_created.update(loader.get_id(item) for item in e.failed + e.unknown)
created.update(loader.get_id(item) for item in e.successful)
error_messages.append(e.message)
if fallback_one_by_one:
for item in to_create:
try:
loader.create([item])
except CogniteAPIError as item_e:
failed_created.add(loader.get_id(item))
error_messages.append(f"Failed create: {item_e!s}")
else:
created.add(loader.get_id(item))
else:
failed_created.update(loader.get_id(item) for item in e.failed + e.unknown)
created.update(loader.get_id(item) for item in e.successful)
error_messages.append(f"Failed create: {e!s}")
else:
created.update(loader.get_id(item) for item in to_create)

if self.existing_handling in ["update", "force"]:
try:
loader.update(to_update)
except CogniteAPIError as e:
failed_changed.update(loader.get_id(item) for item in e.failed + e.unknown)
changed.update(loader.get_id(item) for item in e.successful)
error_messages.append(e.message)
if fallback_one_by_one:
for item in to_update:
try:
loader.update([item])
except CogniteAPIError as e_item:
failed_changed.add(loader.get_id(item))
error_messages.append(f"Failed update: {e_item!s}")
else:
changed.add(loader.get_id(item))
else:
failed_changed.update(loader.get_id(item) for item in e.failed + e.unknown)
changed.update(loader.get_id(item) for item in e.successful)
error_messages.append(f"Failed update: {e!s}")
else:
changed.update(loader.get_id(item) for item in to_update)
elif self.existing_handling == "skip":
Expand All @@ -232,10 +267,12 @@ def export_to_cdf_iterable(
name=loader.resource_name,
created=created,
changed=changed,
deleted=deleted,
unchanged={loader.get_id(item) for item in unchanged},
skipped=skipped,
failed_created=failed_created,
failed_changed=failed_changed,
failed_deleted=failed_deleted,
error_messages=error_messages,
issues=issue_list,
)
Expand Down
12 changes: 10 additions & 2 deletions cognite/neat/_session/_to.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,12 +63,18 @@ def instances(self, space: str | None = None):

return loader.load_into_cdf(self._client)

def data_model(self, existing_handling: Literal["fail", "skip", "update", "force"] = "skip", dry_run: bool = False):
def data_model(
self,
existing_handling: Literal["fail", "skip", "update", "force"] = "skip",
dry_run: bool = False,
fallback_one_by_one: bool = False,
):
"""Export the verified DMS data model to CDF.
Args:
existing_handling: How to handle if component of data model exists. Defaults to "skip".
dry_run: If True, no changes will be made to CDF. Defaults to False.
fallback_one_by_one: If True, will fall back to one-by-one upload if batch upload fails. Defaults to False.
... note::
Expand All @@ -88,7 +94,9 @@ def data_model(self, existing_handling: Literal["fail", "skip", "update", "force

conversion_issues = IssueList(action="to.cdf.data_model")
with catch_warnings(conversion_issues):
result = exporter.export_to_cdf(self._state.last_verified_dms_rules, self._client, dry_run)
result = exporter.export_to_cdf(
self._state.last_verified_dms_rules, self._client, dry_run, fallback_one_by_one
)
result.insert(0, UploadResultCore(name="schema", issues=conversion_issues))
self._state.outcome.append(result)
print("You can inspect the details with the .inspect.outcome(...) method.")
Expand Down
12 changes: 11 additions & 1 deletion docs/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,17 @@ Changes are grouped as follows:
## TBD
### Fixed
- Serializing `ResourceNotDefinedError` class no longer raises a `ValueError`. This happens when a `ResourceNotDefinedError`
is found, for example, when calling `neat.verify()`.
is found, for example, when calling `neat.verify()`.
- Setting `neat.to.cdf.data_model(existing_handling='force')` will now correctly delete and recreate views and containers
  if they already exist in CDF.

### Improved
- When running `neat.to.cdf.data_model()` the entire response from CDF is now stored as an error message, not just the
text.

### Added
- `neat.to.cdf.data_model()` now has a `fallback_one_by_one` parameter. If set to `True`, the views/containers will
be created one by one, if the batch creation fails.

## [0.96.4] - 05-11-**2024**
### Fixed
Expand Down

0 comments on commit ba0d508

Please sign in to comment.