From a86cca0837f61b08f3b00bcdc4d9233b12ad1021 Mon Sep 17 00:00:00 2001 From: henry-binary Date: Mon, 22 Apr 2024 13:18:18 -0400 Subject: [PATCH 1/4] Implemented Cascade storage method in upload functionality --- src/config/data.rs | 6 + src/create_config/process.rs | 19 ++- src/main.rs | 1 + src/upload/methods/cascade.rs | 277 ++++++++++++++++++++++++++++++++++ src/upload/methods/mod.rs | 2 + src/upload/uploader.rs | 3 + 6 files changed, 307 insertions(+), 1 deletion(-) create mode 100644 src/upload/methods/cascade.rs diff --git a/src/config/data.rs b/src/config/data.rs index 337f7ca8..06f27c2a 100644 --- a/src/config/data.rs +++ b/src/config/data.rs @@ -76,6 +76,10 @@ pub struct ConfigData { // Pinata specific configuration pub pinata_config: Option, + // SDRIVE specific configuration + #[serde(serialize_with = "to_option_string")] + pub cascade_api_key: Option, + /// Hidden setttings pub hidden_settings: Option, @@ -228,6 +232,8 @@ pub enum UploadMethod { Pinata, #[serde(rename = "sdrive")] Sdrive, + #[serde(rename = "cascade")] + Cascade, } impl Display for UploadMethod { diff --git a/src/create_config/process.rs b/src/create_config/process.rs index 1628ef8b..1916a125 100644 --- a/src/create_config/process.rs +++ b/src/create_config/process.rs @@ -309,7 +309,15 @@ pub fn process_create_config(args: CreateConfigArgs) -> Result<()> { }; // upload method - let upload_options = vec!["Bundlr", "AWS", "NFT Storage", "SHDW", "Pinata", "SDrive"]; + let upload_options = vec![ + "Bundlr", + "AWS", + "NFT Storage", + "SHDW", + "Pinata", + "SDrive", + "Cascade", + ]; config_data.upload_method = match Select::with_theme(&theme) .with_prompt("What upload method do you want to use?") .items(&upload_options) @@ -323,6 +331,7 @@ pub fn process_create_config(args: CreateConfigArgs) -> Result<()> { 3 => UploadMethod::SHDW, 4 => UploadMethod::Pinata, 5 => UploadMethod::Sdrive, + 6 => UploadMethod::Cascade, _ => UploadMethod::Bundlr, }; @@ -424,6 +433,14 @@ pub fn process_create_config(args: CreateConfigArgs) -> Result<()> { }); } + if config_data.upload_method == UploadMethod::Cascade { + config_data.cascade_api_key = Some( + Input::with_theme(&theme) + .with_prompt("What is the Cascade api key?") + .interact() + .unwrap(), + ); + } // is mutable config_data.is_mutable = Confirm::with_theme(&theme) diff --git a/src/main.rs b/src/main.rs index 3cb87a4c..67fcf74d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -53,6 +53,7 @@ fn setup_logging(level: Option) -> Result<()> { let file = OpenOptions::new() .write(true) .create(true) + .truncate(true) .open(log_path) .unwrap(); diff --git a/src/upload/methods/cascade.rs b/src/upload/methods/cascade.rs new file mode 100644 index 00000000..8077766e --- /dev/null +++ b/src/upload/methods/cascade.rs @@ -0,0 +1,277 @@ +use std::{ + fs, + path::Path, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use async_trait::async_trait; +use reqwest::{ + header, + multipart::{Form, Part}, + Client, StatusCode, +}; +use tokio::time::{sleep, Duration}; + +use crate::{common::*, config::*, upload::*}; + +// API end point. +const CASCADE_API_URL: &str = "https://gateway-api.pastel.network/"; +// Request time window (ms) to avoid the rate limit. +const REQUEST_WAIT: u64 = 10000; +// File size limit (100mb). +const FILE_SIZE_LIMIT: u64 = 100 * 1024 * 1024; +// Number of files per request limit. 
+const FILE_COUNT_LIMIT: u64 = 100; + +pub enum CascadeStorageError { + ApiError(Value), +} + +/// response after an nft was stored +#[derive(Debug, Deserialize, Default)] +pub struct UploadResponse { + /// id of the request + pub request_id: String, + /// status of the request + pub request_status: String, + /// stored nft data + pub results: Vec, +} + +/// main obj that hold all the response data +#[derive(Debug, Deserialize, Default)] +#[serde(default)] +pub struct UploadResult { + pub result_id: String, + pub result_status: String, + pub original_file_ipfs_link: Option, + pub error: Option, +} + +pub struct CascadeStorageMethod { + client: Arc, +} + +impl CascadeStorageMethod { + /// Initialize a new CascadeStorageHandler. + pub async fn new(config_data: &ConfigData) -> Result { + if let Some(api_key) = &config_data.cascade_api_key { + let client_builder = Client::builder(); + + let mut headers = header::HeaderMap::new(); + let mut api_key_mut = header::HeaderValue::from_str(api_key)?; + api_key_mut.set_sensitive(true); + headers.insert("Api_key", api_key_mut); + + let client = client_builder.default_headers(headers).build()?; + + let url = format!("{}/api/v1/cascade/gateway_requests", CASCADE_API_URL); + let response = client.get(url).send().await?; + + match response.status() { + StatusCode::OK => Ok(Self { + client: Arc::new(client), + }), + StatusCode::UNAUTHORIZED => Err(anyhow!("Invalid cascade api key.")), + code => Err(anyhow!("Could not initialize cascade client: {code}")), + } + } else { + Err(anyhow!("Missing 'CascadeApiKey' value in config file.")) + } + } +} + +#[async_trait] +impl Prepare for CascadeStorageMethod { + /// Verifies that no file is larger than 100MB (upload of files larger than 100MB are + /// not currently supported). + async fn prepare( + &self, + _sugar_config: &SugarConfig, + asset_pairs: &HashMap, + asset_indices: Vec<(DataType, &[isize])>, + ) -> Result<()> { + for (data_type, indices) in asset_indices { + for index in indices { + let item = asset_pairs.get(index).unwrap(); + let size = match data_type { + DataType::Image => { + let path = Path::new(&item.image); + fs::metadata(path)?.len() + } + DataType::Animation => { + if let Some(animation) = &item.animation { + let path = Path::new(animation); + fs::metadata(path)?.len() + } else { + 0 + } + } + DataType::Metadata => { + let mock_uri = "x".repeat(MOCK_URI_SIZE); + let animation = if item.animation.is_some() { + Some(mock_uri.clone()) + } else { + None + }; + + get_updated_metadata(&item.metadata, &mock_uri.clone(), &animation)? 
+ .into_bytes() + .len() as u64 + } + }; + + if size > FILE_SIZE_LIMIT { + return Err(anyhow!( + "File '{}' exceeds the current 100MB file size limit", + item.name, + )); + } + } + } + Ok(()) + } +} + +#[async_trait] +impl Uploader for CascadeStorageMethod { + /// Upload the data to Nft Storage + async fn upload( + &self, + _sugar_config: &SugarConfig, + cache: &mut Cache, + data_type: DataType, + assets: &mut Vec, + progress: &ProgressBar, + interrupted: Arc, + ) -> Result> { + let mut batches: Vec> = Vec::new(); + let mut current: Vec<&AssetInfo> = Vec::new(); + let mut upload_size = 0; + let mut upload_count = 0; + + for asset_info in assets { + let size = match data_type { + DataType::Image | DataType::Animation => { + let path = Path::new(&asset_info.content); + fs::metadata(path)?.len() + } + DataType::Metadata => { + let content = String::from(&asset_info.content); + content.into_bytes().len() as u64 + } + }; + + if (upload_size + size) > FILE_SIZE_LIMIT || (upload_count + 1) > FILE_COUNT_LIMIT { + batches.push(current); + current = Vec::new(); + upload_size = 0; + upload_count = 0; + } + + upload_size += size; + upload_count += 1; + current.push(asset_info); + } + // adds the last chunk (if there is one) + if !current.is_empty() { + batches.push(current); + } + + let mut errors = Vec::new(); + // sets the length of the progress bar as the number of batches + progress.set_length(batches.len() as u64); + + while !interrupted.load(Ordering::SeqCst) && !batches.is_empty() { + let batch = batches.remove(0); + let mut form = Form::new(); + + for asset_info in &batch { + let data = match asset_info.data_type { + DataType::Image | DataType::Animation => fs::read(&asset_info.content)?, + DataType::Metadata => { + let content = String::from(&asset_info.content); + content.into_bytes() + } + }; + + let file = Part::bytes(data) + .file_name(asset_info.name.clone()) + .mime_str(asset_info.content_type.as_str())?; + form = form.part("files", file); + } + + let response = self + .client + .post(format!( + "{CASCADE_API_URL}/api/v1/cascade?make_publicly_accessible=true" + )) + .multipart(form) + .send() + .await?; + let status = response.status(); + + if status.is_success() { + let body = response.json::().await?; + let response: UploadResponse = serde_json::from_value(body)?; + + // updates the cache content + + for asset_info in batch { + let id = asset_info.asset_id.clone(); + if response.results[0].original_file_ipfs_link.is_some() { + let uri = response.results[0].original_file_ipfs_link.clone().unwrap(); + // cache item to update + let item = cache.items.get_mut(&id).unwrap(); + + match data_type { + DataType::Image => item.image_link = uri, + DataType::Metadata => item.metadata_link = uri, + DataType::Animation => item.animation_link = Some(uri), + } + } else { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({})", + response.results[0].result_status + ))); + } + } + // syncs cache (checkpoint) + cache.sync_file()?; + // updates the progress bar + progress.inc(1); + } else { + let body = response.json::().await?; + let response: UploadResponse = serde_json::from_value(body)?; + if !response.results.is_empty() { + if response.results[0].error.is_some() { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({}): {}", + status, + response.results[0].error.clone().unwrap() + ))); + } else { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({}): {}", + status, response.results[0].result_status + ))); + } + } else { + 
errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({}): {}", + status, response.request_status + ))); + } + } + if !batches.is_empty() { + // wait to minimize the chance of getting caught by the rate limit + sleep(Duration::from_millis(REQUEST_WAIT)).await; + } + } + + Ok(errors) + } +} diff --git a/src/upload/methods/mod.rs b/src/upload/methods/mod.rs index d5627cf9..f36a0d4f 100644 --- a/src/upload/methods/mod.rs +++ b/src/upload/methods/mod.rs @@ -1,5 +1,6 @@ pub mod aws; pub mod bundlr; +pub mod cascade; pub mod nft_storage; pub mod pinata; pub mod sdrive; @@ -7,5 +8,6 @@ pub mod shdw; pub use aws::*; pub use bundlr::*; +pub use cascade::*; pub use nft_storage::*; pub use sdrive::*; diff --git a/src/upload/uploader.rs b/src/upload/uploader.rs index 7b0cc088..c224b959 100644 --- a/src/upload/uploader.rs +++ b/src/upload/uploader.rs @@ -292,5 +292,8 @@ pub async fn initialize( UploadMethod::Sdrive => { Box::new(sdrive::SdriveMethod::new(config_data).await?) as Box } + UploadMethod::Cascade => { + Box::new(cascade::CascadeStorageMethod::new(config_data).await?) as Box + } }) } From 7fdc4ae63ec3c005c7a82e7f6a2c1821876ab5cd Mon Sep 17 00:00:00 2001 From: henry-binary Date: Fri, 26 Apr 2024 14:35:18 -0400 Subject: [PATCH 2/4] cascade_id added to metadata and cache file and also updated docs about cascade protocol --- .autodoc/docs/markdown/src/config/data.md | 6 +-- .../markdown/src/upload/methods/cascade.md | 48 +++++++++++++++++++ .../docs/markdown/src/upload/methods/mod.md | 4 +- .../markdown/src/upload/methods/summary.md | 2 +- .autodoc/docs/markdown/src/upload/summary.md | 2 +- .autodoc/docs/markdown/src/upload/uploader.md | 2 +- src/cache.rs | 3 ++ src/upload/assets.rs | 48 +++++++++++++++++++ src/upload/methods/cascade.rs | 6 ++- src/upload/process.rs | 18 +++++-- src/validate/format.rs | 2 + 11 files changed, 128 insertions(+), 13 deletions(-) create mode 100644 .autodoc/docs/markdown/src/upload/methods/cascade.md diff --git a/.autodoc/docs/markdown/src/config/data.md b/.autodoc/docs/markdown/src/config/data.md index 7495ecb9..8aeecf44 100644 --- a/.autodoc/docs/markdown/src/config/data.md +++ b/.autodoc/docs/markdown/src/config/data.md @@ -1,8 +1,8 @@ [View code on GitHub](https://github.com/metaplex-foundation/sugar/src/config/data.rs) -The `sugar` code defines the configuration and data structures for a project that deals with non-fungible tokens (NFTs) and programmable non-fungible tokens (pNFTs). The main structure, `ConfigData`, contains various fields related to the token standard, asset properties, creator information, and storage configurations for different platforms like AWS, NFT.Storage, Shadow Drive, and Pinata. +The `sugar` code defines the configuration and data structures for a project that deals with non-fungible tokens (NFTs) and programmable non-fungible tokens (pNFTs). The main structure, `ConfigData`, contains various fields related to the token standard, asset properties, creator information, and storage configurations for different platforms like AWS, NFT.Storage, Shadow Drive, Pinata and Cascade. -The `SugarConfig` struct holds the keypair and RPC URL for the Solana network, while `SolanaConfig` contains the JSON RPC URL, keypair path, and commitment level. The `AwsConfig` and `PinataConfig` structs store the respective platform-specific configurations. +The `SugarConfig` struct holds the keypair and RPC URL for the Solana network, while `SolanaConfig` contains the JSON RPC URL, keypair path, and commitment level. 
The `AwsConfig` and `PinataConfig` structs store the respective platform-specific configurations. `cascade_api_key` stores the API key used to access Pastel's Cascade Protocol.
 
 The `Creator` struct represents a creator with an address and share percentage. The `Cluster` enum represents different Solana network clusters (Devnet, Mainnet, Localnet, and Unknown). The `TokenStandard` enum distinguishes between NFT and pNFT standards.
@@ -16,7 +16,7 @@ These structures and utility functions can be used throughout the project to man
 2. **What are the different `UploadMethod` options available and how do they affect the behavior of the code?**
 
-   The `UploadMethod` enum has five variants: `Bundlr`, `AWS`, `NftStorage`, `SHDW`, and `Pinata`. These options represent different storage services or methods for uploading assets. The choice of `UploadMethod` will determine which storage service or method is used when uploading assets in the project.
+   The `UploadMethod` enum has five variants: `Bundlr`, `AWS`, `NftStorage`, `SHDW`, `Pinata` and `Cascade`. These options represent different storage services or methods for uploading assets. The choice of `UploadMethod` will determine which storage service or method is used when uploading assets in the project.
 
 3. **How does the `TokenStandard` enum work and what are its possible values?**
 
diff --git a/.autodoc/docs/markdown/src/upload/methods/cascade.md b/.autodoc/docs/markdown/src/upload/methods/cascade.md
new file mode 100644
index 00000000..9fc9e77e
--- /dev/null
+++ b/.autodoc/docs/markdown/src/upload/methods/cascade.md
@@ -0,0 +1,48 @@
+[View code on GitHub](https://github.com/metaplex-foundation/sugar/src/upload/methods/cascade.rs)
+
+The code in this file is responsible for uploading files to Pastel's Cascade service. It defines the `CascadeStorageMethod` struct and implements the `Prepare` and `Uploader` traits for it. The main purpose of this code is to handle the process of uploading files to Cascade while adhering to the service's limitations, such as file size and request rate limits.
+
+The `CascadeStorageMethod` struct contains an `Arc<Client>` for making HTTP requests. The `new` method initializes the struct by creating an HTTP client with the necessary headers, including the authentication token.
+
+The `prepare` method, which is part of the `Prepare` trait implementation, checks if any file in the provided asset pairs exceeds the 100MB file size limit. If any file is too large, an error is returned.
+
+The `upload` method, which is part of the `Uploader` trait implementation, is responsible for uploading the files to the Cascade Protocol. It first groups the files into batches, ensuring that each batch does not exceed the file size and count limits. Then, it iterates through the batches and uploads them using a multipart HTTP request. If the upload is successful, the cache is updated with the new file URLs and the ID of the active Cascade registration, and the progress bar is incremented. If an error occurs during the upload, it is added to a list of errors that is returned at the end of the method.
+
+To avoid hitting the rate limit, the code waits for a specified duration (`REQUEST_WAIT`) between uploading batches. Additionally, an `interrupted` flag is used to stop the upload process if needed.
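+
+For reference, a successful gateway response deserializes into the `UploadResponse` and `UploadResult` structs described above. A hypothetical response might look like the following (all field values are invented for illustration, and only the fields the structs declare are shown):
+
+```json
+{
+  "request_id": "REQUEST-ID",
+  "request_status": "SUCCESS",
+  "results": [
+    {
+      "result_id": "RESULT-ID",
+      "result_status": "SUCCESS",
+      "original_file_ipfs_link": "https://ipfs.example/ipfs/HASH"
+    }
+  ]
+}
+```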
+
+Here's an example of how this code might be used in the larger project:
+
+```rust
+let config_data = ConfigData::load("config.toml")?;
+let cascade_storage_method = CascadeStorageMethod::new(&config_data).await?;
+
+let sugar_config = SugarConfig::load("sugar_config.toml")?;
+let asset_pairs = load_asset_pairs(&sugar_config)?;
+let asset_indices = get_asset_indices(&asset_pairs)?;
+
+cascade_storage_method.prepare(&sugar_config, &asset_pairs, asset_indices).await?;
+
+let mut cache = Cache::load("cache.toml")?;
+let mut assets = prepare_assets(&asset_pairs, &cache)?;
+let progress = ProgressBar::new(assets.len() as u64);
+let interrupted = Arc::new(AtomicBool::new(false));
+
+let errors = cascade_storage_method
+    .upload(&sugar_config, &mut cache, DataType::Image, &mut assets, &progress, interrupted)
+    .await?;
+```
+
+This example demonstrates how to initialize the `CascadeStorageMethod`, prepare the assets for upload, and then upload them using the `upload` method.
+## Questions:
+ 1. **Question**: What is the purpose of the `CascadeStorageMethod` struct and its associated methods?
+   **Answer**: The `CascadeStorageMethod` struct is used to handle the interaction with the Cascade Protocol API. It provides methods for initializing a new instance with the necessary authentication, preparing the assets for upload by checking file size limits, and uploading the assets to the Cascade Protocol API.
+
+2. **Question**: What are the constants defined at the beginning of the code and what are their purposes?
+   **Answer**: The constants defined at the beginning of the code are:
+   - `CASCADE_API_URL`: The base URL for the Cascade Protocol API.
+   - `REQUEST_WAIT`: The time window (in milliseconds) to wait between requests to avoid rate limits.
+   - `FILE_SIZE_LIMIT`: The maximum file size allowed for upload (100 MB).
+   - `FILE_COUNT_LIMIT`: The maximum number of files allowed per request.
+
+3. **Question**: How does the `upload` method handle uploading assets in batches?
+   **Answer**: The `upload` method first groups the assets into batches based on the file size and count limits. It then iterates through each batch, creating a multipart form with the assets, and sends a POST request to the Cascade Protocol API. After each successful upload, the cache is updated, and the progress bar is incremented. If there are more batches to process, the method waits for a specified duration to avoid rate limits before proceeding with the next batch.
\ No newline at end of file
diff --git a/.autodoc/docs/markdown/src/upload/methods/mod.md b/.autodoc/docs/markdown/src/upload/methods/mod.md
index ecf50fc7..a2d7f5be 100644
--- a/.autodoc/docs/markdown/src/upload/methods/mod.md
+++ b/.autodoc/docs/markdown/src/upload/methods/mod.md
@@ -41,11 +41,13 @@ This code is part of a larger project and serves as a module that provides vario
     shdw::sync_data("source-storage", "destination-storage");
     ```
 
+6. **cascade** This sub-module provides integration with the Cascade protocol. Cascade is a protocol that allows users to store data permanently in a highly redundant, distributed fashion with a single upfront fee. It contains functions to upload and manage IPFS content, and it adds a cascade ID to the metadata so that the TxID of the Action Registration ticket can be retrieved.
+
 By using `pub use` statements, the code re-exports the contents of each sub-module, making their functions and types available to other parts of the project without the need to explicitly import each sub-module.
 ## Questions:
 1. **What is the purpose of each module in this code?**
 
-    Each module (aws, bundlr, nft_storage, pinata, and shdw) likely represents a different component or service within the Sugar project, but it's not clear from this code snippet alone what each module does specifically.
+    Each module (aws, bundlr, nft_storage, pinata, shdw and cascade) likely represents a different component or service within the Sugar project, but it's not clear from this code snippet alone what each module does specifically.
 
 2. **How are these modules being used in the rest of the project?**
 
diff --git a/.autodoc/docs/markdown/src/upload/methods/summary.md b/.autodoc/docs/markdown/src/upload/methods/summary.md
index 08c09df8..976ccdf9 100644
--- a/.autodoc/docs/markdown/src/upload/methods/summary.md
+++ b/.autodoc/docs/markdown/src/upload/methods/summary.md
@@ -22,7 +22,7 @@ let (asset_id, uploaded_url) = upload_handle.await??;
 
 Similarly, the `bundlr.rs` file provides a module for uploading assets to the Bundlr platform using the Solana blockchain. The `BundlrMethod` struct handles the upload process, including setting up the Bundlr client, funding the Bundlr address, and uploading the assets. An example usage is provided in the file summary.
 
-The `nft_storage.rs` file handles uploading files to the NFT Storage service, while the `pinata.rs` file provides functionality for uploading files to the Pinata IPFS service. Both files define structs that implement the `Prepare` and `Uploader` or `ParallelUploader` traits, respectively. Example usages for these modules can be found in their respective file summaries.
+The `nft_storage.rs` file handles uploading files to the NFT Storage service, while the `pinata.rs` file provides functionality for uploading files to the Pinata IPFS service. The `cascade.rs` file uploads files through Pastel's Cascade protocol and records a `cascade_id` (the upload `result_id`, used to look up the TxID of the active registration) in the metadata. These files define structs that implement the `Prepare` and `Uploader` or `ParallelUploader` traits. Example usages for these modules can be found in their respective file summaries.
 
 Finally, the `shdw.rs` file is responsible for handling the storage and uploading of assets to the Shadow Drive, a decentralized storage solution. It provides a `SHDWMethod` struct that implements the `Prepare` and `ParallelUploader` traits. An example usage is provided in the file summary.
 
diff --git a/.autodoc/docs/markdown/src/upload/summary.md b/.autodoc/docs/markdown/src/upload/summary.md
index 70308dac..923ec395 100644
--- a/.autodoc/docs/markdown/src/upload/summary.md
+++ b/.autodoc/docs/markdown/src/upload/summary.md
@@ -4,7 +4,7 @@ The code in the `upload` folder is responsible for managing and uploading assets
 
 For example, the `assets.rs` file provides functions to manage assets, calculate their sizes, and update their metadata. The `errors.rs` file defines a custom error type called `UploadError` for handling various errors that may occur during the upload process. The `process.rs` file is responsible for uploading assets to a storage system, while the `uploader.rs` file handles the uploading of assets and defines traits and structs for managing the upload process.
 
-The `methods` subfolder contains code for handling the upload of assets to different storage services and platforms, such as Amazon S3, Bundlr, NFT Storage, Pinata IPFS, and Shadow Drive.
Each storage method is implemented in a separate file, providing a clean and modular approach to integrating various storage services into the larger project. +The `methods` subfolder contains code for handling the upload of assets to different storage services and platforms, such as Amazon S3, Bundlr, NFT Storage, Pinata IPFS, Shadow Drive and Cascade. Each storage method is implemented in a separate file, providing a clean and modular approach to integrating various storage services into the larger project. Here's an example of how the code in the `upload` folder might be used in the larger project: diff --git a/.autodoc/docs/markdown/src/upload/uploader.md b/.autodoc/docs/markdown/src/upload/uploader.md index cf161caa..4a716db5 100644 --- a/.autodoc/docs/markdown/src/upload/uploader.md +++ b/.autodoc/docs/markdown/src/upload/uploader.md @@ -37,4 +37,4 @@ This code would initialize an uploader object based on the configuration, prepar **Answer:** The `ParallelUploader` trait is designed for upload methods that support parallel uploads. It abstracts the threading logic, allowing methods to focus on the logic of uploading a single asset. It inherits from the `Uploader` trait and requires implementing the `upload_asset` function, which returns a `JoinHandle` for the task responsible for uploading the specified asset. 3. **Question:** How does the `initialize` function work and what is its purpose? - **Answer:** The `initialize` function acts as a factory function for creating uploader objects based on the configuration's `uploadMethod`. It takes `sugar_config` and `config_data` as arguments and returns a `Result` containing a boxed `Uploader` trait object. Depending on the `uploadMethod`, it initializes the appropriate uploader object (e.g., `AWSMethod`, `BundlrMethod`, `NftStorageMethod`, `SHDWMethod`, or `PinataMethod`). \ No newline at end of file + **Answer:** The `initialize` function acts as a factory function for creating uploader objects based on the configuration's `uploadMethod`. It takes `sugar_config` and `config_data` as arguments and returns a `Result` containing a boxed `Uploader` trait object. Depending on the `uploadMethod`, it initializes the appropriate uploader object (e.g., `AWSMethod`, `BundlrMethod`, `NftStorageMethod`, `SHDWMethod`, `PinataMethod` or `CascadeStorageMethod`). \ No newline at end of file diff --git a/src/cache.rs b/src/cache.rs index 72862d2d..71d62e27 100644 --- a/src/cache.rs +++ b/src/cache.rs @@ -128,6 +128,9 @@ pub struct CacheItem { pub animation_hash: Option, #[serde(skip_serializing_if = "Option::is_none")] pub animation_link: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub cascade_id: Option, } impl CacheItem { diff --git a/src/upload/assets.rs b/src/upload/assets.rs index b361f0cc..a9502136 100644 --- a/src/upload/assets.rs +++ b/src/upload/assets.rs @@ -42,6 +42,7 @@ impl AssetPair { on_chain: false, animation_hash: self.animation_hash, animation_link: None, + cascade_id: None, } } } @@ -361,6 +362,53 @@ pub fn get_updated_metadata( Ok(serde_json::to_string(&metadata).unwrap()) } +pub fn get_updated_metadata_with_cascade_id( + metadata_file: &str, + image_link: &str, + animation_link: &Option, + cascade_id: &Option, +) -> Result { + let mut metadata: Metadata = { + let m = OpenOptions::new() + .read(true) + .open(metadata_file) + .map_err(|e| { + anyhow!("Failed to read metadata file '{metadata_file}' with error: {e}") + })?; + serde_json::from_reader(&m)? 
+ }; + + if metadata.properties.creators.is_some() { + println!("The creators field is deprecated in the JSON metadata, it should be set in the config file instead.") + } + + for file in &mut metadata.properties.files { + if file.uri.eq(&metadata.image) { + file.uri = image_link.to_string(); + } + if let Some(ref animation_link) = animation_link { + if let Some(ref animation_url) = metadata.animation_url { + if file.uri.eq(animation_url) { + file.uri = animation_link.to_string(); + } + } + } + } + + metadata.image = image_link.to_string(); + + if cascade_id.is_some() { + metadata.cascade_id = cascade_id.clone(); + } + + if animation_link.is_some() { + // only updates the link if we have a new value + metadata.animation_url = animation_link.clone(); + } + + Ok(serde_json::to_string(&metadata).unwrap()) +} + pub fn is_complete_uri(value: &str) -> bool { url::Url::parse(value).is_ok() } diff --git a/src/upload/methods/cascade.rs b/src/upload/methods/cascade.rs index 8077766e..04b5f6e0 100644 --- a/src/upload/methods/cascade.rs +++ b/src/upload/methods/cascade.rs @@ -47,6 +47,7 @@ pub struct UploadResponse { pub struct UploadResult { pub result_id: String, pub result_status: String, + pub registration_ticket_txid: Option, pub original_file_ipfs_link: Option, pub error: Option, } @@ -228,7 +229,10 @@ impl Uploader for CascadeStorageMethod { let item = cache.items.get_mut(&id).unwrap(); match data_type { - DataType::Image => item.image_link = uri, + DataType::Image => { + item.cascade_id = Some(response.results[0].result_id.clone()); + item.image_link = uri; + } DataType::Metadata => item.metadata_link = uri, DataType::Animation => item.animation_link = Some(uri), } diff --git a/src/upload/process.rs b/src/upload/process.rs index 9944b6b4..fe26455f 100644 --- a/src/upload/process.rs +++ b/src/upload/process.rs @@ -552,11 +552,19 @@ async fn upload_data( let content = match data_type { // replaces the media link without modifying the original file to avoid // changing the hash of the metadata file - DataType::Metadata => get_updated_metadata( - &file_path, - &cache_item.image_link, - &cache_item.animation_link, - )?, + DataType::Metadata => match &cache_item.cascade_id.is_some() { + true => get_updated_metadata_with_cascade_id( + &file_path, + &cache_item.image_link, + &cache_item.animation_link, + &cache_item.cascade_id, + )?, + false => get_updated_metadata( + &file_path, + &cache_item.image_link, + &cache_item.animation_link, + )?, + }, _ => file_path.clone(), }; diff --git a/src/validate/format.rs b/src/validate/format.rs index e20dfdff..80a3818e 100644 --- a/src/validate/format.rs +++ b/src/validate/format.rs @@ -19,6 +19,8 @@ pub struct Metadata { #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option>, pub properties: Property, + #[serde(skip_serializing_if = "Option::is_none")] + pub cascade_id: Option, #[serde(flatten)] pub extra: HashMap, } From 72cef312a6a3f9560dc5ea66967c05eb48530f1e Mon Sep 17 00:00:00 2001 From: mastercodercat Date: Thu, 27 Jun 2024 05:53:52 -0400 Subject: [PATCH 3/4] fix config documentation and comment --- .autodoc/docs/markdown/src/config/data.md | 2 +- src/config/data.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.autodoc/docs/markdown/src/config/data.md b/.autodoc/docs/markdown/src/config/data.md index 8aeecf44..d3d4748b 100644 --- a/.autodoc/docs/markdown/src/config/data.md +++ b/.autodoc/docs/markdown/src/config/data.md @@ -16,7 +16,7 @@ These structures and utility functions can be used throughout the project to 
man 2. **What are the different `UploadMethod` options available and how do they affect the behavior of the code?** - The `UploadMethod` enum has five variants: `Bundlr`, `AWS`, `NftStorage`, `SHDW`, `Pinata` and `Cascade`. These options represent different storage services or methods for uploading assets. The choice of `UploadMethod` will determine which storage service or method is used when uploading assets in the project. + The `UploadMethod` enum has six variants: `Bundlr`, `AWS`, `NftStorage`, `SHDW`, `Pinata` and `Cascade`. These options represent different storage services or methods for uploading assets. The choice of `UploadMethod` will determine which storage service or method is used when uploading assets in the project. 3. **How does the `TokenStandard` enum work and what are its possible values?** diff --git a/src/config/data.rs b/src/config/data.rs index 06f27c2a..ee5cc814 100644 --- a/src/config/data.rs +++ b/src/config/data.rs @@ -76,7 +76,7 @@ pub struct ConfigData { // Pinata specific configuration pub pinata_config: Option, - // SDRIVE specific configuration + // Cascade specific configuration #[serde(serialize_with = "to_option_string")] pub cascade_api_key: Option, From 33a15b27209fdfb85163ab3eb4cb7e367dd5cb9f Mon Sep 17 00:00:00 2001 From: mastercodercat Date: Tue, 20 Aug 2024 15:38:01 -0400 Subject: [PATCH 4/4] add sense support --- .autodoc/docs/markdown/src/config/data.md | 6 +- .../docs/markdown/src/upload/methods/mod.md | 4 +- .../docs/markdown/src/upload/methods/sense.md | 48 +++ .../markdown/src/upload/methods/summary.md | 2 +- .autodoc/docs/markdown/src/upload/summary.md | 2 +- .autodoc/docs/markdown/src/upload/uploader.md | 2 +- src/cache.rs | 3 + src/config/data.rs | 6 + src/create_config/process.rs | 12 + src/upload/assets.rs | 8 +- src/upload/methods/mod.rs | 2 + src/upload/methods/sense.rs | 281 ++++++++++++++++++ src/upload/process.rs | 3 +- src/upload/uploader.rs | 3 + src/validate/format.rs | 2 + 15 files changed, 375 insertions(+), 9 deletions(-) create mode 100644 .autodoc/docs/markdown/src/upload/methods/sense.md create mode 100644 src/upload/methods/sense.rs diff --git a/.autodoc/docs/markdown/src/config/data.md b/.autodoc/docs/markdown/src/config/data.md index d3d4748b..caf816eb 100644 --- a/.autodoc/docs/markdown/src/config/data.md +++ b/.autodoc/docs/markdown/src/config/data.md @@ -1,8 +1,8 @@ [View code on GitHub](https://github.com/metaplex-foundation/sugar/src/config/data.rs) -The `sugar` code defines the configuration and data structures for a project that deals with non-fungible tokens (NFTs) and programmable non-fungible tokens (pNFTs). The main structure, `ConfigData`, contains various fields related to the token standard, asset properties, creator information, and storage configurations for different platforms like AWS, NFT.Storage, Shadow Drive, Pinata and Cascade. +The `sugar` code defines the configuration and data structures for a project that deals with non-fungible tokens (NFTs) and programmable non-fungible tokens (pNFTs). The main structure, `ConfigData`, contains various fields related to the token standard, asset properties, creator information, and storage configurations for different platforms like AWS, NFT.Storage, Shadow Drive, Pinata, Cascade and Sense. -The `SugarConfig` struct holds the keypair and RPC URL for the Solana network, while `SolanaConfig` contains the JSON RPC URL, keypair path, and commitment level. The `AwsConfig` and `PinataConfig` structs store the respective platform-specific configurations. 
`cascade_api_key` store api key to access Pastel's Cascade Protocol. +The `SugarConfig` struct holds the keypair and RPC URL for the Solana network, while `SolanaConfig` contains the JSON RPC URL, keypair path, and commitment level. The `AwsConfig` and `PinataConfig` structs store the respective platform-specific configurations. `cascade_api_key`, `sense_api_key` store api key to access Pastel's Cascade and Sense Protocol. The `Creator` struct represents a creator with an address and share percentage. The `Cluster` enum represents different Solana network clusters (Devnet, Mainnet, Localnet, and Unknown). The `TokenStandard` enum distinguishes between NFT and pNFT standards. @@ -16,7 +16,7 @@ These structures and utility functions can be used throughout the project to man 2. **What are the different `UploadMethod` options available and how do they affect the behavior of the code?** - The `UploadMethod` enum has six variants: `Bundlr`, `AWS`, `NftStorage`, `SHDW`, `Pinata` and `Cascade`. These options represent different storage services or methods for uploading assets. The choice of `UploadMethod` will determine which storage service or method is used when uploading assets in the project. + The `UploadMethod` enum has six variants: `Bundlr`, `AWS`, `NftStorage`, `SHDW`, `Pinata`, `Cascade` and `Sense`. These options represent different storage services or methods for uploading assets. The choice of `UploadMethod` will determine which storage service or method is used when uploading assets in the project. 3. **How does the `TokenStandard` enum work and what are its possible values?** diff --git a/.autodoc/docs/markdown/src/upload/methods/mod.md b/.autodoc/docs/markdown/src/upload/methods/mod.md index a2d7f5be..28478674 100644 --- a/.autodoc/docs/markdown/src/upload/methods/mod.md +++ b/.autodoc/docs/markdown/src/upload/methods/mod.md @@ -43,11 +43,13 @@ This code is part of a larger project and serves as a module that provides vario 6. **cascade** This sub-module provides integration with Cascade protocol. Cascade is a protocol that allows users to store data permanently in a highly redundant, distributed fashion with a single upfront fee. It contain functions to upload and manage IPFS content and also adds cascade id to the metadata to get TxID of the Action Registration ticket. +7. **sense** This sub-module provides integration with Sense protocol. Sense is a lightweight protocol on the Pastel Network, built to assess the relative rareness of a given NFT against near-duplicate meta-data. It contain functions to upload and manage IPFS content and also adds sense id to the metadata to get TxID of the Action Registration ticket. + By using `pub use` statements, the code re-exports the contents of each sub-module, making their functions and types available to other parts of the project without the need to explicitly import each sub-module. ## Questions: 1. **What is the purpose of each module in this code?** - Each module (aws, bundlr, nft_storage, pinata, shdw and cascade) likely represents a different component or service within the Sugar project, but it's not clear from this code snippet alone what each module does specifically. + Each module (aws, bundlr, nft_storage, pinata, shdw, cascade and sense) likely represents a different component or service within the Sugar project, but it's not clear from this code snippet alone what each module does specifically. 2. 
 2. **How are these modules being used in the rest of the project?**
 
diff --git a/.autodoc/docs/markdown/src/upload/methods/sense.md b/.autodoc/docs/markdown/src/upload/methods/sense.md
new file mode 100644
index 00000000..fd1de4f1
--- /dev/null
+++ b/.autodoc/docs/markdown/src/upload/methods/sense.md
@@ -0,0 +1,48 @@
+[View code on GitHub](https://github.com/metaplex-foundation/sugar/src/upload/methods/sense.rs)
+
+The code in this file is responsible for uploading files to Pastel's Sense service. It defines the `SenseStorageMethod` struct and implements the `Prepare` and `Uploader` traits for it. The main purpose of this code is to handle the process of uploading files to Sense while adhering to the service's limitations, such as file size and request rate limits.
+
+The `SenseStorageMethod` struct contains an `Arc<Client>` for making HTTP requests. The `new` method initializes the struct by creating an HTTP client with the necessary headers, including the authentication token.
+
+The `prepare` method, which is part of the `Prepare` trait implementation, checks if any file in the provided asset pairs exceeds the 100MB file size limit. If any file is too large, an error is returned.
+
+The `upload` method, which is part of the `Uploader` trait implementation, is responsible for uploading the files to the Sense Protocol. It first groups the files into batches, ensuring that each batch does not exceed the file size and count limits. Then, it iterates through the batches and uploads them using a multipart HTTP request. If the upload is successful, the cache is updated with the new file URLs and the ID of the active Sense registration, and the progress bar is incremented. If an error occurs during the upload, it is added to a list of errors that is returned at the end of the method.
+
+To avoid hitting the rate limit, the code waits for a specified duration (`REQUEST_WAIT`) between uploading batches. Additionally, an `interrupted` flag is used to stop the upload process if needed.
+
+Here's an example of how this code might be used in the larger project:
+
+```rust
+let config_data = ConfigData::load("config.toml")?;
+let sense_storage_method = SenseStorageMethod::new(&config_data).await?;
+
+let sugar_config = SugarConfig::load("sugar_config.toml")?;
+let asset_pairs = load_asset_pairs(&sugar_config)?;
+let asset_indices = get_asset_indices(&asset_pairs)?;
+
+sense_storage_method.prepare(&sugar_config, &asset_pairs, asset_indices).await?;
+
+let mut cache = Cache::load("cache.toml")?;
+let mut assets = prepare_assets(&asset_pairs, &cache)?;
+let progress = ProgressBar::new(assets.len() as u64);
+let interrupted = Arc::new(AtomicBool::new(false));
+
+let errors = sense_storage_method
+    .upload(&sugar_config, &mut cache, DataType::Image, &mut assets, &progress, interrupted)
+    .await?;
+```
+
+This example demonstrates how to initialize the `SenseStorageMethod`, prepare the assets for upload, and then upload them using the `upload` method.
+## Questions:
+ 1. **Question**: What is the purpose of the `SenseStorageMethod` struct and its associated methods?
+   **Answer**: The `SenseStorageMethod` struct is used to handle the interaction with the Sense Protocol API. It provides methods for initializing a new instance with the necessary authentication, preparing the assets for upload by checking file size limits, and uploading the assets to the Sense Protocol API.
+
+2. **Question**: What are the constants defined at the beginning of the code and what are their purposes?
+   **Answer**: The constants defined at the beginning of the code are:
+   - `SENSE_API_URL`: The base URL for the Sense Protocol API.
+   - `REQUEST_WAIT`: The time window (in milliseconds) to wait between requests to avoid rate limits.
+   - `FILE_SIZE_LIMIT`: The maximum file size allowed for upload (100 MB).
+   - `FILE_COUNT_LIMIT`: The maximum number of files allowed per request.
+
+3. **Question**: How does the `upload` method handle uploading assets in batches?
+   **Answer**: The `upload` method first groups the assets into batches based on the file size and count limits. It then iterates through each batch, creating a multipart form with the assets, and sends a POST request to the Sense Protocol API. After each successful upload, the cache is updated, and the progress bar is incremented. If there are more batches to process, the method waits for a specified duration to avoid rate limits before proceeding with the next batch.
\ No newline at end of file
diff --git a/.autodoc/docs/markdown/src/upload/methods/summary.md b/.autodoc/docs/markdown/src/upload/methods/summary.md
index 976ccdf9..67461614 100644
--- a/.autodoc/docs/markdown/src/upload/methods/summary.md
+++ b/.autodoc/docs/markdown/src/upload/methods/summary.md
@@ -22,7 +22,7 @@ let (asset_id, uploaded_url) = upload_handle.await??;
 
 Similarly, the `bundlr.rs` file provides a module for uploading assets to the Bundlr platform using the Solana blockchain. The `BundlrMethod` struct handles the upload process, including setting up the Bundlr client, funding the Bundlr address, and uploading the assets. An example usage is provided in the file summary.
 
-The `nft_storage.rs` file handles uploading files to the NFT Storage service, while the `pinata.rs` file provides functionality for uploading files to the Pinata IPFS service. The `cascade.rs` file uploads files through Pastel's Cascade protocol and records a `cascade_id` (the upload `result_id`, used to look up the TxID of the active registration) in the metadata. These files define structs that implement the `Prepare` and `Uploader` or `ParallelUploader` traits. Example usages for these modules can be found in their respective file summaries.
+The `nft_storage.rs` file handles uploading files to the NFT Storage service, while the `pinata.rs` file provides functionality for uploading files to the Pinata IPFS service. The `cascade.rs` and `sense.rs` files upload files through Pastel's Cascade and Sense protocols and record a `cascade_id` and a `sense_id` (the upload `result_id`, used to look up the TxID of the active registration) in the metadata. These files define structs that implement the `Prepare` and `Uploader` or `ParallelUploader` traits. Example usages for these modules can be found in their respective file summaries.
 
 Finally, the `shdw.rs` file is responsible for handling the storage and uploading of assets to the Shadow Drive, a decentralized storage solution. It provides a `SHDWMethod` struct that implements the `Prepare` and `ParallelUploader` traits. An example usage is provided in the file summary.
 
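To illustrate what these methods add, a hypothetical (abridged) asset metadata file after a Cascade and Sense upload might look like the following; the `cascade_id` and `sense_id` fields are the new optional additions, and every value here is invented:

```json
{
  "name": "Asset #0",
  "image": "https://ipfs.example/ipfs/HASH/0.png",
  "cascade_id": "CASCADE-RESULT-ID",
  "sense_id": "SENSE-RESULT-ID",
  "properties": {
    "files": [{ "uri": "https://ipfs.example/ipfs/HASH/0.png", "type": "image/png" }]
  }
}
```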
diff --git a/.autodoc/docs/markdown/src/upload/summary.md b/.autodoc/docs/markdown/src/upload/summary.md index 923ec395..24e059c0 100644 --- a/.autodoc/docs/markdown/src/upload/summary.md +++ b/.autodoc/docs/markdown/src/upload/summary.md @@ -4,7 +4,7 @@ The code in the `upload` folder is responsible for managing and uploading assets For example, the `assets.rs` file provides functions to manage assets, calculate their sizes, and update their metadata. The `errors.rs` file defines a custom error type called `UploadError` for handling various errors that may occur during the upload process. The `process.rs` file is responsible for uploading assets to a storage system, while the `uploader.rs` file handles the uploading of assets and defines traits and structs for managing the upload process. -The `methods` subfolder contains code for handling the upload of assets to different storage services and platforms, such as Amazon S3, Bundlr, NFT Storage, Pinata IPFS, Shadow Drive and Cascade. Each storage method is implemented in a separate file, providing a clean and modular approach to integrating various storage services into the larger project. +The `methods` subfolder contains code for handling the upload of assets to different storage services and platforms, such as Amazon S3, Bundlr, NFT Storage, Pinata IPFS, Shadow Drive, Cascade and Sense. Each storage method is implemented in a separate file, providing a clean and modular approach to integrating various storage services into the larger project. Here's an example of how the code in the `upload` folder might be used in the larger project: diff --git a/.autodoc/docs/markdown/src/upload/uploader.md b/.autodoc/docs/markdown/src/upload/uploader.md index 4a716db5..75506212 100644 --- a/.autodoc/docs/markdown/src/upload/uploader.md +++ b/.autodoc/docs/markdown/src/upload/uploader.md @@ -37,4 +37,4 @@ This code would initialize an uploader object based on the configuration, prepar **Answer:** The `ParallelUploader` trait is designed for upload methods that support parallel uploads. It abstracts the threading logic, allowing methods to focus on the logic of uploading a single asset. It inherits from the `Uploader` trait and requires implementing the `upload_asset` function, which returns a `JoinHandle` for the task responsible for uploading the specified asset. 3. **Question:** How does the `initialize` function work and what is its purpose? - **Answer:** The `initialize` function acts as a factory function for creating uploader objects based on the configuration's `uploadMethod`. It takes `sugar_config` and `config_data` as arguments and returns a `Result` containing a boxed `Uploader` trait object. Depending on the `uploadMethod`, it initializes the appropriate uploader object (e.g., `AWSMethod`, `BundlrMethod`, `NftStorageMethod`, `SHDWMethod`, `PinataMethod` or `CascadeStorageMethod`). \ No newline at end of file + **Answer:** The `initialize` function acts as a factory function for creating uploader objects based on the configuration's `uploadMethod`. It takes `sugar_config` and `config_data` as arguments and returns a `Result` containing a boxed `Uploader` trait object. Depending on the `uploadMethod`, it initializes the appropriate uploader object (e.g., `AWSMethod`, `BundlrMethod`, `NftStorageMethod`, `SHDWMethod`, `PinataMethod`, `CascadeStorageMethod` or `SenseStorageMethod`). 
\ No newline at end of file diff --git a/src/cache.rs b/src/cache.rs index 71d62e27..4819dacc 100644 --- a/src/cache.rs +++ b/src/cache.rs @@ -131,6 +131,9 @@ pub struct CacheItem { #[serde(skip_serializing_if = "Option::is_none")] pub cascade_id: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub sense_id: Option, } impl CacheItem { diff --git a/src/config/data.rs b/src/config/data.rs index ee5cc814..21f0933e 100644 --- a/src/config/data.rs +++ b/src/config/data.rs @@ -80,6 +80,10 @@ pub struct ConfigData { #[serde(serialize_with = "to_option_string")] pub cascade_api_key: Option, + // Sense specific configuration + #[serde(serialize_with = "to_option_string")] + pub sense_api_key: Option, + /// Hidden setttings pub hidden_settings: Option, @@ -234,6 +238,8 @@ pub enum UploadMethod { Sdrive, #[serde(rename = "cascade")] Cascade, + #[serde(rename = "sense")] + Sense, } impl Display for UploadMethod { diff --git a/src/create_config/process.rs b/src/create_config/process.rs index 1916a125..1475a62e 100644 --- a/src/create_config/process.rs +++ b/src/create_config/process.rs @@ -317,6 +317,7 @@ pub fn process_create_config(args: CreateConfigArgs) -> Result<()> { "Pinata", "SDrive", "Cascade", + "Sense" ]; config_data.upload_method = match Select::with_theme(&theme) .with_prompt("What upload method do you want to use?") @@ -332,6 +333,7 @@ pub fn process_create_config(args: CreateConfigArgs) -> Result<()> { 4 => UploadMethod::Pinata, 5 => UploadMethod::Sdrive, 6 => UploadMethod::Cascade, + 7 => UploadMethod::Sense, _ => UploadMethod::Bundlr, }; @@ -441,6 +443,16 @@ pub fn process_create_config(args: CreateConfigArgs) -> Result<()> { .unwrap(), ); } + + + if config_data.upload_method == UploadMethod::Sense { + config_data.sense_api_key = Some( + Input::with_theme(&theme) + .with_prompt("What is the Sense api key?") + .interact() + .unwrap(), + ); + } // is mutable config_data.is_mutable = Confirm::with_theme(&theme) diff --git a/src/upload/assets.rs b/src/upload/assets.rs index a9502136..4f8c7030 100644 --- a/src/upload/assets.rs +++ b/src/upload/assets.rs @@ -43,6 +43,7 @@ impl AssetPair { animation_hash: self.animation_hash, animation_link: None, cascade_id: None, + sense_id: None, } } } @@ -362,11 +363,12 @@ pub fn get_updated_metadata( Ok(serde_json::to_string(&metadata).unwrap()) } -pub fn get_updated_metadata_with_cascade_id( +pub fn get_updated_metadata_with_cascade_and_sense_id( metadata_file: &str, image_link: &str, animation_link: &Option, cascade_id: &Option, + sense_id: &Option, ) -> Result { let mut metadata: Metadata = { let m = OpenOptions::new() @@ -401,6 +403,10 @@ pub fn get_updated_metadata_with_cascade_id( metadata.cascade_id = cascade_id.clone(); } + if sense_id.is_some() { + metadata.sense_id = sense_id.clone(); + } + if animation_link.is_some() { // only updates the link if we have a new value metadata.animation_url = animation_link.clone(); diff --git a/src/upload/methods/mod.rs b/src/upload/methods/mod.rs index f36a0d4f..5e21dd59 100644 --- a/src/upload/methods/mod.rs +++ b/src/upload/methods/mod.rs @@ -1,6 +1,7 @@ pub mod aws; pub mod bundlr; pub mod cascade; +pub mod sense; pub mod nft_storage; pub mod pinata; pub mod sdrive; @@ -9,5 +10,6 @@ pub mod shdw; pub use aws::*; pub use bundlr::*; pub use cascade::*; +pub use sense::*; pub use nft_storage::*; pub use sdrive::*; diff --git a/src/upload/methods/sense.rs b/src/upload/methods/sense.rs new file mode 100644 index 00000000..e4cf2101 --- /dev/null +++ b/src/upload/methods/sense.rs @@ -0,0 +1,281 @@ 
+use std::{ + fs, + path::Path, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use async_trait::async_trait; +use reqwest::{ + header, + multipart::{Form, Part}, + Client, StatusCode, +}; +use tokio::time::{sleep, Duration}; + +use crate::{common::*, config::*, upload::*}; + +// API end point. +const SENSE_API_URL: &str = "https://gateway-api.pastel.network/"; +// Request time window (ms) to avoid the rate limit. +const REQUEST_WAIT: u64 = 10000; +// File size limit (100mb). +const FILE_SIZE_LIMIT: u64 = 100 * 1024 * 1024; +// Number of files per request limit. +const FILE_COUNT_LIMIT: u64 = 100; + +pub enum SenseStorageError { + ApiError(Value), +} + +/// response after an nft was stored +#[derive(Debug, Deserialize, Default)] +pub struct UploadResponse { + /// id of the request + pub request_id: String, + /// status of the request + pub request_status: String, + /// stored nft data + pub results: Vec, +} + +/// main obj that hold all the response data +#[derive(Debug, Deserialize, Default)] +#[serde(default)] +pub struct UploadResult { + pub result_id: String, + pub result_status: String, + pub registration_ticket_txid: Option, + pub original_file_ipfs_link: Option, + pub error: Option, +} + +pub struct SenseStorageMethod { + client: Arc, +} + +impl SenseStorageMethod { + /// Initialize a new SenseStorageHandler. + pub async fn new(config_data: &ConfigData) -> Result { + if let Some(api_key) = &config_data.sense_api_key { + let client_builder = Client::builder(); + + let mut headers = header::HeaderMap::new(); + let mut api_key_mut = header::HeaderValue::from_str(api_key)?; + api_key_mut.set_sensitive(true); + headers.insert("Api_key", api_key_mut); + + let client = client_builder.default_headers(headers).build()?; + + let url = format!("{}/api/v1/sense/gateway_requests", SENSE_API_URL); + let response = client.get(url).send().await?; + + match response.status() { + StatusCode::OK => Ok(Self { + client: Arc::new(client), + }), + StatusCode::UNAUTHORIZED => Err(anyhow!("Invalid sense api key.")), + code => Err(anyhow!("Could not initialize sense client: {code}")), + } + } else { + Err(anyhow!("Missing 'SenseApiKey' value in config file.")) + } + } +} + +#[async_trait] +impl Prepare for SenseStorageMethod { + /// Verifies that no file is larger than 100MB (upload of files larger than 100MB are + /// not currently supported). + async fn prepare( + &self, + _sugar_config: &SugarConfig, + asset_pairs: &HashMap, + asset_indices: Vec<(DataType, &[isize])>, + ) -> Result<()> { + for (data_type, indices) in asset_indices { + for index in indices { + let item = asset_pairs.get(index).unwrap(); + let size = match data_type { + DataType::Image => { + let path = Path::new(&item.image); + fs::metadata(path)?.len() + } + DataType::Animation => { + if let Some(animation) = &item.animation { + let path = Path::new(animation); + fs::metadata(path)?.len() + } else { + 0 + } + } + DataType::Metadata => { + let mock_uri = "x".repeat(MOCK_URI_SIZE); + let animation = if item.animation.is_some() { + Some(mock_uri.clone()) + } else { + None + }; + + get_updated_metadata(&item.metadata, &mock_uri.clone(), &animation)? 
+ .into_bytes() + .len() as u64 + } + }; + + if size > FILE_SIZE_LIMIT { + return Err(anyhow!( + "File '{}' exceeds the current 100MB file size limit", + item.name, + )); + } + } + } + Ok(()) + } +} + +#[async_trait] +impl Uploader for SenseStorageMethod { + /// Upload the data to Nft Storage + async fn upload( + &self, + _sugar_config: &SugarConfig, + cache: &mut Cache, + data_type: DataType, + assets: &mut Vec, + progress: &ProgressBar, + interrupted: Arc, + ) -> Result> { + let mut batches: Vec> = Vec::new(); + let mut current: Vec<&AssetInfo> = Vec::new(); + let mut upload_size = 0; + let mut upload_count = 0; + + for asset_info in assets { + let size = match data_type { + DataType::Image | DataType::Animation => { + let path = Path::new(&asset_info.content); + fs::metadata(path)?.len() + } + DataType::Metadata => { + let content = String::from(&asset_info.content); + content.into_bytes().len() as u64 + } + }; + + if (upload_size + size) > FILE_SIZE_LIMIT || (upload_count + 1) > FILE_COUNT_LIMIT { + batches.push(current); + current = Vec::new(); + upload_size = 0; + upload_count = 0; + } + + upload_size += size; + upload_count += 1; + current.push(asset_info); + } + // adds the last chunk (if there is one) + if !current.is_empty() { + batches.push(current); + } + + let mut errors = Vec::new(); + // sets the length of the progress bar as the number of batches + progress.set_length(batches.len() as u64); + + while !interrupted.load(Ordering::SeqCst) && !batches.is_empty() { + let batch = batches.remove(0); + let mut form = Form::new(); + + for asset_info in &batch { + let data = match asset_info.data_type { + DataType::Image | DataType::Animation => fs::read(&asset_info.content)?, + DataType::Metadata => { + let content = String::from(&asset_info.content); + content.into_bytes() + } + }; + + let file = Part::bytes(data) + .file_name(asset_info.name.clone()) + .mime_str(asset_info.content_type.as_str())?; + form = form.part("files", file); + } + + let response = self + .client + .post(format!( + "{SENSE_API_URL}/api/v1/sense?make_publicly_accessible=true" + )) + .multipart(form) + .send() + .await?; + let status = response.status(); + + if status.is_success() { + let body = response.json::().await?; + let response: UploadResponse = serde_json::from_value(body)?; + + // updates the cache content + + for asset_info in batch { + let id = asset_info.asset_id.clone(); + if response.results[0].original_file_ipfs_link.is_some() { + let uri = response.results[0].original_file_ipfs_link.clone().unwrap(); + // cache item to update + let item = cache.items.get_mut(&id).unwrap(); + + match data_type { + DataType::Image => { + item.sense_id = Some(response.results[0].result_id.clone()); + item.image_link = uri; + } + DataType::Metadata => item.metadata_link = uri, + DataType::Animation => item.animation_link = Some(uri), + } + } else { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({})", + response.results[0].result_status + ))); + } + } + // syncs cache (checkpoint) + cache.sync_file()?; + // updates the progress bar + progress.inc(1); + } else { + let body = response.json::().await?; + let response: UploadResponse = serde_json::from_value(body)?; + if !response.results.is_empty() { + if response.results[0].error.is_some() { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({}): {}", + status, + response.results[0].error.clone().unwrap() + ))); + } else { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({}): {}", + 
status, response.results[0].result_status + ))); + } + } else { + errors.push(UploadError::SendDataFailed(format!( + "Error uploading batch ({}): {}", + status, response.request_status + ))); + } + } + if !batches.is_empty() { + // wait to minimize the chance of getting caught by the rate limit + sleep(Duration::from_millis(REQUEST_WAIT)).await; + } + } + + Ok(errors) + } +} diff --git a/src/upload/process.rs b/src/upload/process.rs index fe26455f..a948e72e 100644 --- a/src/upload/process.rs +++ b/src/upload/process.rs @@ -553,11 +553,12 @@ async fn upload_data( // replaces the media link without modifying the original file to avoid // changing the hash of the metadata file DataType::Metadata => match &cache_item.cascade_id.is_some() { - true => get_updated_metadata_with_cascade_id( + true => get_updated_metadata_with_cascade_and_sense_id( &file_path, &cache_item.image_link, &cache_item.animation_link, &cache_item.cascade_id, + &cache_item.sense_id, )?, false => get_updated_metadata( &file_path, diff --git a/src/upload/uploader.rs b/src/upload/uploader.rs index c224b959..9a795103 100644 --- a/src/upload/uploader.rs +++ b/src/upload/uploader.rs @@ -295,5 +295,8 @@ pub async fn initialize( UploadMethod::Cascade => { Box::new(cascade::CascadeStorageMethod::new(config_data).await?) as Box } + UploadMethod::Sense => { + Box::new(sense::SenseStorageMethod::new(config_data).await?) as Box + } }) } diff --git a/src/validate/format.rs b/src/validate/format.rs index 80a3818e..6a23f49e 100644 --- a/src/validate/format.rs +++ b/src/validate/format.rs @@ -21,6 +21,8 @@ pub struct Metadata { pub properties: Property, #[serde(skip_serializing_if = "Option::is_none")] pub cascade_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub sense_id: Option, #[serde(flatten)] pub extra: HashMap, }
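A note on the `#[serde(skip_serializing_if = "Option::is_none")]` attribute used for the new `cascade_id` and `sense_id` fields above: it omits the fields from serialized output entirely when they are `None`, so metadata and cache files written for the other upload methods keep their existing shape. A minimal, runnable sketch of that behavior (the `Meta` struct below is a stand-in for illustration, not the project's actual type; it assumes the `serde` and `serde_json` crates are available):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct Meta {
    name: String,
    // Omitted from the JSON output entirely when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    cascade_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    sense_id: Option<String>,
}

fn main() {
    let with_id = Meta {
        name: "Asset #0".into(),
        cascade_id: Some("CASCADE-RESULT-ID".into()),
        sense_id: None,
    };
    // Prints {"name":"Asset #0","cascade_id":"CASCADE-RESULT-ID"}; no `sense_id` key at all.
    println!("{}", serde_json::to_string(&with_id).unwrap());
}
```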