diff --git a/.github/workflows/registry-updates.yaml b/.github/workflows/registry-updates.yaml
index fd0a6afe..ee4fde28 100644
--- a/.github/workflows/registry-updates.yaml
+++ b/.github/workflows/registry-updates.yaml
@@ -4,6 +4,7 @@ on:
   pull_request_target:
     branches:
       - main
+      - kc/new-connector-publication-automation
     types: [opened, synchronize, reopened]
     paths:
       - registry/**
diff --git a/registry-automation/README.md b/registry-automation/README.md
new file mode 100644
index 00000000..2c0d4852
--- /dev/null
+++ b/registry-automation/README.md
@@ -0,0 +1,45 @@
+# Introduction
+
+This directory contains the registry automation CLI. The `ci` command reads the list of files changed in a pull request and publishes the corresponding connector updates (new connectors, connector versions, logos, and READMEs) to the connector registry.
+
+## Steps to run
+
+1. Consider the following `changed_files.json` file:
+
+```json
+{
+  "added_files": [
+    "registry/hasura/azure-cosmos/releases/v0.1.6/connector-packaging.json"
+  ],
+  "modified_files": [
+    "registry/hasura/azure-cosmos/metadata.json"
+  ],
+  "deleted_files": []
+}
+```
+
+2. You will require the following environment variables:
+
+1. GCP_BUCKET_NAME
+2. CLOUDINARY_URL
+3. GCP_SERVICE_ACCOUNT_KEY
+4. CONNECTOR_REGISTRY_GQL_URL
+5. CONNECTOR_PUBLICATION_KEY
+6. GCP_SERVICE_ACCOUNT_DETAILS
+
+For example (replace the placeholder values):
+
+```bash
+export GCP_BUCKET_NAME="<bucket-name>"
+export CLOUDINARY_URL="<cloudinary-url>"
+export GCP_SERVICE_ACCOUNT_KEY="<service-account-key>"
+export CONNECTOR_REGISTRY_GQL_URL="<registry-graphql-url>"
+export CONNECTOR_PUBLICATION_KEY="<publication-key>"
+export GCP_SERVICE_ACCOUNT_DETAILS="<service-account-json>"
+```
+
+3. Run the following command from the `registry-automation` directory:
+
+```bash
+go run main.go ci --changed-files-path changed_files.json
+```
diff --git a/registry-automation/cmd/ci.go b/registry-automation/cmd/ci.go
index cd9c31f9..11847204 100644
--- a/registry-automation/cmd/ci.go
+++ b/registry-automation/cmd/ci.go
@@ -16,7 +16,6 @@ import (
 	"github.com/machinebox/graphql"
 	"github.com/spf13/cobra"
 	"google.golang.org/api/option"
-	"gopkg.in/yaml.v2"
 )
 
 // ciCmd represents the ci command
@@ -26,83 +25,6 @@ var ciCmd = &cobra.Command{
 	Run: runCI,
 }
 
-type ChangedFiles struct {
-	Added    []string `json:"added_files"`
-	Modified []string `json:"modified_files"`
-	Deleted  []string `json:"deleted_files"`
-}
-
-// ConnectorVersion represents a version of a connector, this type is
-// used to insert a new version of a connector in the registry.
-type ConnectorVersion struct {
-	// Namespace of the connector, e.g. "hasura"
-	Namespace string `json:"namespace"`
-	// Name of the connector, e.g. "mongodb"
-	Name string `json:"name"`
-	// Semantic version of the connector version, e.g. "v1.0.0"
-	Version string `json:"version"`
-	// Docker image of the connector version (optional)
-	// This field is only required if the connector version is of type `PrebuiltDockerImage`
-	Image *string `json:"image,omitempty"`
-	// URL to the connector's metadata
-	PackageDefinitionURL string `json:"package_definition_url"`
-	// Is the connector version multitenant?
- IsMultitenant bool `json:"is_multitenant"` - // Type of the connector packaging `PrebuiltDockerImage`/`ManagedDockerBuild` - Type string `json:"type"` -} - -// Create a struct with the following fields: -// type string -// image *string (optional) -type ConnectionVersionMetadata struct { - Type string `yaml:"type"` - Image *string `yaml:"image,omitempty"` -} - -type WhereClause struct { - ConnectorName string - ConnectorNamespace string -} - -func (wc WhereClause) MarshalJSON() ([]byte, error) { - where := map[string]interface{}{ - "_and": []map[string]interface{}{ - {"name": map[string]string{"_eq": wc.ConnectorName}}, - {"namespace": map[string]string{"_eq": wc.ConnectorNamespace}}, - }, - } - return json.Marshal(where) -} - -type ConnectorOverviewUpdate struct { - Set struct { - Docs *string `json:"docs,omitempty"` - Logo *string `json:"logo,omitempty"` - } `json:"_set"` - Where WhereClause `json:"where"` -} - -type ConnectorOverviewUpdates struct { - Updates []ConnectorOverviewUpdate `json:"updates"` -} - -const ( - ManagedDockerBuild = "ManagedDockerBuild" - PrebuiltDockerImage = "PrebuiltDockerImage" -) - -// Make a struct with the fields expected in the command line arguments -type ConnectorRegistryArgs struct { - ChangedFilesPath string - PublicationEnv string - ConnectorRegistryGQLUrl string - ConnectorPublicationKey string - GCPServiceAccountDetails string - GCPBucketName string - CloudinaryUrl string -} - var ciCmdArgs ConnectorRegistryArgs func init() { @@ -125,13 +47,20 @@ func init() { } -func buildContext() { +func buildContext() Context { // Connector registry Hasura GraphQL URL registryGQLURL := os.Getenv("CONNECTOR_REGISTRY_GQL_URL") + var registryGQLClient *graphql.Client + var storageClient *storage.Client + var cloudinaryClient *cloudinary.Cloudinary + var cloudinaryWrapper *CloudinaryWrapper + var storageWrapper *StorageClientWrapper + if registryGQLURL == "" { log.Fatalf("CONNECTOR_REGISTRY_GQL_URL is not set") } else { ciCmdArgs.ConnectorRegistryGQLUrl = registryGQLURL + registryGQLClient = graphql.NewClient(registryGQLURL) } // Connector publication key @@ -147,6 +76,15 @@ func buildContext() { if gcpServiceAccountDetails == "" { log.Fatalf("GCP_SERVICE_ACCOUNT_DETAILS is not set") } else { + var err error + storageClient, err = storage.NewClient(context.Background(), option.WithCredentialsJSON([]byte(gcpServiceAccountDetails))) + if err != nil { + log.Fatalf("Failed to create Google bucket client: %v", err) + } + defer storageClient.Close() + + storageWrapper = &StorageClientWrapper{storageClient} + ciCmdArgs.GCPServiceAccountDetails = gcpServiceAccountDetails } @@ -163,115 +101,211 @@ func buildContext() { if cloudinaryUrl == "" { log.Fatalf("CLOUDINARY_URL is not set") } else { - ciCmdArgs.CloudinaryUrl = cloudinaryUrl + var err error + cloudinaryClient, err = cloudinary.NewFromURL(cloudinaryUrl) + if err != nil { + log.Fatalf("Failed to create cloudinary client: %v", err) + + } + cloudinaryWrapper = &CloudinaryWrapper{cloudinaryClient} + + } + + return Context{ + Env: ciCmdArgs.PublicationEnv, + RegistryGQLClient: registryGQLClient, + StorageClient: storageWrapper, + Cloudinary: cloudinaryWrapper, } } -// processChangedFiles processes the files in the PR and extracts the connector name and version -// This function checks for the following things: -// 1. If a new connector version is added, it adds the connector version to the `newlyAddedConnectorVersions` map. -// 2. 
If the logo file is modified, it adds the connector name and the path to the modified logo to the `modifiedLogos` map. -// 3. If the README file is modified, it adds the connector name and the path to the modified README to the `modifiedReadmes` map. -func processChangedFiles(changedFiles ChangedFiles) (NewConnectorVersions, ModifiedLogos, ModifiedReadmes) { +type fileProcessor struct { + regex *regexp.Regexp + process func(matches []string, file string) +} - newlyAddedConnectorVersions := make(map[Connector]map[string]string) - modifiedLogos := make(map[Connector]string) - modifiedReadmes := make(map[Connector]string) +// processChangedFiles categorizes changes in connector files within a registry system. +// It handles new and modified files including metadata, logos, READMEs, and connector versions. +// +// The function takes a ChangedFiles struct containing slices of added and modified filenames, +// and returns a ProcessedChangedFiles struct with categorized changes. +// +// Files are processed based on their path and type: +// - metadata.json: New connectors +// - logo.(png|svg): New or modified logos +// - README.md: New or modified READMEs +// - connector-packaging.json: New connector versions +// +// Any files not matching these patterns are logged as skipped. +// +// Example usage: +// +// changedFiles := ChangedFiles{ +// Added: []string{"registry/namespace1/connector1/metadata.json"}, +// Modified: []string{"registry/namespace2/connector2/README.md"}, +// } +// result := processChangedFiles(changedFiles) +func processChangedFiles(changedFiles ChangedFiles) ProcessedChangedFiles { + result := ProcessedChangedFiles{ + NewConnectorVersions: make(map[Connector]map[string]string), + ModifiedLogos: make(map[Connector]string), + ModifiedReadmes: make(map[Connector]string), + NewConnectors: make(map[Connector]MetadataFile), + NewLogos: make(map[Connector]string), + NewReadmes: make(map[Connector]string), + } + + processors := []fileProcessor{ + { + regex: regexp.MustCompile(`^registry/([^/]+)/([^/]+)/metadata.json$`), + process: func(matches []string, file string) { + // IsNew is set to true because we are processing newly added metadata.json + connector := Connector{Name: matches[2], Namespace: matches[1]} + result.NewConnectors[connector] = MetadataFile(file) + fmt.Printf("Processing metadata file for connector: %s\n", connector.Name) + }, + }, + { + regex: regexp.MustCompile(`^registry/([^/]+)/([^/]+)/logo\.(png|svg)$`), + process: func(matches []string, file string) { + connector := Connector{Name: matches[2], Namespace: matches[1]} + result.NewLogos[connector] = file + fmt.Printf("Processing logo file for connector: %s\n", connector.Name) + }, + }, + { + regex: regexp.MustCompile(`^registry/([^/]+)/([^/]+)/README\.md$`), + process: func(matches []string, file string) { + connector := Connector{Name: matches[2], Namespace: matches[1]} + result.NewReadmes[connector] = file + fmt.Printf("Processing README file for connector: %s\n", connector.Name) + }, + }, + { + regex: regexp.MustCompile(`^registry/([^/]+)/([^/]+)/releases/([^/]+)/connector-packaging\.json$`), + process: func(matches []string, file string) { + connector := Connector{Name: matches[2], Namespace: matches[1]} + version := matches[3] + if _, exists := result.NewConnectorVersions[connector]; !exists { + result.NewConnectorVersions[connector] = make(map[string]string) + } + result.NewConnectorVersions[connector][version] = file + }, + }, + } - var connectorVersionPackageRegex = 
regexp.MustCompile(`^registry/([^/]+)/([^/]+)/releases/([^/]+)/connector-packaging\.json$`) - var logoPngRegex = regexp.MustCompile(`^registry/([^/]+)/([^/]+)/logo\.(png|svg)$`) - var readmeMdRegex = regexp.MustCompile(`^registry/([^/]+)/([^/]+)/README\.md$`) + processFile := func(file string, isModified bool) { + for _, processor := range processors { + if matches := processor.regex.FindStringSubmatch(file); matches != nil { + if isModified { + connector := Connector{Name: matches[2], Namespace: matches[1]} + if processor.regex.String() == processors[1].regex.String() { + result.ModifiedLogos[connector] = file + } else if processor.regex.String() == processors[2].regex.String() { + result.ModifiedReadmes[connector] = file + } + } else { + processor.process(matches, file) + } + return + } + } + fmt.Printf("Skipping %s file: %s\n", map[bool]string{true: "modified", false: "newly added"}[isModified], file) + } for _, file := range changedFiles.Added { + processFile(file, false) + } - // Check if the file is a connector version package - if connectorVersionPackageRegex.MatchString(file) { + for _, file := range changedFiles.Modified { + processFile(file, true) + } - matches := connectorVersionPackageRegex.FindStringSubmatch(file) - if len(matches) == 4 { - connectorNamespace := matches[1] - connectorName := matches[2] - connectorVersion := matches[3] + return result +} - connector := Connector{ - Name: connectorName, - Namespace: connectorNamespace, - } +func processNewConnector(ciCtx Context, connector Connector, metadataFile MetadataFile) (ConnectorOverviewInsert, HubRegistryConnectorInsertInput, error) { + // Process the newly added connector + // Get the string value from metadataFile + var connectorOverviewAndAuthor ConnectorOverviewInsert + var hubRegistryConnectorInsertInput HubRegistryConnectorInsertInput - if _, exists := newlyAddedConnectorVersions[connector]; !exists { - newlyAddedConnectorVersions[connector] = make(map[string]string) - } + connectorMetadata, err := readJSONFile[ConnectorMetadata](string(metadataFile)) + if err != nil { + return connectorOverviewAndAuthor, hubRegistryConnectorInsertInput, fmt.Errorf("Failed to parse the connector metadata file: %v", err) + } - newlyAddedConnectorVersions[connector][connectorVersion] = file - } + docs, err := readFile(fmt.Sprintf("registry/%s/%s/README.md", connector.Namespace, connector.Name)) - } else { - fmt.Println("Skipping newly added file: ", file) - } + if err != nil { + return connectorOverviewAndAuthor, hubRegistryConnectorInsertInput, fmt.Errorf("Failed to read the README file of the connector: %s : %v", connector.Name, err) } - for _, file := range changedFiles.Modified { - if logoPngRegex.MatchString(file) { - // Process the logo file - // print the name of the connector and the version - matches := logoPngRegex.FindStringSubmatch(file) - if len(matches) == 4 { - - connectorNamespace := matches[1] - connectorName := matches[2] - connector := Connector{ - Name: connectorName, - Namespace: connectorNamespace, - } - modifiedLogos[connector] = file - fmt.Printf("Processing logo file for connector: %s\n", connectorName) - } - - } else if readmeMdRegex.MatchString(file) { - // Process the README file - // print the name of the connector and the version - matches := readmeMdRegex.FindStringSubmatch(file) + logoPath := fmt.Sprintf("registry/%s/%s/logo.png", connector.Namespace, connector.Name) - if len(matches) == 3 { + uploadedLogoUrl, err := uploadLogoToCloudinary(ciCtx.Cloudinary, Connector{Name: connector.Name, Namespace: 
connector.Namespace}, logoPath) + if err != nil { + return connectorOverviewAndAuthor, hubRegistryConnectorInsertInput, err + } - connectorNamespace := matches[1] - connectorName := matches[2] - connector := Connector{ - Name: connectorName, - Namespace: connectorNamespace, - } + // Get connector info from the registry + connectorInfo, err := getConnectorInfoFromRegistry(ciCtx.RegistryGQLClient, connector.Name, connector.Namespace) + if err != nil { + return connectorOverviewAndAuthor, hubRegistryConnectorInsertInput, + fmt.Errorf("Failed to get the connector info from the registry: %v", err) + } - modifiedReadmes[connector] = file + // Check if the connector already exists in the registry + if len(connectorInfo.HubRegistryConnector) > 0 { + if ciCtx.Env == "staging" { + fmt.Printf("Connector already exists in the registry: %s/%s\n", connector.Namespace, connector.Name) + fmt.Println("The connector is going to be overwritten in the registry.") - fmt.Printf("Processing README file for connector: %s\n", connectorName) - } } else { - fmt.Println("Skipping modified file: ", file) + + return connectorOverviewAndAuthor, hubRegistryConnectorInsertInput, fmt.Errorf("Attempting to create a new hub connector, but the connector already exists in the registry: %s/%s", connector.Namespace, connector.Name) } } - return newlyAddedConnectorVersions, modifiedLogos, modifiedReadmes + hubRegistryConnectorInsertInput = HubRegistryConnectorInsertInput{ + Name: connector.Name, + Namespace: connector.Namespace, + Title: connectorMetadata.Overview.Title, + } + + connectorOverviewAndAuthor = ConnectorOverviewInsert{ + Name: connector.Name, + Namespace: connector.Namespace, + Docs: string(docs), + Logo: uploadedLogoUrl, + Title: connectorMetadata.Overview.Title, + Description: connectorMetadata.Overview.Description, + IsVerified: connectorMetadata.IsVerified, + IsHosted: connectorMetadata.IsHostedByHasura, + Author: ConnectorAuthorNestedInsert{ + Data: ConnectorAuthor{ + Name: connectorMetadata.Author.Name, + SupportEmail: connectorMetadata.Author.SupportEmail, + Website: connectorMetadata.Author.Homepage, + }, + }, + } + return connectorOverviewAndAuthor, hubRegistryConnectorInsertInput, nil } // runCI is the main function that runs the CI workflow func runCI(cmd *cobra.Command, args []string) { - buildContext() + ctx := buildContext() changedFilesContent, err := os.Open(ciCmdArgs.ChangedFilesPath) if err != nil { log.Fatalf("Failed to open the file: %v, err: %v", ciCmdArgs.ChangedFilesPath, err) } defer changedFilesContent.Close() - client, err := storage.NewClient(context.Background(), option.WithCredentialsJSON([]byte(ciCmdArgs.GCPServiceAccountDetails))) - if err != nil { - log.Fatalf("Failed to create Google bucket client: %v", err) - } - defer client.Close() - // Read the changed file's contents. 
This file contains all the changed files in the PR changedFilesByteValue, err := io.ReadAll(changedFilesContent) if err != nil { @@ -288,65 +322,110 @@ func runCI(cmd *cobra.Command, args []string) { // Separate the modified files according to the type of file // Collect the added or modified connectors - newlyAddedConnectorVersions, modifiedLogos, modifiedReadmes := processChangedFiles(changedFiles) + processChangedFiles := processChangedFiles(changedFiles) - // check if the map is empty - if len(newlyAddedConnectorVersions) == 0 && len(modifiedLogos) == 0 && len(modifiedReadmes) == 0 { - fmt.Println("No connectors to be added or modified in the registry") - return - } else { - if len(newlyAddedConnectorVersions) > 0 { - processNewlyAddedConnectorVersions(client, newlyAddedConnectorVersions) - } + newlyAddedConnectorVersions := processChangedFiles.NewConnectorVersions + modifiedLogos := processChangedFiles.ModifiedLogos + modifiedReadmes := processChangedFiles.ModifiedReadmes + + newlyAddedConnectors := processChangedFiles.NewConnectors + + var newConnectorsToBeAdded NewConnectorsInsertInput + var newConnectorVersionsToBeAdded []ConnectorVersion + + newConnectorOverviewsToBeAdded := make([](ConnectorOverviewInsert), 0) + hubRegistryConnectorsToBeAdded := make([](HubRegistryConnectorInsertInput), 0) + connectorOverviewUpdates := make([]ConnectorOverviewUpdate, 0) + + if len(newlyAddedConnectors) > 0 { + fmt.Println("New connectors to be added to the registry: ", newlyAddedConnectors) + + for connector, metadataFile := range newlyAddedConnectors { + connectorOverviewAndAuthor, hubRegistryConnector, err := processNewConnector(ctx, connector, metadataFile) - if len(modifiedReadmes) > 0 { - err := processModifiedReadmes(modifiedReadmes) if err != nil { - log.Fatalf("Failed to process the modified READMEs: %v", err) + log.Fatalf("Failed to process the new connector: %s/%s, Error: %v", connector.Namespace, connector.Name, err) } - fmt.Println("Successfully updated the READMEs in the registry.") + newConnectorOverviewsToBeAdded = append(newConnectorOverviewsToBeAdded, connectorOverviewAndAuthor) + hubRegistryConnectorsToBeAdded = append(hubRegistryConnectorsToBeAdded, hubRegistryConnector) + } - if len(modifiedLogos) > 0 { - err := processModifiedLogos(modifiedLogos) - if err != nil { - log.Fatalf("Failed to process the modified logos: %v", err) - } - fmt.Println("Successfully updated the logos in the registry.") + newConnectorsToBeAdded.HubRegistryConnectors = hubRegistryConnectorsToBeAdded + newConnectorsToBeAdded.ConnectorOverviews = newConnectorOverviewsToBeAdded + + } + + if len(newlyAddedConnectorVersions) > 0 { + newlyAddedConnectors := make(map[Connector]bool) + for connector := range newlyAddedConnectorVersions { + newlyAddedConnectors[connector] = true + } + newConnectorVersionsToBeAdded = processNewlyAddedConnectorVersions(ctx, newlyAddedConnectorVersions, newlyAddedConnectors) + } + + if len(modifiedReadmes) > 0 { + readMeUpdates, err := processModifiedReadmes(modifiedReadmes) + if err != nil { + log.Fatalf("Failed to process the modified READMEs: %v", err) + } + connectorOverviewUpdates = append(connectorOverviewUpdates, readMeUpdates...) + fmt.Println("Successfully updated the READMEs in the registry.") + } + + if len(modifiedLogos) > 0 { + logoUpdates, err := processModifiedLogos(modifiedLogos, ctx.Cloudinary) + if err != nil { + log.Fatalf("Failed to process the modified logos: %v", err) } + connectorOverviewUpdates = append(connectorOverviewUpdates, logoUpdates...) 
+ fmt.Println("Successfully updated the logos in the registry.") + } + + if ctx.Env == "production" { + err = registryDbMutation(ctx.RegistryGQLClient, newConnectorsToBeAdded, connectorOverviewUpdates, newConnectorVersionsToBeAdded) + + } else if ctx.Env == "staging" { + err = registryDbMutationStaging(ctx.RegistryGQLClient, newConnectorsToBeAdded, connectorOverviewUpdates, newConnectorVersionsToBeAdded) + } else { + log.Fatalf("Unexpected: invalid publication environment: %s", ctx.Env) + } + + if err != nil { + log.Fatalf("Failed to update the registry: %v", err) } fmt.Println("Successfully processed the changed files in the PR") } -func processModifiedLogos(modifiedLogos ModifiedLogos) error { - // Iterate over the modified logos and update the logos in the registry - var connectorOverviewUpdates []ConnectorOverviewUpdate - // upload the logo to cloudinary - cloudinary, err := cloudinary.NewFromURL(ciCmdArgs.CloudinaryUrl) +func uploadLogoToCloudinary(cloudinary CloudinaryInterface, connector Connector, logoPath string) (string, error) { + logoContent, err := readFile(logoPath) if err != nil { - return err + fmt.Printf("Failed to read the logo file: %v", err) + return "", err } + imageReader := bytes.NewReader(logoContent) + + uploadResult, err := cloudinary.Upload(context.Background(), imageReader, uploader.UploadParams{ + PublicID: fmt.Sprintf("%s-%s", connector.Namespace, connector.Name), + Format: "png", + }) + if err != nil { + return "", fmt.Errorf("Failed to upload the logo to cloudinary for the connector: %s, Error: %v\n", connector.Name, err) + } + return uploadResult.SecureURL, nil +} + +func processModifiedLogos(modifiedLogos ModifiedLogos, cloudinaryClient CloudinaryInterface) ([]ConnectorOverviewUpdate, error) { + // Iterate over the modified logos and update the logos in the registry + var connectorOverviewUpdates []ConnectorOverviewUpdate + for connector, logoPath := range modifiedLogos { // open the logo file - logoContent, err := readFile(logoPath) + uploadedLogoUrl, err := uploadLogoToCloudinary(cloudinaryClient, connector, logoPath) if err != nil { - fmt.Printf("Failed to read the logo file: %v", err) - return err - } - - imageReader := bytes.NewReader(logoContent) - - uploadResult, err := cloudinary.Upload.Upload(context.Background(), imageReader, uploader.UploadParams{ - PublicID: fmt.Sprintf("%s-%s", connector.Namespace, connector.Name), - Format: "png", - }) - if err != nil { - fmt.Printf("Failed to upload the logo to cloudinary for the connector: %s, Error: %v\n", connector.Name, err) - return err - } else { - fmt.Printf("Successfully uploaded the logo to cloudinary for the connector: %s\n", connector.Name) + return connectorOverviewUpdates, err } var connectorOverviewUpdate ConnectorOverviewUpdate @@ -357,7 +436,7 @@ func processModifiedLogos(modifiedLogos ModifiedLogos) error { *connectorOverviewUpdate.Set.Logo = "" } - *connectorOverviewUpdate.Set.Logo = string(uploadResult.SecureURL) + *connectorOverviewUpdate.Set.Logo = uploadedLogoUrl connectorOverviewUpdate.Where.ConnectorName = connector.Name connectorOverviewUpdate.Where.ConnectorNamespace = connector.Namespace @@ -366,11 +445,11 @@ func processModifiedLogos(modifiedLogos ModifiedLogos) error { } - return updateConnectorOverview(ConnectorOverviewUpdates{Updates: connectorOverviewUpdates}) + return connectorOverviewUpdates, nil } -func processModifiedReadmes(modifiedReadmes ModifiedReadmes) error { +func processModifiedReadmes(modifiedReadmes ModifiedReadmes) ([]ConnectorOverviewUpdate, error) { // Iterate 
over the modified READMEs and update the READMEs in the registry var connectorOverviewUpdates []ConnectorOverviewUpdate @@ -378,7 +457,7 @@ func processModifiedReadmes(modifiedReadmes ModifiedReadmes) error { // open the README file readmeContent, err := readFile(readmePath) if err != nil { - return err + return connectorOverviewUpdates, err } @@ -394,11 +473,11 @@ func processModifiedReadmes(modifiedReadmes ModifiedReadmes) error { } - return updateConnectorOverview(ConnectorOverviewUpdates{Updates: connectorOverviewUpdates}) + return connectorOverviewUpdates, nil } -func processNewlyAddedConnectorVersions(client *storage.Client, newlyAddedConnectorVersions NewConnectorVersions) { +func processNewlyAddedConnectorVersions(ciCtx Context, newlyAddedConnectorVersions NewConnectorVersions, newConnectorsAdded map[Connector]bool) []ConnectorVersion { // Iterate over the added or modified connectors and upload the connector versions var connectorVersions []ConnectorVersion var uploadConnectorVersionErr error @@ -407,7 +486,8 @@ func processNewlyAddedConnectorVersions(client *storage.Client, newlyAddedConnec for connectorName, versions := range newlyAddedConnectorVersions { for version, connectorVersionPath := range versions { var connectorVersion ConnectorVersion - connectorVersion, uploadConnectorVersionErr = uploadConnectorVersionPackage(client, connectorName, version, connectorVersionPath) + isNewConnector := newConnectorsAdded[connectorName] + connectorVersion, uploadConnectorVersionErr = uploadConnectorVersionPackage(ciCtx, connectorName, version, connectorVersionPath, isNewConnector) if uploadConnectorVersionErr != nil { fmt.Printf("Error while processing version and connector: %s - %s, Error: %v", version, connectorName, uploadConnectorVersionErr) @@ -423,24 +503,18 @@ func processNewlyAddedConnectorVersions(client *storage.Client, newlyAddedConnec if encounteredError { // attempt to cleanup the uploaded connector versions - _ = cleanupUploadedConnectorVersions(client, connectorVersions) // ignore errors while cleaning up + _ = cleanupUploadedConnectorVersions(ciCtx.StorageClient, connectorVersions) // ignore errors while cleaning up // delete the uploaded connector versions from the registry log.Fatalf("Failed to upload the connector version: %v", uploadConnectorVersionErr) - - } else { - fmt.Printf("Connector versions to be added to the registry: %+v\n", connectorVersions) - err := updateRegistryGQL(connectorVersions) - if err != nil { - // attempt to cleanup the uploaded connector versions - _ = cleanupUploadedConnectorVersions(client, connectorVersions) // ignore errors while cleaning up - log.Fatalf("Failed to update the registry: %v", err) - } } + fmt.Println("Successfully added connector versions to the registry.") + return connectorVersions + } -func cleanupUploadedConnectorVersions(client *storage.Client, connectorVersions []ConnectorVersion) error { +func cleanupUploadedConnectorVersions(client StorageClientInterface, connectorVersions []ConnectorVersion) error { // Iterate over the connector versions and delete the uploaded files // from the google bucket fmt.Println("Cleaning up the uploaded connector versions") @@ -455,22 +529,8 @@ func cleanupUploadedConnectorVersions(client *storage.Client, connectorVersions return nil } -// Type that uniquely identifies a connector -type Connector struct { - Name string `json:"name"` - Namespace string `json:"namespace"` -} - -type NewConnectorVersions map[Connector]map[string]string - -// ModifiedLogos represents the modified logos in the 
PR, the key is the connector name and the value is the path to the modified logo -type ModifiedLogos map[Connector]string - -// ModifiedReadmes represents the modified READMEs in the PR, the key is the connector name and the value is the path to the modified README -type ModifiedReadmes map[Connector]string - // uploadConnectorVersionPackage uploads the connector version package to the registry -func uploadConnectorVersionPackage(client *storage.Client, connector Connector, version string, changedConnectorVersionPath string) (ConnectorVersion, error) { +func uploadConnectorVersionPackage(ciCtx Context, connector Connector, version string, changedConnectorVersionPath string, isNewConnector bool) (ConnectorVersion, error) { var connectorVersion ConnectorVersion @@ -492,7 +552,7 @@ func uploadConnectorVersionPackage(client *storage.Client, connector Connector, return connectorVersion, err } - uploadedTgzUrl, err := uploadConnectorVersionDefinition(client, connector.Namespace, connector.Name, version, connectorMetadataTgzPath) + uploadedTgzUrl, err := uploadConnectorVersionDefinition(ciCtx, connector.Namespace, connector.Name, version, connectorMetadataTgzPath) if err != nil { return connectorVersion, fmt.Errorf("failed to upload the connector version definition - connector: %v version:%v - err: %v", connector.Name, version, err) } else { @@ -501,13 +561,13 @@ func uploadConnectorVersionPackage(client *storage.Client, connector Connector, } // Build payload for registry upsert - return buildRegistryPayload(connector.Namespace, connector.Name, version, connectorVersionMetadata, uploadedTgzUrl) + return buildRegistryPayload(ciCtx, connector.Namespace, connector.Name, version, connectorVersionMetadata, uploadedTgzUrl, isNewConnector) } -func uploadConnectorVersionDefinition(client *storage.Client, connectorNamespace, connectorName string, connectorVersion string, connectorMetadataTgzPath string) (string, error) { +func uploadConnectorVersionDefinition(ciCtx Context, connectorNamespace, connectorName string, connectorVersion string, connectorMetadataTgzPath string) (string, error) { bucketName := ciCmdArgs.GCPBucketName objectName := generateGCPObjectName(connectorNamespace, connectorName, connectorVersion) - uploadedTgzUrl, err := uploadFile(client, bucketName, objectName, connectorMetadataTgzPath) + uploadedTgzUrl, err := uploadFile(ciCtx.StorageClient, bucketName, objectName, connectorMetadataTgzPath) if err != nil { return "", err @@ -552,94 +612,15 @@ func getConnectorVersionMetadata(tgzUrl string, connector Connector, connectorVe return connectorVersionMetadata, tgzPath, nil } -// Write a function that accepts a file path to a YAML file and returns -// the contents of the file as a map[string]interface{}. -// readYAMLFile accepts a file path to a YAML file and returns the contents of the file as a map[string]interface{}. 
-func readYAMLFile(filePath string) (map[string]interface{}, error) { - // Open the file - file, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("failed to open file: %w", err) - } - defer file.Close() - - // Read the file contents - data, err := io.ReadAll(file) - if err != nil { - return nil, fmt.Errorf("failed to read file: %w", err) - } - - // Unmarshal the YAML contents into a map - var result map[string]interface{} - err = yaml.Unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal YAML: %w", err) - } - - return result, nil -} - -func getConnectorNamespace(connectorMetadata map[string]interface{}) (string, error) { - connectorOverview, ok := connectorMetadata["overview"].(map[string]interface{}) - if !ok { - return "", fmt.Errorf("could not find connector overview in the connector's metadata") - } - connectorNamespace, ok := connectorOverview["namespace"].(string) - if !ok { - return "", fmt.Errorf("could not find the 'namespace' of the connector in the connector's overview in the connector's metadata.json") - } - return connectorNamespace, nil -} - -// struct to store the response of teh GetConnectorInfo query -type GetConnectorInfoResponse struct { - HubRegistryConnector []struct { - Name string `json:"name"` - MultitenantConnector *struct { - ID string `json:"id"` - } `json:"multitenant_connector"` - } `json:"hub_registry_connector"` -} - -func getConnectorInfoFromRegistry(connectorNamespace string, connectorName string) (GetConnectorInfoResponse, error) { - var respData GetConnectorInfoResponse - client := graphql.NewClient(ciCmdArgs.ConnectorRegistryGQLUrl) - ctx := context.Background() - - req := graphql.NewRequest(` -query GetConnectorInfo ($name: String!, $namespace: String!) { - hub_registry_connector(where: {_and: [{name: {_eq: $name}}, {namespace: {_eq: $namespace}}]}) { - name - multitenant_connector { - id - } - } -}`) - req.Var("name", connectorName) - req.Var("namespace", connectorNamespace) - - req.Header.Set("x-hasura-role", "connector_publishing_automation") - req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey) - - // Execute the GraphQL query and check the response. 
- if err := client.Run(ctx, req, &respData); err != nil { - return respData, err - } else { - if len(respData.HubRegistryConnector) == 0 { - return respData, nil - } - } - - return respData, nil -} - // buildRegistryPayload builds the payload for the registry upsert API func buildRegistryPayload( + ciCtx Context, connectorNamespace string, connectorName string, version string, connectorVersionMetadata map[string]interface{}, uploadedConnectorDefinitionTgzUrl string, + isNewConnector bool, ) (ConnectorVersion, error) { var connectorVersion ConnectorVersion var connectorVersionDockerImage string = "" @@ -659,15 +640,31 @@ func buildRegistryPayload( } - connectorInfo, err := getConnectorInfoFromRegistry(connectorNamespace, connectorName) + connectorInfo, err := getConnectorInfoFromRegistry(ciCtx.RegistryGQLClient, connectorNamespace, connectorName) if err != nil { return connectorVersion, err } + var isMultitenant bool + // Check if the connector exists in the registry first if len(connectorInfo.HubRegistryConnector) == 0 { - return connectorVersion, fmt.Errorf("Inserting a new connector is not supported yet") + + if isNewConnector { + isMultitenant = false + } else { + return connectorVersion, fmt.Errorf("Unexpected: Couldn't get the connector info of the connector: %s", connectorName) + + } + + } else { + if len(connectorInfo.HubRegistryConnector) == 1 { + // check if the connector is multitenant + isMultitenant = connectorInfo.HubRegistryConnector[0].MultitenantConnector != nil + + } + } var connectorVersionType string @@ -693,65 +690,9 @@ func buildRegistryPayload( Version: version, Image: connectorVersionImage, PackageDefinitionURL: uploadedConnectorDefinitionTgzUrl, - IsMultitenant: connectorInfo.HubRegistryConnector[0].MultitenantConnector != nil, + IsMultitenant: isMultitenant, Type: connectorVersionType, } return connectorVersion, nil } - -func updateRegistryGQL(payload []ConnectorVersion) error { - var respData map[string]interface{} - client := graphql.NewClient(ciCmdArgs.ConnectorRegistryGQLUrl) - ctx := context.Background() - - req := graphql.NewRequest(` -mutation InsertConnectorVersion($connectorVersion: [hub_registry_connector_version_insert_input!]!) { - insert_hub_registry_connector_version(objects: $connectorVersion, on_conflict: {constraint: connector_version_namespace_name_version_key, update_columns: [image, package_definition_url, is_multitenant]}) { - affected_rows - returning { - id - } - } -}`) - // add the payload to the request - req.Var("connectorVersion", payload) - - req.Header.Set("x-hasura-role", "connector_publishing_automation") - req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey) - - // Execute the GraphQL query and check the response. - if err := client.Run(ctx, req, &respData); err != nil { - return err - } - - return nil -} - -func updateConnectorOverview(updates ConnectorOverviewUpdates) error { - var respData map[string]interface{} - client := graphql.NewClient(ciCmdArgs.ConnectorRegistryGQLUrl) - ctx := context.Background() - - req := graphql.NewRequest(` -mutation UpdateConnector ($updates: [connector_overview_updates!]!) { - update_connector_overview_many(updates: $updates) { - affected_rows - } -}`) - - // add the payload to the request - req.Var("updates", updates.Updates) - - req.Header.Set("x-hasura-role", "connector_publishing_automation") - req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey) - - // Execute the GraphQL query and check the response. 
- if err := client.Run(ctx, req, &respData); err != nil { - return err - } else { - fmt.Printf("Successfully updated the connector overview: %+v\n", respData) - } - - return nil -} diff --git a/registry-automation/cmd/ci_test.go b/registry-automation/cmd/ci_test.go index e68cabaf..2b160f8a 100644 --- a/registry-automation/cmd/ci_test.go +++ b/registry-automation/cmd/ci_test.go @@ -1,100 +1,230 @@ package cmd import ( + "context" + "testing" + + "github.com/machinebox/graphql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "cloud.google.com/go/storage" + + "github.com/cloudinary/cloudinary-go/v2/api/uploader" ) -func TestProcessAddedOrModifiedConnectorVersions(t *testing.T) { - // Define test cases +// Mock structures +type MockStorageClient struct { + mock.Mock +} + +func (m *MockStorageClient) Bucket(name string) *storage.BucketHandle { + args := m.Called(name) + return args.Get(0).(*storage.BucketHandle) +} + +type MockCloudinaryUploader struct { + mock.Mock +} + +type MockCloudinary struct { + mock.Mock +} + +func (m *MockCloudinary) Upload(ctx context.Context, file interface{}, uploadParams uploader.UploadParams) (*uploader.UploadResult, error) { + args := m.Called(ctx, file, uploadParams) + return args.Get(0).(*uploader.UploadResult), args.Error(1) +} + +type MockGraphQLClient struct { + mock.Mock +} + +func (m *MockGraphQLClient) Run(ctx context.Context, query *graphql.Request, resp interface{}) error { + args := m.Called(ctx, query, resp) + return args.Error(0) +} + +func createTestContext() Context { + return Context{ + Env: "staging", + RegistryGQLClient: &MockGraphQLClient{}, + StorageClient: &MockStorageClient{}, + Cloudinary: &MockCloudinary{}, + } +} + +// Test processChangedFiles +func TestProcessChangedFiles(t *testing.T) { testCases := []struct { - name string - files []string - expectedAddedOrModifiedConnectors map[string]map[string]string + name string + changedFiles ChangedFiles + expected ProcessedChangedFiles }{ { - name: "Test case 1", - files: []string{ - "registry/hasura/releases/v1.0.0/connector-packaging.json", - "registry/hasura/releases/v2.0.0/connector-packaging.json", - "registry/other/releases/v1.0.0/connector-packaging.json", + name: "New connector added", + changedFiles: ChangedFiles{ + Added: []string{"registry/namespace1/connector1/metadata.json"}, }, - expectedAddedOrModifiedConnectors: map[string]map[string]string{ - "hasura": { - "v1.0.0": "registry/hasura/releases/v1.0.0/connector-packaging.json", - "v2.0.0": "registry/hasura/releases/v2.0.0/connector-packaging.json", - }, - "other": { - "v1.0.0": "registry/other/releases/v1.0.0/connector-packaging.json", - }, + expected: ProcessedChangedFiles{ + NewConnectorVersions: map[Connector]map[string]string{}, + ModifiedLogos: map[Connector]string{}, + ModifiedReadmes: map[Connector]string{}, + NewConnectors: map[Connector]MetadataFile{{Name: "connector1", Namespace: "namespace1"}: "registry/namespace1/connector1/metadata.json"}, + NewLogos: map[Connector]string{}, + NewReadmes: map[Connector]string{}, }, }, { - name: "Test case 2", - files: []string{ - "registry/hasura/releases/v1.0.0/connector-packaging.json", - "registry/hasura/releases/v1.0.0/other-file.json", - }, - expectedAddedOrModifiedConnectors: map[string]map[string]string{ - "hasura": { - "v1.0.0": "registry/hasura/releases/v1.0.0/connector-packaging.json", + name: "Modified logo and README", + changedFiles: ChangedFiles{ + Modified: []string{ + "registry/namespace1/connector1/logo.png", + 
"registry/namespace1/connector1/README.md", }, }, - }, - { - name: "Test case 3", - files: []string{ - "registry/hasura/releases/v1.0.0/other-file.json", - "registry/other/releases/v1.0.0/connector-packaging.json", - }, - expectedAddedOrModifiedConnectors: map[string]map[string]string{ - "other": { - "v1.0.0": "registry/other/releases/v1.0.0/connector-packaging.json", - }, + expected: ProcessedChangedFiles{ + NewConnectorVersions: map[Connector]map[string]string{}, + ModifiedLogos: map[Connector]string{{Name: "connector1", Namespace: "namespace1"}: "registry/namespace1/connector1/logo.png"}, + ModifiedReadmes: map[Connector]string{{Name: "connector1", Namespace: "namespace1"}: "registry/namespace1/connector1/README.md"}, + NewConnectors: map[Connector]MetadataFile{}, + NewLogos: map[Connector]string{}, + NewReadmes: map[Connector]string{}, }, }, } - // Run the test cases for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - // Initialize the map to store the added or modified connectors - addedOrModifiedConnectorVersions := make(map[string]map[string]string) - - var changedFiles ChangedFiles - - changedFiles.Added = tc.files - - // Call the function under test - processChangedFiles(changedFiles) - - // Compare the actual result with the expected result - if len(addedOrModifiedConnectorVersions) != len(tc.expectedAddedOrModifiedConnectors) { - t.Errorf("Unexpected number of connectors. Expected: %d, Got: %d", len(tc.expectedAddedOrModifiedConnectors), len(addedOrModifiedConnectorVersions)) - } - - for connectorName, versions := range addedOrModifiedConnectorVersions { - expectedVersions, ok := tc.expectedAddedOrModifiedConnectors[connectorName] - if !ok { - t.Errorf("Unexpected connector name: %s", connectorName) - continue - } - - if len(versions) != len(expectedVersions) { - t.Errorf("Unexpected number of versions for connector %s. Expected: %d, Got: %d", connectorName, len(expectedVersions), len(versions)) - } - - for version, connectorVersionPath := range versions { - expectedPath, ok := expectedVersions[version] - if !ok { - t.Errorf("Unexpected version for connector %s: %s", connectorName, version) - continue - } - - if connectorVersionPath != expectedPath { - t.Errorf("Unexpected connector version path for connector %s, version %s. 
Expected: %s, Got: %s", connectorName, version, expectedPath, connectorVersionPath) - } - } - } + result := processChangedFiles(tc.changedFiles) + assert.Equal(t, tc.expected, result) }) } } + +// func TestProcessNewConnector(t *testing.T) { +// ctx := createTestContext() +// connector := Connector{Name: "testconnector", Namespace: "testnamespace"} + +// // Create a temporary directory for our test files +// tempDir, err := os.MkdirTemp("", "connector-test") +// assert.NoError(t, err) +// defer os.RemoveAll(tempDir) // Clean up after the test + +// // Set up the directory structure +// registryDir := filepath.Join(tempDir, "registry", connector.Namespace, connector.Name) +// err = os.MkdirAll(registryDir, 0755) +// assert.NoError(t, err) + +// // Create the metadata file +// metadataFile := filepath.Join(registryDir, "metadata.json") +// tempMetadata := []byte(`{"overview": {"title": "Test Connector", "description": "A test connector"}, "isVerified": true, "isHostedByHasura": false, "author": {"name": "Test Author", "supportEmail": "support@test.com", "homepage": "https://test.com"}}`) +// err = os.WriteFile(metadataFile, tempMetadata, 0666) +// assert.NoError(t, err) + +// // Create the README file +// readmeFile := filepath.Join(registryDir, "README.md") +// err = os.WriteFile(readmeFile, []byte("# Test Connector"), 0644) +// assert.NoError(t, err) + +// // Mock the necessary functions and API calls +// mockCloudinaryUploader := &MockCloudinaryUploader{} +// mockCloudinaryUploader.On("Upload", mock.Anything, mock.Anything, mock.Anything).Return(&uploader.UploadResult{SecureURL: "https://res.cloudinary.com/demo/image/upload/logo.png"}, nil) + +// mockGraphQLClient := ctx.RegistryGQLClient.(*MockGraphQLClient) +// mockGraphQLClient.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(nil) + +// // Run the function +// connectorOverviewInsert, hubRegistryConnectorInsert, err := processNewConnector(ctx, connector, MetadataFile(metadataFile)) + +// // Assert the results +// assert.NoError(t, err) +// assert.Equal(t, "testconnector", connectorOverviewInsert.Name) +// assert.Equal(t, "testnamespace", connectorOverviewInsert.Namespace) +// assert.Equal(t, "Test Connector", connectorOverviewInsert.Title) +// assert.Equal(t, "A test connector", connectorOverviewInsert.Description) +// assert.True(t, connectorOverviewInsert.IsVerified) +// assert.False(t, connectorOverviewInsert.IsHosted) +// assert.Equal(t, "Test Author", connectorOverviewInsert.Author.Data.Name) +// assert.Equal(t, "support@test.com", connectorOverviewInsert.Author.Data.SupportEmail) +// assert.Equal(t, "https://test.com", connectorOverviewInsert.Author.Data.Website) + +// assert.Equal(t, "testconnector", hubRegistryConnectorInsert.Name) +// assert.Equal(t, "testnamespace", hubRegistryConnectorInsert.Namespace) +// assert.Equal(t, "Test Connector", hubRegistryConnectorInsert.Title) + +// mockCloudinaryUploader.AssertExpectations(t) +// mockGraphQLClient.AssertExpectations(t) +// } + +// // Test uploadConnectorVersionPackage +// func TestUploadConnectorVersionPackage(t *testing.T) { +// ctx := createTestContext() +// connector := Connector{Name: "testconnector", Namespace: "testnamespace"} +// version := "v1.0.0" +// changedConnectorVersionPath := "registry/testnamespace/testconnector/releases/v1.0.0/connector-packaging.json" +// isNewConnector := true + +// // Mock necessary functions +// mockStorageClient := ctx.StorageClient.(*MockStorageClient) +// mockStorageClient.On("Bucket", 
mock.Anything).Return(&storage.BucketHandle{}) + +// mockGraphQLClient := ctx.RegistryGQLClient.(*MockGraphQLClient) +// mockGraphQLClient.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(nil) + +// // Create temporary files +// err := os.MkdirAll("registry/testnamespace/testconnector/releases/v1.0.0", 0755) +// assert.NoError(t, err) +// defer os.RemoveAll("registry/testnamespace/testconnector") + +// packagingContent := []byte(`{"uri": "https://example.com/testconnector-v1.0.0.tgz"}`) +// err = os.WriteFile(changedConnectorVersionPath, packagingContent, 0644) +// assert.NoError(t, err) + +// // Run the function +// connectorVersion, err := uploadConnectorVersionPackage(ctx, connector, version, changedConnectorVersionPath, isNewConnector) + +// // Assert the results +// assert.NoError(t, err) +// assert.Equal(t, "testconnector", connectorVersion.Name) +// assert.Equal(t, "testnamespace", connectorVersion.Namespace) +// assert.Equal(t, "v1.0.0", connectorVersion.Version) + +// mockStorageClient.AssertExpectations(t) +// mockGraphQLClient.AssertExpectations(t) +// } + +// // Test buildRegistryPayload +// func TestBuildRegistryPayload(t *testing.T) { +// ctx := createTestContext() +// connectorNamespace := "testnamespace" +// connectorName := "testconnector" +// version := "v1.0.0" +// connectorVersionMetadata := map[string]interface{}{ +// "packagingDefinition": map[string]interface{}{ +// "type": "ManagedDockerBuild", +// }, +// } +// uploadedConnectorDefinitionTgzUrl := "https://example.com/test.tgz" +// isNewConnector := true + +// // Mock the GraphQL client +// mockGraphQLClient := ctx.RegistryGQLClient.(*MockGraphQLClient) +// mockGraphQLClient.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(nil) + +// // Run the function +// connectorVersion, err := buildRegistryPayload(ctx, connectorNamespace, connectorName, version, connectorVersionMetadata, uploadedConnectorDefinitionTgzUrl, isNewConnector) + +// // Assert the results +// assert.NoError(t, err) +// assert.Equal(t, connectorNamespace, connectorVersion.Namespace) +// assert.Equal(t, connectorName, connectorVersion.Name) +// assert.Equal(t, version, connectorVersion.Version) +// assert.Equal(t, uploadedConnectorDefinitionTgzUrl, connectorVersion.PackageDefinitionURL) +// assert.Equal(t, "ManagedDockerBuild", connectorVersion.Type) +// assert.False(t, connectorVersion.IsMultitenant) +// assert.Nil(t, connectorVersion.Image) + +// mockGraphQLClient.AssertExpectations(t) +// } diff --git a/registry-automation/cmd/gcp.go b/registry-automation/cmd/gcp.go index 40d41d62..4896a92d 100644 --- a/registry-automation/cmd/gcp.go +++ b/registry-automation/cmd/gcp.go @@ -2,7 +2,6 @@ package cmd import ( - "cloud.google.com/go/storage" "context" "fmt" "io" @@ -10,7 +9,7 @@ import ( ) // deleteFile deletes a file from Google Cloud Storage -func deleteFile(client *storage.Client, bucketName, objectName string) error { +func deleteFile(client StorageClientInterface, bucketName, objectName string) error { bucket := client.Bucket(bucketName) object := bucket.Object(objectName) @@ -19,7 +18,7 @@ func deleteFile(client *storage.Client, bucketName, objectName string) error { // uploadFile uploads a file to Google Cloud Storage // document this function with comments -func uploadFile(client *storage.Client, bucketName, objectName, filePath string) (string, error) { +func uploadFile(client StorageClientInterface, bucketName, objectName, filePath string) (string, error) { bucket := client.Bucket(bucketName) object := 
bucket.Object(objectName)
 	newCtx := context.Background()
diff --git a/registry-automation/cmd/registryDb.go b/registry-automation/cmd/registryDb.go
new file mode 100644
index 00000000..bad68d36
--- /dev/null
+++ b/registry-automation/cmd/registryDb.go
@@ -0,0 +1,283 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/machinebox/graphql"
+)
+
+type HubRegistryConnectorInsertInput struct {
+	Name      string `json:"name"`
+	Title     string `json:"title"`
+	Namespace string `json:"namespace"`
+}
+
+type NewConnectorsInsertInput struct {
+	HubRegistryConnectors []HubRegistryConnectorInsertInput `json:"hub_registry_connectors"`
+	ConnectorOverviews    []ConnectorOverviewInsert         `json:"connector_overviews"`
+}
+
+// GetConnectorInfoResponse stores the response of the GetConnectorInfo query.
+type GetConnectorInfoResponse struct {
+	HubRegistryConnector []struct {
+		Name                 string `json:"name"`
+		MultitenantConnector *struct {
+			ID string `json:"id"`
+		} `json:"multitenant_connector"`
+	} `json:"hub_registry_connector"`
+}
+
+func insertHubRegistryConnector(client graphql.Client, newConnectors NewConnectorsInsertInput) error {
+	var respData map[string]interface{}
+
+	ctx := context.Background()
+
+	req := graphql.NewRequest(`
+mutation InsertHubRegistryConnector ($hub_registry_connectors:[hub_registry_connector_insert_input!]!, $connector_overview_objects: [connector_overview_insert_input!]!){
+
+	insert_hub_registry_connector(objects: $hub_registry_connectors) {
+affected_rows
+	}
+	insert_connector_overview(objects: $connector_overview_objects) {
+		affected_rows
+	}
+}
+`)
+
+	// add the payload to the request; the variable names must match the ones declared in the mutation
+	req.Var("hub_registry_connectors", newConnectors.HubRegistryConnectors)
+	req.Var("connector_overview_objects", newConnectors.ConnectorOverviews)
+
+	// set the headers
+	req.Header.Set("x-hasura-role", "connector_publishing_automation")
+	req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey)
+
+	// Execute the GraphQL query and check the response.
+	if err := client.Run(ctx, req, &respData); err != nil {
+		return err
+	} else {
+		connectorNames := make([]string, 0)
+		for _, connector := range newConnectors.HubRegistryConnectors {
+			connectorNames = append(connectorNames, fmt.Sprintf("%s/%s", connector.Namespace, connector.Name))
+		}
+		fmt.Printf("Successfully inserted the following connectors in the registry: %+v\n", connectorNames)
+	}
+
+	return nil
+}
+
+func getConnectorInfoFromRegistry(client GraphQLClientInterface, connectorNamespace string, connectorName string) (GetConnectorInfoResponse, error) {
+	var respData GetConnectorInfoResponse
+
+	ctx := context.Background()
+
+	req := graphql.NewRequest(`
+query GetConnectorInfo ($name: String!, $namespace: String!) {
+	hub_registry_connector(where: {_and: [{name: {_eq: $name}}, {namespace: {_eq: $namespace}}]}) {
+		name
+		multitenant_connector {
+			id
+		}
+	}
+}`)
+	req.Var("name", connectorName)
+	req.Var("namespace", connectorNamespace)
+
+	req.Header.Set("x-hasura-role", "connector_publishing_automation")
+	req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey)
+
+	// Execute the GraphQL query and check the response.
+ if err := client.Run(ctx, req, &respData); err != nil { + return respData, err + } else { + if len(respData.HubRegistryConnector) == 0 { + return respData, nil + } + } + + return respData, nil +} + +func updateRegistryGQL(client graphql.Client, payload []ConnectorVersion) error { + var respData map[string]interface{} + + ctx := context.Background() + + req := graphql.NewRequest(` +mutation InsertConnectorVersion($connectorVersion: [hub_registry_connector_version_insert_input!]!) { + insert_hub_registry_connector_version(objects: $connectorVersion, on_conflict: {constraint: connector_version_namespace_name_version_key, update_columns: [image, package_definition_url, is_multitenant]}) { + affected_rows + returning { + id + } + } +}`) + // add the payload to the request + req.Var("connectorVersion", payload) + + req.Header.Set("x-hasura-role", "connector_publishing_automation") + req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey) + + // Execute the GraphQL query and check the response. + if err := client.Run(ctx, req, &respData); err != nil { + return err + } + + return nil +} + +func updateConnectorOverview(updates ConnectorOverviewUpdates) error { + var respData map[string]interface{} + client := graphql.NewClient(ciCmdArgs.ConnectorRegistryGQLUrl) + ctx := context.Background() + + req := graphql.NewRequest(` +mutation UpdateConnector ($updates: [connector_overview_updates!]!) { + update_connector_overview_many(updates: $updates) { + affected_rows + } +}`) + + // add the payload to the request + req.Var("updates", updates.Updates) + + req.Header.Set("x-hasura-role", "connector_publishing_automation") + req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey) + + // Execute the GraphQL query and check the response. + if err := client.Run(ctx, req, &respData); err != nil { + return err + } else { + fmt.Printf("Successfully updated the connector overview: %+v\n", respData) + } + + return nil +} + +type ConnectorAuthorNestedInsertOnConflict struct { + Constraint string `json:"constraint"` + UpdateCols []string `json:"update_columns,omitempty"` +} + +type ConnectorAuthorNestedInsert struct { + Data ConnectorAuthor `json:"data"` + OnConflict *ConnectorAuthorNestedInsertOnConflict `json:"on_conflict,omitempty"` +} + +type ConnectorOverviewInsert struct { + Namespace string `json:"namespace"` + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + Logo string `json:"logo"` + Docs string `json:"docs"` + IsVerified bool `json:"is_verified"` + IsHosted bool `json:"is_hosted_by_hasura"` + Author ConnectorAuthorNestedInsert `json:"author"` +} + +type ConnectorAuthor struct { + Name string `json:"name"` + SupportEmail string `json:"support_email"` + Website string `json:"website"` +} + +// registryDbMutation is a function to insert data into the registry database, all the mutations are done in a single transaction. 
+func registryDbMutation(client GraphQLClientInterface, newConnectors NewConnectorsInsertInput, connectorOverviewUpdates []ConnectorOverviewUpdate, connectorVersionInserts []ConnectorVersion) error {
+	var respData map[string]interface{}
+	ctx := context.Background()
+	mutationQuery := `
+mutation HubRegistryMutationRequest (
+	$hub_registry_connectors:[hub_registry_connector_insert_input!]!,
+	$connector_overview_inserts: [connector_overview_insert_input!]!,
+	$connector_overview_updates: [connector_overview_updates!]!,
+	$connector_version_inserts: [hub_registry_connector_version_insert_input!]!
+){
+
+	insert_hub_registry_connector(objects: $hub_registry_connectors) {
+affected_rows
+	}
+	insert_connector_overview(objects: $connector_overview_inserts) {
+		affected_rows
+	}
+	insert_hub_registry_connector_version(objects: $connector_version_inserts, on_conflict: {constraint: connector_version_namespace_name_version_key, update_columns: [image, package_definition_url, is_multitenant]}) {
+		affected_rows
+	}
+
+	update_connector_overview_many(updates: $connector_overview_updates) {
+		affected_rows
+	}
+}
+`
+	req := graphql.NewRequest(mutationQuery)
+	req.Var("hub_registry_connectors", newConnectors.HubRegistryConnectors)
+	req.Var("connector_overview_inserts", newConnectors.ConnectorOverviews)
+	req.Var("connector_overview_updates", connectorOverviewUpdates)
+	req.Var("connector_version_inserts", connectorVersionInserts)
+
+	req.Header.Set("x-hasura-role", "connector_publishing_automation")
+	req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey)
+
+	// Execute the GraphQL query and check the response.
+	if err := client.Run(ctx, req, &respData); err != nil {
+		return err
+	}
+
+	return nil
+
+}
+
+// registryDbMutationStaging inserts data into the registry database for the staging environment;
+// all the mutations are done in a single transaction. Unlike registryDbMutation, it upserts on
+// conflict so that existing connectors and connector overviews can be overwritten in staging.
+func registryDbMutationStaging(client GraphQLClientInterface, newConnectors NewConnectorsInsertInput, connectorOverviewUpdates []ConnectorOverviewUpdate, connectorVersionInserts []ConnectorVersion) error {
+	var respData map[string]interface{}
+	ctx := context.Background()
+	mutationQuery := `
+mutation HubRegistryMutationRequest (
+	$hub_registry_connectors:[hub_registry_connector_insert_input!]!,
+	$connector_overview_inserts: [connector_overview_insert_input!]!,
+	$connector_overview_updates: [connector_overview_updates!]!,
+	$connector_version_inserts: [hub_registry_connector_version_insert_input!]!
+){ + + insert_hub_registry_connector(objects: $hub_registry_connectors, on_conflict: {constraint: connector_pkey}) { +affected_rows + } + insert_connector_overview(objects: $connector_overview_inserts, on_conflict: {constraint: connector_overview_pkey, update_columns: [docs, logo]}) { + affected_rows + } + insert_hub_registry_connector_version(objects: $connector_version_inserts, on_conflict: {constraint: connector_version_namespace_name_version_key, update_columns: [image, package_definition_url, is_multitenant]}) { + affected_rows + } + + update_connector_overview_many(updates: $connector_overview_updates) { + affected_rows + } +} +` + + // update newConnectors.ConnectorOverviews to have on_conflict + for i := range newConnectors.ConnectorOverviews { + newConnectors.ConnectorOverviews[i].Author.OnConflict = &ConnectorAuthorNestedInsertOnConflict{ + Constraint: "connector_author_connector_title_key", + UpdateCols: []string{}, + } + } + + req := graphql.NewRequest(mutationQuery) + req.Var("hub_registry_connectors", newConnectors.HubRegistryConnectors) + req.Var("connector_overview_inserts", newConnectors.ConnectorOverviews) + req.Var("connector_overview_updates", connectorOverviewUpdates) + req.Var("connector_version_inserts", connectorVersionInserts) + + req.Header.Set("x-hasura-role", "connector_publishing_automation") + req.Header.Set("x-connector-publication-key", ciCmdArgs.ConnectorPublicationKey) + + // Execute the GraphQL query and check the response. + if err := client.Run(ctx, req, &respData); err != nil { + return err + } + + return nil + +} diff --git a/registry-automation/cmd/types.go b/registry-automation/cmd/types.go new file mode 100644 index 00000000..ae51bbf6 --- /dev/null +++ b/registry-automation/cmd/types.go @@ -0,0 +1,185 @@ +package cmd + +import ( + "cloud.google.com/go/storage" + "context" + "encoding/json" + "github.com/cloudinary/cloudinary-go/v2" + "github.com/cloudinary/cloudinary-go/v2/api/uploader" + "github.com/machinebox/graphql" +) + +type ChangedFiles struct { + Added []string `json:"added_files"` + Modified []string `json:"modified_files"` + Deleted []string `json:"deleted_files"` +} + +// ConnectorVersion represents a version of a connector, this type is +// used to insert a new version of a connector in the registry. +type ConnectorVersion struct { + // Namespace of the connector, e.g. "hasura" + Namespace string `json:"namespace"` + // Name of the connector, e.g. "mongodb" + Name string `json:"name"` + // Semantic version of the connector version, e.g. "v1.0.0" + Version string `json:"version"` + // Docker image of the connector version (optional) + // This field is only required if the connector version is of type `PrebuiltDockerImage` + Image *string `json:"image,omitempty"` + // URL to the connector's metadata + PackageDefinitionURL string `json:"package_definition_url"` + // Is the connector version multitenant? 
+	IsMultitenant bool `json:"is_multitenant"`
+	// Type of the connector packaging `PrebuiltDockerImage`/`ManagedDockerBuild`
+	Type string `json:"type"`
+}
+
+// ConnectionVersionMetadata represents a connector version's metadata, i.e. the
+// packaging type and, optionally, the Docker image.
+type ConnectionVersionMetadata struct {
+	Type  string  `yaml:"type"`
+	Image *string `yaml:"image,omitempty"`
+}
+
+type WhereClause struct {
+	ConnectorName      string
+	ConnectorNamespace string
+}
+
+func (wc WhereClause) MarshalJSON() ([]byte, error) {
+	where := map[string]interface{}{
+		"_and": []map[string]interface{}{
+			{"name": map[string]string{"_eq": wc.ConnectorName}},
+			{"namespace": map[string]string{"_eq": wc.ConnectorNamespace}},
+		},
+	}
+	return json.Marshal(where)
+}
+
+type ConnectorOverviewUpdate struct {
+	Set struct {
+		Docs *string `json:"docs,omitempty"`
+		Logo *string `json:"logo,omitempty"`
+	} `json:"_set"`
+	Where WhereClause `json:"where"`
+}
+
+type ConnectorOverviewUpdates struct {
+	Updates []ConnectorOverviewUpdate `json:"updates"`
+}
+
+const (
+	ManagedDockerBuild  = "ManagedDockerBuild"
+	PrebuiltDockerImage = "PrebuiltDockerImage"
+)
+
+// ConnectorMetadata represents the contents of a connector's metadata.json file.
+type ConnectorMetadata struct {
+	Overview struct {
+		Namespace     string   `json:"namespace"`
+		Description   string   `json:"description"`
+		Title         string   `json:"title"`
+		Logo          string   `json:"logo"`
+		Tags          []string `json:"tags"`
+		LatestVersion string   `json:"latest_version"`
+	} `json:"overview"`
+	Author struct {
+		SupportEmail string `json:"support_email"`
+		Homepage     string `json:"homepage"`
+		Name         string `json:"name"`
+	} `json:"author"`
+
+	IsVerified       bool `json:"is_verified"`
+	IsHostedByHasura bool `json:"is_hosted_by_hasura"`
+	HasuraHubConnector struct {
+		Namespace string `json:"namespace"`
+		Name      string `json:"name"`
+	} `json:"hasura_hub_connector"`
+	SourceCode struct {
+		IsOpenSource bool   `json:"is_open_source"`
+		Repository   string `json:"repository"`
+	} `json:"source_code"`
+}
+
+// ConnectorRegistryArgs holds the command-line arguments and environment-derived
+// configuration expected by the registry automation CLI.
+type ConnectorRegistryArgs struct {
+	ChangedFilesPath         string
+	PublicationEnv           string
+	ConnectorRegistryGQLUrl  string
+	ConnectorPublicationKey  string
+	GCPServiceAccountDetails string
+	GCPBucketName            string
+	CloudinaryUrl            string
+}
+
+// MetadataFile is the path to a connector's metadata.json file.
+type MetadataFile string
+
+// NewConnectors represents the connectors newly added in the PR; the key is the
+// connector and the value is the path to its metadata.json file.
+type NewConnectors map[Connector]MetadataFile
+
+// ProcessedChangedFiles groups the changed files of a PR by the kind of change.
+type ProcessedChangedFiles struct {
+	NewConnectorVersions NewConnectorVersions
+	ModifiedLogos        ModifiedLogos
+	ModifiedReadmes      ModifiedReadmes
+	NewConnectors        NewConnectors
+	NewLogos             NewLogos
+	NewReadmes           NewReadmes
+}
+
+type GraphQLClientInterface interface {
+	Run(ctx context.Context, req *graphql.Request, resp interface{}) error
+}
+
+type StorageClientWrapper struct {
+	*storage.Client
+}
+
+func (s *StorageClientWrapper) Bucket(name string) *storage.BucketHandle {
+	return s.Client.Bucket(name)
+}
+
+type StorageClientInterface interface {
+	Bucket(name string) *storage.BucketHandle
+}
+
+type CloudinaryInterface interface {
+	Upload(ctx context.Context, file interface{}, uploadParams uploader.UploadParams) (*uploader.UploadResult, error)
+}
+
+type CloudinaryWrapper struct {
+	*cloudinary.Cloudinary
+}
+
+func (c *CloudinaryWrapper) Upload(ctx context.Context, file interface{}, uploadParams uploader.UploadParams) (*uploader.UploadResult, error) {
+	return c.Cloudinary.Upload.Upload(ctx, file, uploadParams)
+}
+
+// Context bundles the publication environment with the GraphQL, storage and Cloudinary
+// clients used while publishing connectors.
+type Context struct {
+	Env               string
+	RegistryGQLClient GraphQLClientInterface
+	StorageClient     StorageClientInterface
+	Cloudinary        CloudinaryInterface
+}
+
+// Connector uniquely identifies a connector by its name and namespace.
+type Connector struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace"`
+}
+
+// NewConnectorVersions represents the new connector versions added in the PR, keyed by
+// connector and then by version.
+type NewConnectorVersions map[Connector]map[string]string
+
+// ModifiedLogos represents the modified logos in the PR; the key is the connector and the value is the path to the modified logo.
+type ModifiedLogos map[Connector]string
+
+// ModifiedReadmes represents the modified READMEs in the PR; the key is the connector and the value is the path to the modified README.
+type ModifiedReadmes map[Connector]string
+
+// NewLogos represents the new logos added in the PR; the key is the connector and the value is the path to the new logo.
+type NewLogos map[Connector]string
+
+// NewReadmes represents the new READMEs added in the PR; the key is the connector and the value is the path to the new README.
+type NewReadmes map[Connector]string
diff --git a/registry-automation/cmd/utils.go b/registry-automation/cmd/utils.go
index 45af32a3..178ec188 100644
--- a/registry-automation/cmd/utils.go
+++ b/registry-automation/cmd/utils.go
@@ -3,6 +3,7 @@ package cmd
 import (
 	"encoding/json"
 	"fmt"
+	"gopkg.in/yaml.v2"
 	"io"
 	"net/http"
 	"os"
@@ -120,3 +121,30 @@ func extractTarGz(src, dest string) (string, error) {
 
 	return fmt.Sprintf("%s/.hasura-connector/connector-metadata.yaml", filepath), nil
 }
+
+// readYAMLFile accepts a file path to a YAML file and returns the contents of the file as a map[string]interface{}.
+func readYAMLFile(filePath string) (map[string]interface{}, error) {
+	// Open the file
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file: %w", err)
+	}
+	defer file.Close()
+
+	// Read the file contents
+	data, err := io.ReadAll(file)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read file: %w", err)
+	}
+
+	// Unmarshal the YAML contents into a map
+	var result map[string]interface{}
+	err = yaml.Unmarshal(data, &result)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal YAML: %w", err)
+	}
+
+	return result, nil
+}
diff --git a/registry-automation/go.mod b/registry-automation/go.mod
index e2eca410..2c03147d 100644
--- a/registry-automation/go.mod
+++ b/registry-automation/go.mod
@@ -5,12 +5,17 @@ go 1.21.4
 require (
 	github.com/cloudinary/cloudinary-go/v2 v2.8.0
 	github.com/spf13/cobra v1.8.0
+	github.com/stretchr/testify v1.9.0
 )
 
 require (
 	github.com/creasty/defaults v1.7.0 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/gorilla/schema v1.4.1 // indirect
 	github.com/matryer/is v1.4.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
 require (
diff --git a/registry-automation/go.sum b/registry-automation/go.sum
index c06e0b27..f44c7142 100644
--- a/registry-automation/go.sum
+++ b/registry-automation/go.sum
@@ -92,6 +92,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= diff --git a/registry/hasura/testnewconnectornegativecase/logo.png b/registry/hasura/testnewconnectornegativecase/logo.png new file mode 100644 index 00000000..06b4a258 Binary files /dev/null and b/registry/hasura/testnewconnectornegativecase/logo.png differ diff --git a/registry/hasura/testnewconnectornegativecase/metadata.json b/registry/hasura/testnewconnectornegativecase/metadata.json new file mode 100644 index 00000000..8cc41973 --- /dev/null +++ b/registry/hasura/testnewconnectornegativecase/metadata.json @@ -0,0 +1,73 @@ +{ + "overview": { + "namespace": "hasura", + "description": "Connect to a SQL Server database and expose it to Hasura v3 Project", + "title": "Test New Connector Without Readme or logo", + "logo": "logo.png", + "tags": ["database"], + "latest_version": "v0.2.1" + }, + "author": { + "support_email": "support@hasura.io", + "homepage": "https://hasura.io", + "name": "Hasura" + }, + "is_verified": true, + "is_hosted_by_hasura": true, + "packages": [ + { + "version": "0.1.2", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.2/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "102c642b2e0ddea1eaa471c5189ecd3423a20f91ad83995e09f9d4721dd85732" + }, + "source": { + "hash": "bc0fd3d126f6c142587e014aa900fc6bc90cd59d" + } + }, + { + "version": "0.1.1", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.1/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "340f11a2dbc180af838327c09949ac0dc14c94eb87b0d6b5a28c765ec928b1a9" + }, + "source": { + "hash": "638a2b608f7a9c4625de7df35c61c909d2ce16b1" + } + }, + { + "version": "0.1.0", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.0/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "5f47a1df3055b694ffaf13058e201ac0fa83db53ce2044cd15eeaaa841565cb4" + }, + "source": { + "hash": "e26d6bd1a22540dcf5c5b29460260c2d23ff2657" + } + } + ], + "source_code": { + "is_open_source": true, + "repository": "https://github.com/hasura/ndc-sqlserver/", + "version": [ + { + "tag": "v0.1.2", + "hash": "bc0fd3d126f6c142587e014aa900fc6bc90cd59d", + "is_verified": true + }, + { + "tag": "v0.1.1", + "hash": "638a2b608f7a9c4625de7df35c61c909d2ce16b1", + "is_verified": true + }, + { + "tag": "v0.1.0", + "hash": "e26d6bd1a22540dcf5c5b29460260c2d23ff2657", + "is_verified": true + } + ] + } +} diff --git a/registry/hasura/testnewconnectornegativecase/releases/v0.1.0/connector-packaging.json b/registry/hasura/testnewconnectornegativecase/releases/v0.1.0/connector-packaging.json new file mode 100644 index 00000000..f0200516 --- /dev/null +++ b/registry/hasura/testnewconnectornegativecase/releases/v0.1.0/connector-packaging.json @@ -0,0 +1,11 @@ +{ + "version": "0.1.0", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.0/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "5f47a1df3055b694ffaf13058e201ac0fa83db53ce2044cd15eeaaa841565cb4" + }, + "source": { + "hash": "e26d6bd1a22540dcf5c5b29460260c2d23ff2657" + } +} diff --git a/registry/hasura/testnewconnectorrelease/README.md b/registry/hasura/testnewconnectorrelease/README.md new file mode 100644 index 00000000..eb08aa6f --- /dev/null +++ 
b/registry/hasura/testnewconnectorrelease/README.md
@@ -0,0 +1,112 @@
+# New Connector
+
+[![Docs](https://img.shields.io/badge/docs-v3.x-brightgreen.svg?style=flat)](https://hasura.io/docs/3.0)
+[![ndc-hub](https://img.shields.io/badge/ndc--hub-sqlserver-blue.svg?style=flat)](https://hasura.io/connectors/sqlserver)
+[![License](https://img.shields.io/badge/license-Apache--2.0-purple.svg?style=flat)](LICENSE.txt)
+[![Status](https://img.shields.io/badge/status-alpha-yellow.svg?style=flat)](./readme.md)
+
+With this connector, Hasura allows you to instantly create a real-time GraphQL API on top of your data models in
+Microsoft SQL Server. This connector supports SQL Server's functionalities listed in the table below, allowing for
+efficient and scalable data operations. Additionally, users benefit from all the powerful features of Hasura's Data
+Delivery Network (DDN) platform, including query pushdown capabilities that delegate query operations to the database,
+thereby enhancing query optimization and performance.
+
+This connector is built using the [Rust Data Connector SDK](https://github.com/hasura/ndc-hub#rusk-sdk) and implements
+the [Data Connector Spec](https://github.com/hasura/ndc-spec).
+
+- [See the listing in the Hasura Hub](https://hasura.io/connectors/sqlserver)
+- [Hasura V3 Documentation](https://hasura.io/docs/3.0/)
+
+## Features
+
+Below, you'll find a matrix of all supported features for the SQL Server connector:
+
+| Feature                         | Supported | Notes                                |
+|---------------------------------|-----------|--------------------------------------|
+| Native Queries + Logical Models | ✅        |                                      |
+| Native Mutations                | ✅        |                                      |
+| Simple Object Query             | ✅        |                                      |
+| Filter / Search                 | ✅        |                                      |
+| Simple Aggregation              | ✅        |                                      |
+| Sort                            | ✅        |                                      |
+| Paginate                        | ✅        |                                      |
+| Table Relationships             | ✅        |                                      |
+| Views                           | ✅        |                                      |
+| Remote Relationships            | ✅        |                                      |
+| Stored Procedures               | ✅        |                                      |
+| Custom Fields                   | ❌        |                                      |
+| Mutations                       | ❌        | Only native mutations are supported  |
+| Distinct                        | ✅        |                                      |
+| Enums                           | ❌        |                                      |
+| Naming Conventions              | ❌        |                                      |
+| Default Values                  | ❌        |                                      |
+| User-defined Functions          | ❌        |                                      |
+
+## Before you get started
+
+1. Create a [Hasura Cloud account](https://console.hasura.io)
+2. Please ensure you have the [DDN CLI](https://hasura.io/docs/3.0/cli/installation) and [Docker](https://docs.docker.com/engine/install/) installed
+3. [Create a supergraph](https://hasura.io/docs/3.0/getting-started/init-supergraph)
+4. [Create a subgraph](https://hasura.io/docs/3.0/getting-started/init-subgraph)
+
+The steps below explain how to initialize and configure a connector for local development. You can learn how to deploy a
+connector — after it's been configured — [here](https://hasura.io/docs/3.0/getting-started/deployment/deploy-a-connector).
+
+## Using the SQL Server connector
+
+### Step 1: Authenticate your CLI session
+
+```bash
+ddn auth login
+```
+
+### Step 2: Configure the connector
+
+Once you have an initialized supergraph and subgraph, run the initialization command in interactive mode while
+providing a name for the connector in the prompt:
+
+```bash
+ddn connector init -i
+```
+
+#### Step 2.1: Choose `hasura/sqlserver` from the list
+
+#### Step 2.2: Choose a port for the connector
+
+The CLI will ask for a specific port to run the connector on. Choose a port that is not already in use or use the
+default suggested port.
+ +#### Step 2.3: Provide the env vars for the connector + +| Name | Description | Required | Default | +|----------------|--------------------------------------------------|----------|---------| +| CONNECTION_URI | The connection string of the SQL Server database | Yes | N/A | + +## Step 3: Introspect the connector + +```bash +ddn connector introspect +``` + +This will generate a `configuration.json` file that will have the schema of your SQL Server database. + +## Step 4: Add your resources + +```bash +ddn connector-link add-resources +``` + +This command will track all the containers in your SQL Server DB as [Models](https://hasura.io/docs/3.0/supergraph-modeling/models). + +## Documentation + +View the full documentation for the ndc-sqlserver connector [here](./docs/readme.md). + +## Contributing + +We're happy to receive any contributions from the community. Please refer to our [development guide](./docs/development.md). + +## License + +The Hasura SQL Server connector is available under the [Apache License +2.0](https://www.apache.org/licenses/LICENSE-2.0). diff --git a/registry/hasura/testnewconnectorrelease/logo.png b/registry/hasura/testnewconnectorrelease/logo.png new file mode 100644 index 00000000..06b4a258 Binary files /dev/null and b/registry/hasura/testnewconnectorrelease/logo.png differ diff --git a/registry/hasura/testnewconnectorrelease/metadata.json b/registry/hasura/testnewconnectorrelease/metadata.json new file mode 100644 index 00000000..423ce946 --- /dev/null +++ b/registry/hasura/testnewconnectorrelease/metadata.json @@ -0,0 +1,73 @@ +{ + "overview": { + "namespace": "hasura", + "description": "Connect to a SQL Server database and expose it to Hasura v3 Project", + "title": "Test New Connector First Release", + "logo": "logo.png", + "tags": ["database"], + "latest_version": "v0.2.1" + }, + "author": { + "support_email": "support@hasura.io", + "homepage": "https://hasura.io", + "name": "Hasura" + }, + "is_verified": true, + "is_hosted_by_hasura": true, + "packages": [ + { + "version": "0.1.2", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.2/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "102c642b2e0ddea1eaa471c5189ecd3423a20f91ad83995e09f9d4721dd85732" + }, + "source": { + "hash": "bc0fd3d126f6c142587e014aa900fc6bc90cd59d" + } + }, + { + "version": "0.1.1", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.1/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "340f11a2dbc180af838327c09949ac0dc14c94eb87b0d6b5a28c765ec928b1a9" + }, + "source": { + "hash": "638a2b608f7a9c4625de7df35c61c909d2ce16b1" + } + }, + { + "version": "0.1.0", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.0/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "5f47a1df3055b694ffaf13058e201ac0fa83db53ce2044cd15eeaaa841565cb4" + }, + "source": { + "hash": "e26d6bd1a22540dcf5c5b29460260c2d23ff2657" + } + } + ], + "source_code": { + "is_open_source": true, + "repository": "https://github.com/hasura/ndc-sqlserver/", + "version": [ + { + "tag": "v0.1.2", + "hash": "bc0fd3d126f6c142587e014aa900fc6bc90cd59d", + "is_verified": true + }, + { + "tag": "v0.1.1", + "hash": "638a2b608f7a9c4625de7df35c61c909d2ce16b1", + "is_verified": true + }, + { + "tag": "v0.1.0", + "hash": "e26d6bd1a22540dcf5c5b29460260c2d23ff2657", + "is_verified": true + } + ] + } +} diff --git a/registry/hasura/testnewconnectorrelease/releases/v0.1.0/connector-packaging.json 
b/registry/hasura/testnewconnectorrelease/releases/v0.1.0/connector-packaging.json new file mode 100644 index 00000000..f0200516 --- /dev/null +++ b/registry/hasura/testnewconnectorrelease/releases/v0.1.0/connector-packaging.json @@ -0,0 +1,11 @@ +{ + "version": "0.1.0", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.0/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "5f47a1df3055b694ffaf13058e201ac0fa83db53ce2044cd15eeaaa841565cb4" + }, + "source": { + "hash": "e26d6bd1a22540dcf5c5b29460260c2d23ff2657" + } +} diff --git a/registry/hasura/testnewconnectorrelease/releases/v0.1.1/connector-packaging.json b/registry/hasura/testnewconnectorrelease/releases/v0.1.1/connector-packaging.json new file mode 100644 index 00000000..d1836ef5 --- /dev/null +++ b/registry/hasura/testnewconnectorrelease/releases/v0.1.1/connector-packaging.json @@ -0,0 +1,11 @@ +{ + "version": "0.1.1", + "uri": "https://github.com/hasura/ndc-sqlserver/releases/download/v0.1.1/package.tar.gz", + "checksum": { + "type": "sha256", + "value": "340f11a2dbc180af838327c09949ac0dc14c94eb87b0d6b5a28c765ec928b1a9" + }, + "source": { + "hash": "638a2b608f7a9c4625de7df35c61c909d2ce16b1" + } +}