diff --git a/README.md b/README.md index f8ade1e..4e63d1a 100644 --- a/README.md +++ b/README.md @@ -37,10 +37,11 @@ http: # - SLES12-SP2-LTSS-Updates # archs: [x86_64] -# OBS credentials: -# obs: -# username: "" -# password: "" +# Build Service credentials: +# build_service: +# api: obs +# username: "user" +# password: "passwd" ``` diff --git a/cmd/sync.go b/cmd/sync.go index 2ab745c..3cf6039 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -3,14 +3,13 @@ package cmd import ( "fmt" "log" - "net/url" - "os" - "path/filepath" "strings" "github.com/spf13/cobra" "github.com/uyuni-project/minima/get" - "github.com/uyuni-project/minima/updates" + "github.com/uyuni-project/minima/maint" + "github.com/uyuni-project/minima/scc" + "github.com/uyuni-project/minima/storage" yaml "gopkg.in/yaml.v2" ) @@ -54,24 +53,23 @@ var ( Run: func(cmd *cobra.Command, args []string) { initConfig() - var errorflag bool = false - syncers, err := syncersFromConfig(cfgString) + config, err := parseConfig(cfgString) if err != nil { log.Fatal(err) - errorflag = true } + + syncers, err := syncersFromConfig(config) + if err != nil { + log.Fatal(err) + } + for _, syncer := range syncers { log.Printf("Processing repo: %s", syncer.URL.String()) err := syncer.StoreRepo() if err != nil { - log.Println(err) - errorflag = true - } else { - log.Println("...done.") + log.Fatal(err) } - } - if errorflag { - os.Exit(1) + log.Println("...done.") } }, } @@ -82,17 +80,13 @@ var ( // Config maps the configuration in minima.yaml type Config struct { - Storage get.StorageConfig - SCC get.SCC - OBS updates.OBS - HTTP []get.HTTPRepoConfig + Storage storage.StorageConfig + SCC scc.SCC + BuildService maint.BuildServiceCredentials `yaml:"build_service"` + HTTP []get.HTTPRepo } -func syncersFromConfig(configString string) ([]*get.Syncer, error) { - config, err := parseConfig(configString) - if err != nil { - return nil, err - } +func syncersFromConfig(config Config) ([]*get.Syncer, error) { //---passing the flag value to a 
global variable in get package, to disables syncing of i586 and i686 rpms (usually inside x86_64) get.SkipLegacy = skipLegacyPackages @@ -101,7 +95,7 @@ func syncersFromConfig(configString string) ([]*get.Syncer, error) { if archs == "" { archs = "x86_64" } - config.SCC.Repositories = []get.SCCReposConfig{ + config.SCC.Repositories = []scc.SCCRepos{ { Names: []string{thisRepo}, Archs: strings.Split(archs, ","), @@ -109,39 +103,14 @@ func syncersFromConfig(configString string) ([]*get.Syncer, error) { } } - httpRepoConfigs, err := get.SCCToHTTPConfigs(sccUrl, config.SCC.Username, config.SCC.Password, config.SCC.Repositories) + httpRepoConfigs, err := scc.SCCToHTTPConfigs(sccUrl, config.SCC.Username, config.SCC.Password, config.SCC.Repositories) if err != nil { return nil, err } config.HTTP = append(config.HTTP, httpRepoConfigs...) } - syncers := []*get.Syncer{} - for _, httpRepo := range config.HTTP { - repoURL, err := url.Parse(httpRepo.URL) - if err != nil { - return nil, err - } - - archs := map[string]bool{} - for _, archString := range httpRepo.Archs { - archs[archString] = true - } - - var storage get.Storage - switch config.Storage.Type { - case "file": - storage = get.NewFileStorage(filepath.Join(config.Storage.Path, filepath.FromSlash(repoURL.Path))) - case "s3": - storage, err = get.NewS3Storage(config.Storage.AccessKeyID, config.Storage.AccessKeyID, config.Storage.Region, config.Storage.Bucket+repoURL.Path) - if err != nil { - return nil, err - } - } - syncers = append(syncers, get.NewSyncer(*repoURL, archs, storage)) - } - - return syncers, nil + return get.SyncersFromHTTPRepos(config.HTTP, config.Storage) } func parseConfig(configString string) (Config, error) { diff --git a/cmd/sync_test.go b/cmd/sync_test.go index 458cff0..63bf28f 100644 --- a/cmd/sync_test.go +++ b/cmd/sync_test.go @@ -7,6 +7,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/uyuni-project/minima/get" + "github.com/uyuni-project/minima/scc" + 
"github.com/uyuni-project/minima/storage" ) const ( @@ -26,11 +28,11 @@ func TestParseConfig(t *testing.T) { { "Valid HTTP repos", validHTTPReposFile, Config{ - Storage: get.StorageConfig{ + Storage: storage.StorageConfig{ Type: "file", Path: "/srv/mirror", }, - HTTP: []get.HTTPRepoConfig{ + HTTP: []get.HTTPRepo{ { URL: "http://test/SLE-Product-SLES15-SP5-Pool/", Archs: []string{"x86_64", "aarch64", "s390x"}, @@ -46,14 +48,14 @@ func TestParseConfig(t *testing.T) { { "Valid SCC repos", validSCCReposFile, Config{ - Storage: get.StorageConfig{ + Storage: storage.StorageConfig{ Type: "file", Path: "/srv/mirror", }, - SCC: get.SCC{ + SCC: scc.SCC{ Username: "user", Password: "pass", - Repositories: []get.SCCReposConfig{ + Repositories: []scc.SCCRepos{ { Names: []string{"SLE-Manager-Tools15-Pool", "SLE-Manager-Tools15-Updates"}, Archs: []string{"x86_64", "aarch64", "s390x"}, @@ -70,11 +72,11 @@ func TestParseConfig(t *testing.T) { { "Invalid storage", invalidStoragefile, Config{ - Storage: get.StorageConfig{ + Storage: storage.StorageConfig{ Type: "memory", Path: "/srv/mirror", }, - HTTP: []get.HTTPRepoConfig{ + HTTP: []get.HTTPRepo{ { URL: "http://test/SLE-Product-SLES15-SP5-Pool/", Archs: []string{"x86_64", "aarch64", "s390x"}, diff --git a/cmd/updates.go b/cmd/updates.go index 709f5c1..28af5fd 100644 --- a/cmd/updates.go +++ b/cmd/updates.go @@ -17,47 +17,26 @@ package cmd import ( "fmt" - "io" "log" - "net/http" "os" - "path/filepath" - "regexp" "strings" - "sync" "time" "github.com/spf13/cobra" "github.com/uyuni-project/minima/get" - "github.com/uyuni-project/minima/updates" + "github.com/uyuni-project/minima/maint" yaml "gopkg.in/yaml.v2" ) -type Updates struct { - IncidentNumber string - ReleaseRequest string - SRCRPMS []string - Products string - Repositories []get.HTTPRepoConfig -} - -// package scoped array of all possible available archs to check for a repo -var architectures = [...]string{"x86_64", "i586", "i686", "aarch64", "aarch64_ilp32", "ppc64le", 
"s390x", "src"} - -// package scoped Thread-safe Map used as cache to check the existence of repositories -var register sync.Map - -// mufnsCmd represents the mufns command +// updateCmd represents the mufns command var ( updateCmd = &cobra.Command{ Use: "updates", Short: "searches all updates and syncs them to mirror", - Long: `A longer description that spans multiple lines and likely contains examples -and usage of using your command. For example: - -Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, + Long: `Using the Build Service API, searches the updates repositories for the given MU and filters and syncs them to the mirror. + It is also possible to generate yaml file to be used for syncing such repositories, only search for the updates or do a cleanup + of previous MUs repositories. + `, Run: func(cmd *cobra.Command, args []string) { initConfig() muFindAndSync() @@ -67,45 +46,54 @@ to quickly create a Cobra application.`, justSearch bool thisMU string cleanup bool + group string + states string ) func init() { RootCmd.AddCommand(updateCmd) // local flags - updateCmd.Flags().BoolVarP(&spitYamls, "yaml", "y", false, "flag that would trigger generating minima_obs_.yaml configs") - updateCmd.Flags().BoolVarP(&justSearch, "search", "s", false, "flag that would trigger only looking for updates on OBS") + updateCmd.Flags().BoolVarP(&spitYamls, "yaml", "y", false, "flag that would trigger generating minima__.yaml configs") + updateCmd.Flags().BoolVarP(&justSearch, "search", "s", false, "flag that would trigger only looking for updates on the Build Service") updateCmd.Flags().StringVarP(&thisMU, "maintupdate", "m", "", "flag that consumes the name of an MU, like 'SUSE:Maintenance:Incident:ReleaseRequest'") updateCmd.Flags().BoolVarP(&cleanup, "cleanup", "k", false, "flag that triggers cleaning up the storage (from old MU channels)") + 
updateCmd.Flags().StringVarP(&group, "group", "g", "", "flag that applies a filter by group") + updateCmd.Flags().StringVarP(&states, "states", "t", "new,review", "flag that applies a filter by state, multiple states must be separated by a comma (default 'new,review')") } func muFindAndSync() { config := Config{} - updateList := []Updates{} + updateList := []maint.Updates{} - err := yaml.Unmarshal([]byte(cfgString), &config) - if err != nil { + if err := yaml.Unmarshal([]byte(cfgString), &config); err != nil { log.Fatalf("Error reading configuration: %v", err) } + client, err := maint.NewBuildServiceClient(config.BuildService.API, config.BuildService.Username, config.BuildService.Password) + if err != nil { + log.Fatal(err) + } + if cleanup { // DO CLEANUP - TO BE IMPLEMENTED log.Println("searching for outdated MU repos...") - updateList, err = GetUpdatesAndChannels(config.OBS.Username, config.OBS.Password, true) + updateList, err := client.GetUpdatesAndChannels(group, states, true) if err != nil { log.Fatalf("Error searching for outdated MUs repos: %v", err) } - err = RemoveOldChannels(config, updateList) + + err = client.RemoveOldChannels(config.Storage, updateList) if err != nil { log.Fatalf("Error removing old channels: %v", err) } log.Println("...done!") } else { if thisMU == "" { - updateList, err = GetUpdatesAndChannels(config.OBS.Username, config.OBS.Password, justSearch) + updateList, err := client.GetUpdatesAndChannels(group, states, justSearch) if err != nil { log.Fatalf("Error finding updates and channels: %v", err) } - config.HTTP = []get.HTTPRepoConfig{} + config.HTTP = []get.HTTPRepo{} for _, val := range updateList { config.HTTP = append(config.HTTP, val.Repositories...) } @@ -113,15 +101,15 @@ func muFindAndSync() { if mu := strings.Split(thisMU, ":"); len(mu) != 4 { log.Fatalf("Badly formatted MU. 
It must be SUSE:Maintenance:NUMBER:NUMBER") } else { - a := Updates{} + a := maint.Updates{} a.IncidentNumber = mu[2] a.ReleaseRequest = mu[3] - mu := fmt.Sprintf("%s%s/", updates.DownloadIbsLink, a.IncidentNumber) - a.Repositories, err = GetRepo(http.DefaultClient, mu) + repos, err := client.GetRepo(a.IncidentNumber) if err != nil { log.Fatalf("Something went wrong in MU %s repos processing: %v\n", mu, err) } + a.Repositories = repos config.HTTP = append(config.HTTP, a.Repositories...) updateList = append(updateList, a) } @@ -134,7 +122,7 @@ func muFindAndSync() { if spitYamls { t := time.Now() - err := os.WriteFile(fmt.Sprintf("./minima_obs_%v-%v-%v-%v:%v.yaml", t.Year(), t.Month(), t.Local().Day(), t.Hour(), t.Minute()), byteChunk, 0644) + err := os.WriteFile(fmt.Sprintf("./minima_%s_%v-%v-%v-%v:%v.yaml", config.BuildService.API, t.Year(), t.Month(), t.Local().Day(), t.Hour(), t.Minute()), byteChunk, 0644) if err != nil { log.Fatalf("Error writing file: %v", err) } @@ -147,7 +135,7 @@ func muFindAndSync() { os.Exit(3) } - syncers, err := syncersFromConfig(string(byteChunk)) + syncers, err := syncersFromConfig(config) if err != nil { log.Fatal(err) } @@ -162,237 +150,3 @@ func muFindAndSync() { } } } - -// ProcWebChunk retrieves repositories data for a product target in a MU -func ProcWebChunk(client *http.Client, product, maint string) ([]get.HTTPRepoConfig, error) { - httpFormattedRepos := []get.HTTPRepoConfig{} - repo := get.HTTPRepoConfig{ - Archs: []string{}, - } - repoUrl := maint + product - - _, ok := register.Load(repoUrl) - if !ok { - exists, err := updates.CheckWebPageExists(client, repoUrl) - if err != nil { - return nil, err - } - register.Store(repoUrl, exists) - - if exists { - repo.URL = repoUrl - if err := ArchMage(client, &repo); err != nil { - return nil, err - } - fmt.Println(repo) - httpFormattedRepos = append(httpFormattedRepos, repo) - } - } - return httpFormattedRepos, nil -} - -// ArchMage checks that all architecture slice of a 
*HTTPRepoConfig is filled right -func ArchMage(client *http.Client, repo *get.HTTPRepoConfig) error { - archsChan := make(chan string) - // we need a dedicated goroutine to start the others, wait for them to finish - // and signal back that we're done doing HTTP calls - go func() { - var wg sync.WaitGroup - wg.Add(len(architectures)) - - // verify each arch page exists (possibly) in parallel - for _, a := range architectures { - go func(arch string) { - defer wg.Done() - - if strings.Contains(repo.URL, arch) { - archsChan <- arch - return - } - - finalUrl := repo.URL + arch + "/" - exists, err := updates.CheckWebPageExists(client, finalUrl) - if err != nil { - // TODO: verify if we need to actually return an error - log.Printf("Got error calling HEAD %s: %v...\n", finalUrl, err) - } - if exists { - archsChan <- arch - } - }(a) - } - - wg.Wait() - close(archsChan) - }() - - for foundArch := range archsChan { - repo.Archs = append(repo.Archs, foundArch) - } - - if len(repo.Archs) == 0 { - return fmt.Errorf("no available arch has been found for this repo: %s", repo.URL) - } - return nil -} - -// GetRepo retrieves HTTP repositories data for all the products targets associated to an MU -func GetRepo(client *http.Client, mu string) (httpFormattedRepos []get.HTTPRepoConfig, err error) { - productsChunks, err := getProductsForMU(client, mu) - if err != nil { - return nil, fmt.Errorf("error retrieving products for MU %s: %v", mu, err) - } - fmt.Printf("%d product entries for mu %s\n", len(productsChunks), mu) - - reposChan := make(chan []get.HTTPRepoConfig) - errChan := make(chan error) - // empty struct for 0 allocation: we need only to signal we're done, not pass data - doneChan := make(chan struct{}) - - // we need a dedicated goroutine to start the others, wait for them to finish - // and signal back that we're done processing - go func() { - var wg sync.WaitGroup - wg.Add(len(productsChunks)) - - // process each chunk (possibly) in parallel - for _, productChunk := 
range productsChunks { - go func(product, maint string) { - defer wg.Done() - - repo, err := ProcWebChunk(client, product, maint) - if err != nil { - errChan <- err - } - reposChan <- repo - - }(productChunk, mu) - } - - wg.Wait() - close(reposChan) - doneChan <- struct{}{} - }() - - // keeps looping untill we're done processing all chunks or an error occurs - for { - select { - case repo := <-reposChan: - httpFormattedRepos = append(httpFormattedRepos, repo...) - case err = <-errChan: - return nil, err - case <-doneChan: - close(errChan) - close(doneChan) - return httpFormattedRepos, nil - } - } -} - -// getProductsForMU parses a MU webpage attempting to retrieve a slice of available SUSE products -func getProductsForMU(client *http.Client, mu string) ([]string, error) { - fmt.Println("GET", mu) - resp, err := client.Get(mu) - if err != nil { - return nil, err - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - chunks := strings.Split(string(body), "\"") - productsChunks := cleanWebChunks(chunks) - - return productsChunks, nil -} - -// cleanWebChunks filters a slice of HTML elements and returns a slice containing only SUSE products -func cleanWebChunks(chunks []string) []string { - products := []string{} - regEx := regexp.MustCompile(`>(SUSE[^<]+\/)<`) - - for _, chunk := range chunks { - matches := regEx.FindStringSubmatch(chunk) - if matches != nil { - products = append(products, matches[1]) - } - } - return products -} - -func GetUpdatesAndChannels(usr, passwd string, justsearch bool) (updlist []Updates, err error) { - client := updates.NewClient(usr, passwd) - rrs, err := client.GetReleaseRequests("qam-manager", "new,review") - if err != nil { - return updlist, fmt.Errorf("error while getting response from obs: %v", err) - } - - for _, value := range rrs { - var update Updates - update.ReleaseRequest = value.Id - - for i := 0; i < len(value.Actions); i++ { - if 
len(strings.Split(value.Actions[i].Target.Package, ".")) > 1 { - update.IncidentNumber = strings.Split(value.Actions[i].Target.Package, ".")[1] - if update.IncidentNumber != "" { - break - } - } - } - for _, val := range value.Actions { - if !strings.Contains(val.Target.Package, "patchinfo") && !(strings.Contains(val.Target.Package, "SLE") || strings.Contains(val.Target.Package, "Module")) { - update.SRCRPMS = append(update.SRCRPMS, strings.Split(val.Target.Package, ".")[0]) - } - } - if !justsearch { - mu := fmt.Sprintf("%s%s/", updates.DownloadIbsLink, update.IncidentNumber) - update.Repositories, err = GetRepo(client.HttpClient, mu) - if err != nil { - return updlist, fmt.Errorf("something went wrong in repo processing: %v", err) - } - } - updlist = append(updlist, update) - } - return updlist, err -} - -func RemoveOldChannels(config Config, updates []Updates) (err error) { - mappedUpdates := MakeAMap(updates) - switch config.Storage.Type { - case "file": - var muChannelList []string - err = filepath.Walk(filepath.Join(config.Storage.Path, "ibs/SUSE:/Maintenance:/"), func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - muChannelList = append(muChannelList, path) - } - return nil - }) - if err != nil { - return - } - //templ := regexp.MustCompile(`/\d{5,6}/`) - for _, elem := range muChannelList { - if regexp.MustCompile(`/\d{5,6}/`).FindString(elem) != "" { - _, exists := mappedUpdates[strings.Replace(regexp.MustCompile(`/\d{5,6}/`).FindString(elem), "/", "", 10)] - if !exists { - log.Printf("removing: %s...\n", elem) - err = os.RemoveAll(elem) - if err != nil { - return - } - } - } - } - } - return -} - -func MakeAMap(updates []Updates) (updatesMap map[string]bool) { - updatesMap = make(map[string]bool) - for _, elem := range updates { - updatesMap[elem.IncidentNumber] = true - } - return -} diff --git a/get/httpclient.go b/get/httpclient.go index 5d5cf2d..9b2ee0d 100644 --- a/get/httpclient.go +++ b/get/httpclient.go @@ -6,6 +6,12 @@ 
// HTTPRepo defines the configuration of an HTTP repo
type HTTPRepo struct {
	URL   string
	Archs []string
}

// CheckWebPageExists issues a HEAD request against repoURL and reports whether
// the server answered with 200 OK. A non-200 answer is not an error: it simply
// means the page does not exist (used to probe repository/arch directories).
func CheckWebPageExists(client *http.Client, repoURL string) (bool, error) {
	resp, err := client.Head(repoURL)
	if err != nil {
		return false, err
	}
	// Close the body so the transport can reuse the connection; the original
	// leaked it. Compare the numeric status code, not the status-line string
	// ("200 OK"), which servers are not required to spell identically.
	defer resp.Body.Close()

	return resp.StatusCode == http.StatusOK, nil
}
func() { - listener, err := net.Listen("tcp", ":8080") - errs <- err - http.Serve(listener, nil) - }() - - err := <-errs - if err != nil { - log.Panic("Could not start test HTTP server:", err) - } - - os.Exit(m.Run()) -} diff --git a/get/syncer.go b/get/syncer.go index b817d28..396a2f6 100644 --- a/get/syncer.go +++ b/get/syncer.go @@ -15,6 +15,7 @@ import ( "strings" "github.com/klauspost/compress/zstd" + "github.com/uyuni-project/minima/storage" "github.com/uyuni-project/minima/util" "golang.org/x/crypto/openpgp" ) @@ -109,7 +110,7 @@ type Syncer struct { // URL of the repo this syncer syncs URL url.URL archs map[string]bool - storage Storage + storage storage.Storage } // Decision encodes what to do with a file @@ -125,10 +126,36 @@ const ( ) // NewSyncer creates a new Syncer -func NewSyncer(url url.URL, archs map[string]bool, storage Storage) *Syncer { +func NewSyncer(url url.URL, archs map[string]bool, storage storage.Storage) *Syncer { return &Syncer{url, archs, storage} } +func SyncersFromHTTPRepos(repoConfigs []HTTPRepo, storageConfig storage.StorageConfig) ([]*Syncer, error) { + syncers := []*Syncer{} + + for _, httpRepo := range repoConfigs { + repoURL, err := url.Parse(httpRepo.URL) + if err != nil { + return nil, err + } + + archs := map[string]bool{} + for _, archString := range httpRepo.Archs { + archs[archString] = true + } + + storage, err := storage.FromConfig(storageConfig, repoURL) + if err != nil { + return nil, err + } + + syncer := NewSyncer(*repoURL, archs, storage) + syncers = append(syncers, syncer) + } + + return syncers, nil +} + // StoreRepo stores an HTTP repo in a Storage, automatically retrying in case of recoverable errors func (r *Syncer) StoreRepo() (err error) { checksumMap := r.readChecksumMap() @@ -360,10 +387,10 @@ func (r *Syncer) readChecksumMap() (checksumMap map[string]XMLChecksum) { checksumMap = make(map[string]XMLChecksum) repoType := repoTypes["rpm"] - repomdReader, err := r.storage.NewReader(repomdPath, Permanent) + 
repomdReader, err := r.storage.NewReader(repomdPath, storage.Permanent) if err != nil { - if err == ErrFileNotFound { - repomdReader, err = r.storage.NewReader(releasePath, Permanent) + if err == storage.ErrFileNotFound { + repomdReader, err = r.storage.NewReader(releasePath, storage.Permanent) if err != nil { log.Println("First-time sync started") return @@ -388,7 +415,7 @@ func (r *Syncer) readChecksumMap() (checksumMap map[string]XMLChecksum) { dataChecksum := data[i].Checksum checksumMap[dataHref] = dataChecksum if data[i].Type == repoType.PackagesType { - primaryReader, err := r.storage.NewReader(dataHref, Permanent) + primaryReader, err := r.storage.NewReader(dataHref, storage.Permanent) if err != nil { return } @@ -408,7 +435,7 @@ func (r *Syncer) readChecksumMap() (checksumMap map[string]XMLChecksum) { // processPrimary stores the primary XML metadata file and returns a list of // package file paths to download func (r *Syncer) processPrimary(path string, checksumMap map[string]XMLChecksum, repoType RepoType) (packagesToDownload []XMLPackage, packagesToRecycle []XMLPackage, err error) { - reader, err := r.storage.NewReader(path, Temporary) + reader, err := r.storage.NewReader(path, storage.Temporary) if err != nil { return } @@ -445,7 +472,7 @@ func (r *Syncer) decide(location string, checksum XMLChecksum, checksumMap map[s previousChecksum, foundInChecksumMap := checksumMap[location] if foundInChecksumMap { - reader, err := r.storage.NewReader(location, Permanent) + reader, err := r.storage.NewReader(location, storage.Permanent) if err != nil { return Download } @@ -453,7 +480,7 @@ func (r *Syncer) decide(location string, checksum XMLChecksum, checksumMap map[s } if !foundInChecksumMap || previousChecksum.Type != checksum.Type || previousChecksum.Checksum != checksum.Checksum { - reader, err := r.storage.NewReader(location, Temporary) + reader, err := r.storage.NewReader(location, storage.Temporary) if err != nil { return Download } diff --git 
a/get/syncer_test.go b/get/syncer_test.go index c7285a2..cc0701a 100644 --- a/get/syncer_test.go +++ b/get/syncer_test.go @@ -2,14 +2,38 @@ package get import ( "net/http" + "net/http/httptest" "net/url" "os" "path/filepath" "testing" + + "github.com/stretchr/testify/assert" + "github.com/uyuni-project/minima/storage" ) +func TestSyncersFromHTTPRepo(t *testing.T) { + tests := []struct { + name string + httpRepos []HTTPRepo + storageConfig storage.StorageConfig + want []*Syncer + wantErr bool + }{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := SyncersFromHTTPRepos(tt.httpRepos, tt.storageConfig) + assert.EqualValues(t, tt.wantErr, (err != nil)) + }) + } +} + func TestStoreRepo(t *testing.T) { - // Respond to http://localhost:8080/repo serving the content of the testdata/repo directory + server := httptest.NewServer(http.DefaultServeMux) + defer server.Close() + + // Respond to /repo serving the content of the testdata/repo directory http.Handle("/", http.FileServer(http.Dir("testdata"))) directory := filepath.Join(os.TempDir(), "syncer_test") @@ -21,8 +45,8 @@ func TestStoreRepo(t *testing.T) { archs := map[string]bool{ "x86_64": true, } - storage := NewFileStorage(directory) - url, err := url.Parse("http://localhost:8080/repo") + storage := storage.NewFileStorage(directory) + url, err := url.Parse(server.URL + "/repo") if err != nil { t.Error(err) } @@ -69,6 +93,9 @@ func TestStoreRepo(t *testing.T) { } func TestStoreRepoZstd(t *testing.T) { + server := httptest.NewServer(http.DefaultServeMux) + defer server.Close() + directory := filepath.Join(os.TempDir(), "syncer_test") err := os.RemoveAll(directory) if err != nil { @@ -78,8 +105,8 @@ func TestStoreRepoZstd(t *testing.T) { archs := map[string]bool{ "x86_64": true, } - storage := NewFileStorage(directory) - url, err := url.Parse("http://localhost:8080/zstrepo") + storage := storage.NewFileStorage(directory) + url, err := url.Parse(server.URL + "/zstrepo") if err != nil { 
t.Error(err) } @@ -125,6 +152,9 @@ func TestStoreRepoZstd(t *testing.T) { } func TestStoreDebRepo(t *testing.T) { + server := httptest.NewServer(http.DefaultServeMux) + defer server.Close() + directory := filepath.Join(os.TempDir(), "syncer_test") err := os.RemoveAll(directory) if err != nil { @@ -135,8 +165,8 @@ func TestStoreDebRepo(t *testing.T) { "amd64": true, } - storage := NewFileStorage(directory) - url, err := url.Parse("http://localhost:8080/deb_repo") + storage := storage.NewFileStorage(directory) + url, err := url.Parse(server.URL + "/deb_repo") if err != nil { t.Error(err) } diff --git a/maint/client.go b/maint/client.go new file mode 100644 index 0000000..4a35e10 --- /dev/null +++ b/maint/client.go @@ -0,0 +1,373 @@ +// Lib's Co-Author: Felix Gerling, fgerling@suse.com, https://github.com/fgerling +package maint + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/uyuni-project/minima/get" + "github.com/uyuni-project/minima/storage" +) + +// package scoped array of all possible available archs to check for a repo +var architectures = [...]string{"x86_64", "i586", "i686", "aarch64", "aarch64_ilp32", "ppc64le", "s390x", "src"} + +// package scoped Thread-safe Map used as cache to check the existence of repositories +var register sync.Map + +type BuildService string + +const ( + IBS BuildService = "ibs" + OBS BuildService = "obs" + + downloadOBSLink = "http://download.opensuse.org/repositories/openSUSE:/Maintenance:/" + downloadIBSLink = "http://download.suse.de/ibs/SUSE:/Maintenance:/" + obsAPI = "api.opensuse.org" + ibsAPI = "api.suse.de" +) + +type BuildServiceCredentials struct { + API BuildService + Username string + Password string +} + +type BuildServiceClient struct { + downloadLink string + baseURL *url.URL + username string + password string + httpClient *http.Client +} + +var ErrUnsupportedAPI = errors.New("unsupported build 
service API (must be 'obs' or 'ibs')") + +func NewBuildServiceClient(buildService BuildService, username string, password string) (*BuildServiceClient, error) { + client := &BuildServiceClient{ + username: username, + password: password, + httpClient: &http.Client{}, + } + + switch buildService { + case IBS: + client.downloadLink = downloadIBSLink + client.baseURL = &url.URL{Host: ibsAPI, Scheme: "https"} + case OBS: + client.downloadLink = downloadOBSLink + client.baseURL = &url.URL{Host: obsAPI, Scheme: "https"} + default: + return nil, ErrUnsupportedAPI + } + + return client, nil +} + +func (c *BuildServiceClient) newRequest(method, path string, body interface{}) (*http.Request, error) { + rel := &url.URL{Path: path} + u := c.baseURL.ResolveReference(rel) + + var buf io.ReadWriter + if body != nil { + buf = new(bytes.Buffer) + err := xml.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + req.SetBasicAuth(c.username, c.password) + if body != nil { + req.Header.Set("Content-Type", "application/xml") + } + + req.Header.Set("Accept", "application/xml") + return req, nil +} + +func (c *BuildServiceClient) GetReleaseRequests(group string, states string) ([]ReleaseRequest, error) { + req, err := c.newRequest("GET", "/request", nil) + if err != nil { + return nil, err + } + + q := req.URL.Query() + q.Add("view", "collection") + q.Add("group", group) + q.Add("states", states) + req.URL.RawQuery = q.Encode() + + var collection Collection + err = c.do(req, &collection) + return collection.ReleaseRequests, err +} + +func (c *BuildServiceClient) GetPatchinfo(rr ReleaseRequest) (*Patchinfo, error) { + project := rr.Actions[0].Source.Project + patchinfo_url := fmt.Sprintf("/source/%v/patchinfo/_patchinfo", project) + + req, err := c.newRequest("GET", patchinfo_url, nil) + if err != nil { + return nil, err + } + + var patchinfo Patchinfo + err = c.do(req, 
&patchinfo) + return &patchinfo, err +} + +func (c *BuildServiceClient) GetUpdatesAndChannels(group, states string, justsearch bool) ([]Updates, error) { + rrs, err := c.GetReleaseRequests(group, states) + if err != nil { + return nil, fmt.Errorf("error while getting response from obs: %v", err) + } + + updates := []Updates{} + for _, value := range rrs { + var update Updates + update.ReleaseRequest = value.Id + + for i := 0; i < len(value.Actions); i++ { + if len(strings.Split(value.Actions[i].Target.Package, ".")) > 1 { + update.IncidentNumber = strings.Split(value.Actions[i].Target.Package, ".")[1] + if update.IncidentNumber != "" { + break + } + } + } + for _, val := range value.Actions { + if !strings.Contains(val.Target.Package, "patchinfo") && !(strings.Contains(val.Target.Package, "SLE") || strings.Contains(val.Target.Package, "Module")) { + update.SRCRPMS = append(update.SRCRPMS, strings.Split(val.Target.Package, ".")[0]) + } + } + if !justsearch { + mu := fmt.Sprintf("%s%s/", c.downloadLink, update.IncidentNumber) + update.Repositories, err = c.GetRepo(mu) + if err != nil { + return nil, fmt.Errorf("something went wrong in repo processing: %v", err) + } + } + updates = append(updates, update) + } + + return updates, err +} + +func (c *BuildServiceClient) RemoveOldChannels(config storage.StorageConfig, updates []Updates) error { + incidentNumbers := toIncidentNumberSet(updates) + + switch config.Type { + case "file": + var muChannelList []string + + downloadURL, err := url.Parse(c.downloadLink) + if err != nil { + return err + } + + err = filepath.Walk(filepath.Join(config.Path, downloadURL.Path[1:]), func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + muChannelList = append(muChannelList, path) + } + return nil + }) + if err != nil { + return err + } + + for _, elem := range muChannelList { + if regexp.MustCompile(`/\d{5,6}/`).FindString(elem) != "" { + _, exists := 
incidentNumbers[strings.Replace(regexp.MustCompile(`/\d{5,6}/`).FindString(elem), "/", "", 10)] + if !exists { + log.Printf("removing: %s...\n", elem) + err := os.RemoveAll(elem) + if err != nil { + return err + } + } + } + } + default: + return fmt.Errorf("unsupported storage type for cleanup") + } + return nil +} + +// GetRepo retrieves HTTP repositories data for all the products targets associated to an MU +func (c *BuildServiceClient) GetRepo(incidentNumber string) (httpFormattedRepos []get.HTTPRepo, err error) { + mu := fmt.Sprintf("%s%s/", c.downloadLink, incidentNumber) + + productsChunks, err := c.getProductsForMU(mu) + if err != nil { + return nil, fmt.Errorf("error retrieving products for MU %s: %v", mu, err) + } + fmt.Printf("%d product entries for mu %s\n", len(productsChunks), mu) + + reposChan := make(chan []get.HTTPRepo) + errChan := make(chan error) + // empty struct for 0 allocation: we need only to signal we're done, not pass data + doneChan := make(chan struct{}) + + // we need a dedicated goroutine to start the others, wait for them to finish + // and signal back that we're done processing + go func() { + var wg sync.WaitGroup + wg.Add(len(productsChunks)) + + // process each chunk (possibly) in parallel + for _, productChunk := range productsChunks { + go func(product, maint string) { + defer wg.Done() + + repo, err := c.procWebChunk(product, maint) + if err != nil { + errChan <- err + } + reposChan <- repo + + }(productChunk, mu) + } + + wg.Wait() + close(reposChan) + doneChan <- struct{}{} + }() + + // keeps looping until we're done processing all chunks or an error occurs + for { + select { + case repo := <-reposChan: + httpFormattedRepos = append(httpFormattedRepos, repo...) 
+ case err = <-errChan: + return nil, err + case <-doneChan: + close(errChan) + close(doneChan) + return httpFormattedRepos, nil + } + } +} + +// ProcWebChunk retrieves repositories data for a product target in a MU +func (c *BuildServiceClient) procWebChunk(product, maint string) ([]get.HTTPRepo, error) { + httpFormattedRepos := []get.HTTPRepo{} + repo := get.HTTPRepo{ + Archs: []string{}, + } + repoUrl := maint + product + + _, ok := register.Load(repoUrl) + if !ok { + exists, err := get.CheckWebPageExists(c.httpClient, repoUrl) + if err != nil { + return nil, err + } + register.Store(repoUrl, exists) + + if exists { + repo.URL = repoUrl + if err := c.archMage(&repo); err != nil { + return nil, err + } + fmt.Println(repo) + httpFormattedRepos = append(httpFormattedRepos, repo) + } + } + return httpFormattedRepos, nil +} + +// archMage checks that all architecture slice of a *HTTPRepo is filled right +func (c *BuildServiceClient) archMage(repo *get.HTTPRepo) error { + archsChan := make(chan string) + // we need a dedicated goroutine to start the others, wait for them to finish + // and signal back that we're done doing HTTP calls + go func() { + var wg sync.WaitGroup + wg.Add(len(architectures)) + + // verify each arch page exists (possibly) in parallel + for _, a := range architectures { + go func(arch string) { + defer wg.Done() + + if strings.Contains(repo.URL, arch) { + archsChan <- arch + return + } + + finalUrl := repo.URL + arch + "/" + exists, err := get.CheckWebPageExists(c.httpClient, finalUrl) + if err != nil { + // TODO: verify if we need to actually return an error + log.Printf("Got error calling HEAD %s: %v...\n", finalUrl, err) + } + if exists { + archsChan <- arch + } + }(a) + } + + wg.Wait() + close(archsChan) + }() + + for foundArch := range archsChan { + repo.Archs = append(repo.Archs, foundArch) + } + + if len(repo.Archs) == 0 { + return fmt.Errorf("no available arch has been found for this repo: %s", repo.URL) + } + return nil +} + +// 
getProductsForMU parses a MU webpage attempting to retrieve a slice of available SUSE products +func (c *BuildServiceClient) getProductsForMU(mu string) ([]string, error) { + fmt.Println("GET", mu) + resp, err := c.httpClient.Get(mu) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + chunks := strings.Split(string(body), "\"") + productsChunks := cleanWebChunks(chunks) + + return productsChunks, nil +} + +func (c *BuildServiceClient) do(req *http.Request, v interface{}) error { + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("got status code: %v for %q", resp.StatusCode, req.URL) + } + defer resp.Body.Close() + + err = xml.NewDecoder(resp.Body).Decode(v) + return err +} diff --git a/cmd/updates_test.go b/maint/client_test.go similarity index 74% rename from cmd/updates_test.go rename to maint/client_test.go index 9b0dda8..fe4bd3b 100644 --- a/cmd/updates_test.go +++ b/maint/client_test.go @@ -1,25 +1,12 @@ -/* -Copyright © 2021-2024 NAME HERE - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package cmd +package maint import ( "bytes" "errors" + "fmt" "io" "net/http" + "net/url" "sort" "testing" @@ -59,7 +46,8 @@ func createMockClient(baseUrl string, archs []string, forceError bool) *http.Cli StatusCode: 200, Body: io.NopCloser(bytes.NewBufferString(`SUSE_SLE-15-SP4_Update/ SLE-15-SP5_Update/ - SUSE_SLE-15-SP6_Update/`)), + SUSE_SLE-15-SP6_Update/ + openSUSE_15.4_Update_standard/`)), } for _, p := range productEntries { @@ -88,6 +76,47 @@ func createMockClient(baseUrl string, archs []string, forceError bool) *http.Cli } } +func TestNewBuildServiceClient(t *testing.T) { + tests := []struct { + name string + buildservice string + want *BuildServiceClient + wantErr bool + }{ + { + "IBS client", "ibs", + &BuildServiceClient{ + downloadLink: downloadIBSLink, + baseURL: &url.URL{Host: ibsAPI, Scheme: "https"}, + }, + false, + }, + { + "OBS client", "obs", + &BuildServiceClient{ + downloadLink: downloadOBSLink, + baseURL: &url.URL{Host: obsAPI, Scheme: "https"}, + }, + false, + }, + { + "Invalid client", "github", + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewBuildServiceClient(BuildService(tt.buildservice), "test", "test") + assert.EqualValues(t, tt.wantErr, (err != nil)) + if tt.want != nil { + assert.EqualValues(t, tt.want.downloadLink, got.downloadLink) + assert.EqualValues(t, tt.want.baseURL, got.baseURL) + } + }) + } +} + func TestArchMage(t *testing.T) { maint := "http://download.suse.de/ibs/SUSE:/Maintenance:/1/" @@ -104,13 +133,15 @@ func TestArchMage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - repo := get.HTTPRepoConfig{ + repo := get.HTTPRepo{ URL: maint + sle15sp4Entry + tt.urlArch, Archs: []string{}, } - mockClient := createMockClient(maint, tt.validArchs, false) + client := BuildServiceClient{ + httpClient: createMockClient(maint, tt.validArchs, false), + } - err := ArchMage(mockClient, &repo) + err := client.archMage(&repo) 
assert.EqualValues(t, tt.wantErr, (err != nil)) assert.ElementsMatch(t, tt.validArchs, repo.Archs) }) @@ -123,14 +154,14 @@ func TestProcWebChunk(t *testing.T) { maint string product string validArchs []string - want []get.HTTPRepoConfig + want []get.HTTPRepo netWorkErr bool wantErr bool }{ { "Valid maint repo - single valid arch", "http://download.suse.de/ibs/SUSE:/Maintenance:/2/", "SUSE_SLE-15-SP4_Update/", []string{"aarch64"}, - []get.HTTPRepoConfig{ + []get.HTTPRepo{ { URL: "http://download.suse.de/ibs/SUSE:/Maintenance:/2/SUSE_SLE-15-SP4_Update/", Archs: []string{"aarch64"}, @@ -142,7 +173,7 @@ func TestProcWebChunk(t *testing.T) { { "Valid maint repo - multiple valid archs", "http://download.suse.de/ibs/SUSE:/Maintenance:/3/", "SUSE_SLE-15-SP4_Update/", []string{"x86_64", "aarch64", "ppc64le", "s390x"}, - []get.HTTPRepoConfig{ + []get.HTTPRepo{ { URL: "http://download.suse.de/ibs/SUSE:/Maintenance:/3/SUSE_SLE-15-SP4_Update/", Archs: []string{"x86_64", "aarch64", "ppc64le", "s390x"}, @@ -154,23 +185,25 @@ func TestProcWebChunk(t *testing.T) { { "Valid maint repo - no valid archs", "http://download.suse.de/ibs/SUSE:/Maintenance:/4/", "SUSE_SLE-15-SP4_Update/", []string{}, - []get.HTTPRepoConfig{}, + []get.HTTPRepo{}, false, true, }, { "Network error", "http://download.suse.de/ibs/SUSE:/Maintenance:/5/", "SUSE_SLE-15-SP4_Update/", []string{}, - []get.HTTPRepoConfig{}, + []get.HTTPRepo{}, true, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - client := createMockClient(tt.maint, tt.validArchs, tt.netWorkErr) + client := BuildServiceClient{ + httpClient: createMockClient(tt.maint, tt.validArchs, tt.netWorkErr), + } - got, err := ProcWebChunk(client, tt.product, tt.maint) + got, err := client.procWebChunk(tt.product, tt.maint) assert.EqualValues(t, tt.wantErr, (err != nil)) assert.Equal(t, len(tt.want), len(got)) for i := range tt.want { @@ -193,7 +226,7 @@ func TestGetProductsForMU(t *testing.T) { { "Chunk without 'SUSE' is discarded", 
"http://download.suse.de/ibs/SUSE:/Maintenance:/6/", - []string{"SUSE_SLE-15-SP4_Update/", "SUSE_SLE-15-SP6_Update/"}, + []string{"SUSE_SLE-15-SP4_Update/", "SUSE_SLE-15-SP6_Update/", "openSUSE_15.4_Update_standard/"}, false, }, { @@ -205,9 +238,11 @@ func TestGetProductsForMU(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - client := createMockClient(tt.mu, []string{}, tt.wantErr) + client := BuildServiceClient{ + httpClient: createMockClient(tt.mu, []string{}, tt.wantErr), + } - got, err := getProductsForMU(client, tt.mu) + got, err := client.getProductsForMU(tt.mu) assert.EqualValues(t, tt.wantErr, (err != nil)) assert.ElementsMatch(t, tt.want, got) }) @@ -219,14 +254,14 @@ func TestGetRepo(t *testing.T) { name string mu string validArchs []string - want []get.HTTPRepoConfig + want []get.HTTPRepo netWorkErr bool wantErr bool }{ { - "Single arch", "http://download.suse.de/ibs/SUSE:/Maintenance:/8/", + "Single arch", "8", []string{"x86_64"}, - []get.HTTPRepoConfig{ + []get.HTTPRepo{ { URL: "http://download.suse.de/ibs/SUSE:/Maintenance:/8/SUSE_SLE-15-SP4_Update/", Archs: []string{"x86_64"}, @@ -240,9 +275,9 @@ func TestGetRepo(t *testing.T) { false, }, { - "Multiple archs", "http://download.suse.de/ibs/SUSE:/Maintenance:/9/", + "Multiple archs", "9", []string{"x86_64", "aarch64", "ppc64le", "s390x"}, - []get.HTTPRepoConfig{ + []get.HTTPRepo{ { URL: "http://download.suse.de/ibs/SUSE:/Maintenance:/9/SUSE_SLE-15-SP4_Update/", Archs: []string{"x86_64", "aarch64", "ppc64le", "s390x"}, @@ -256,25 +291,28 @@ func TestGetRepo(t *testing.T) { false, }, { - "No available archs", "http://download.suse.de/ibs/SUSE:/Maintenance:/10/", + "No available archs", "10", []string{}, - []get.HTTPRepoConfig{}, + []get.HTTPRepo{}, false, true, }, { - "Network error", "http://download.suse.de/ibs/SUSE:/Maintenance:/11/", + "Network error", "11", []string{}, - []get.HTTPRepoConfig{}, + []get.HTTPRepo{}, true, true, }, } for _, tt := range tests { 
// productRegEx matches HTML index entries that point at SUSE/openSUSE product
// directories, e.g. ">SUSE_SLE-15-SP4_Update/<". Compiled once at package level
// instead of on every cleanWebChunks call.
var productRegEx = regexp.MustCompile(`>((?:open)?SUSE[^<]+\/)<`)

// cleanWebChunks filters a slice of HTML elements and returns a slice containing only SUSE products
func cleanWebChunks(chunks []string) []string {
	products := []string{}
	for _, chunk := range chunks {
		if matches := productRegEx.FindStringSubmatch(chunk); matches != nil {
			// matches[1] is the captured product directory name, trailing slash included
			products = append(products, matches[1])
		}
	}
	return products
}
tt.want, got) + }) + } +} diff --git a/get/scc.go b/scc/scc.go similarity index 83% rename from get/scc.go rename to scc/scc.go index d3a12dd..13b3315 100644 --- a/get/scc.go +++ b/scc/scc.go @@ -1,4 +1,4 @@ -package get +package scc import ( "encoding/base64" @@ -8,27 +8,23 @@ import ( "net/http" "regexp" "strings" + + "github.com/uyuni-project/minima/get" ) // SCC defines the configuration to be used for downloading packages from SUSE Customer Center type SCC struct { Username string Password string - Repositories []SCCReposConfig + Repositories []SCCRepos } -// SCCRepoConfig defines the configuration of SCC repos sharing the same architectures -type SCCReposConfig struct { +// SCCRepos defines the names of SCC products/repos sharing the same architectures +type SCCRepos struct { Names []string Archs []string } -// HTTPRepoConfig defines the configuration of an HTTP repo -type HTTPRepoConfig struct { - URL string - Archs []string -} - // Repo represents the JSON entry for a repository as retuned by SCC API type Repo struct { URL string @@ -41,9 +37,9 @@ type Repo struct { type sccMap map[string][]string // SCCToHTTPConfigs returns HTTPS repos configurations (URL and archs) for repos in SCC -func SCCToHTTPConfigs(baseURL string, username string, password string, sccConfigs []SCCReposConfig) ([]HTTPRepoConfig, error) { +func SCCToHTTPConfigs(baseURL string, username string, password string, sccConfigs []SCCRepos) ([]get.HTTPRepo, error) { token := base64.URLEncoding.EncodeToString([]byte(username + ":" + password)) - httpConfigs := []HTTPRepoConfig{} + httpConfigs := []get.HTTPRepo{} // build a map of name - available archs entries to avoid repeated iterations // on sccConfigs when searching repos by name and archs @@ -90,9 +86,9 @@ func SCCToHTTPConfigs(baseURL string, username string, password string, sccConfi // getHTTPConfig attempts to match the given repo name and description to one of the given // sccMap entries and build a HTTRepoConfig for it. 
// -// Returns a HTTPRepoConfig and a bool indicating whether the match was successfull or not. -func getHTTPConfig(name, description, url string, sccEntries sccMap) (HTTPRepoConfig, bool) { - httpConfig := HTTPRepoConfig{ +// Returns a HTTPRepo and a bool indicating whether the match was successfull or not. +func getHTTPConfig(name, description, url string, sccEntries sccMap) (get.HTTPRepo, bool) { + httpConfig := get.HTTPRepo{ Archs: []string{}, } @@ -125,7 +121,10 @@ func downloadPaged(url string, token string) (page []byte, next string, err erro } if resp.StatusCode != 200 { - err = &UnexpectedStatusCodeError{url, resp.StatusCode} + err = &get.UnexpectedStatusCodeError{ + URL: url, + StatusCode: resp.StatusCode, + } return } diff --git a/get/scc_test.go b/scc/scc_test.go similarity index 84% rename from get/scc_test.go rename to scc/scc_test.go index e66e8d9..e1d764c 100644 --- a/get/scc_test.go +++ b/scc/scc_test.go @@ -1,25 +1,30 @@ -package get +package scc import ( "encoding/base64" "fmt" "net/http" + "net/http/httptest" "testing" "github.com/stretchr/testify/assert" + "github.com/uyuni-project/minima/get" ) func TestSCCToHTTPConfigs(t *testing.T) { expectedToken := base64.URLEncoding.EncodeToString([]byte("user:pass")) expectedAuth := "Basic " + expectedToken + server := httptest.NewServer(http.DefaultServeMux) + defer server.Close() + http.HandleFunc("/connect/organizations/repositories", func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("Authorization") != expectedAuth { w.WriteHeader(401) return } - w.Header().Set("Link", "; rel=\"next\"") + w.Header().Set("Link", fmt.Sprintf("<%s/connect/organizations/repositories2>; rel=\"next\"", server.URL)) fmt.Fprintf(w, "[{\"url\" : \"http://whatever/SLES15-SP5-Pool\", \"name\" : \"SLES15-SP5-Pool\", \"description\" : \"x86_64 aarch64 i586\"}]") }) @@ -34,19 +39,19 @@ func TestSCCToHTTPConfigs(t *testing.T) { pass string names []string archs []string - want []HTTPRepoConfig + want []get.HTTPRepo 
wantErr bool }{ { "One name and no matching arch", "user", "pass", []string{"SLES15-SP5-Pool"}, []string{"s390x"}, - []HTTPRepoConfig{}, + []get.HTTPRepo{}, false, }, { "One name and one matching arch", "user", "pass", []string{"SLES15-SP5-Pool"}, []string{"x86_64"}, - []HTTPRepoConfig{ + []get.HTTPRepo{ {URL: "http://whatever/SLES15-SP5-Pool", Archs: []string{"x86_64"}}, }, false, @@ -54,7 +59,7 @@ func TestSCCToHTTPConfigs(t *testing.T) { { "One name and multiple matching archs", "user", "pass", []string{"SLES15-SP5-Pool"}, []string{"aarch64", "i586"}, - []HTTPRepoConfig{ + []get.HTTPRepo{ {URL: "http://whatever/SLES15-SP5-Pool", Archs: []string{"aarch64", "i586"}}, }, false, @@ -62,13 +67,13 @@ func TestSCCToHTTPConfigs(t *testing.T) { { "Multiple names and no matching archs", "user", "pass", []string{"SLES15-SP5-Pool", "SLES15-SP5-Updates"}, []string{"src"}, - []HTTPRepoConfig{}, + []get.HTTPRepo{}, false, }, { "Multiple names and multiple matching archs", "user", "pass", []string{"SLES15-SP5-Pool", "SLES15-SP5-Updates"}, []string{"x86_64", "aarch64"}, - []HTTPRepoConfig{ + []get.HTTPRepo{ {URL: "http://whatever/SLES15-SP5-Pool", Archs: []string{"x86_64", "aarch64"}}, {URL: "http://whatever/SLES15-SP5-Updates", Archs: []string{"x86_64", "aarch64"}}, }, @@ -77,25 +82,26 @@ func TestSCCToHTTPConfigs(t *testing.T) { { "Invalid user", "thiswillfail", "pass", []string{"SLES15-SP5-Pool"}, []string{"x86_64"}, - []HTTPRepoConfig{}, + []get.HTTPRepo{}, true, }, { "Invalid password", "user", "thiswillfail", []string{"SLES15-SP5-Pool"}, []string{"x86_64"}, - []HTTPRepoConfig{}, + []get.HTTPRepo{}, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - httpConfigs, err := SCCToHTTPConfigs("http://localhost:8080", tt.user, tt.pass, []SCCReposConfig{ + httpConfigs, err := SCCToHTTPConfigs(server.URL, tt.user, tt.pass, []SCCRepos{ { Names: tt.names, Archs: tt.archs, }, }) + fmt.Println(err) assert.EqualValues(t, tt.wantErr, (err != nil)) assert.Equal(t, 
len(tt.want), len(httpConfigs)) diff --git a/get/filestorage.go b/storage/filestorage.go similarity index 85% rename from get/filestorage.go rename to storage/filestorage.go index 4b5b7d9..073d78c 100644 --- a/get/filestorage.go +++ b/storage/filestorage.go @@ -1,9 +1,8 @@ -package get +package storage import ( "crypto" "io" - "log" "os" "path" @@ -22,25 +21,22 @@ func NewFileStorage(directory string) Storage { // NewReader returns a Reader for a file in a location, returns ErrFileNotFound // if the requested path was not found at all -func (s *FileStorage) NewReader(filename string, location Location) (reader io.ReadCloser, err error) { - var prefix string - if location == Permanent { - prefix = "" - } else { - prefix = "-in-progress" +func (s *FileStorage) NewReader(filename string, location Location) (io.ReadCloser, error) { + var suffix string + if location != Permanent { + suffix = "-in-progress" } - fullPath := path.Join(s.directory+prefix, filename) + + fullPath := path.Join(s.directory+suffix, filename) stat, err := os.Stat(fullPath) if os.IsNotExist(err) || stat == nil { - err = ErrFileNotFound - return + return nil, ErrFileNotFound } f, err := os.Open(fullPath) if err != nil { - log.Fatal(err) + return nil, err } - return f, err } @@ -82,17 +78,21 @@ func (s *FileStorage) Recycle(filename string) (err error) { // Commit moves any temporary file accumulated so far to the permanent location func (s *FileStorage) Commit() (err error) { + // remove previous tmp backups err = os.RemoveAll(s.directory + "-old") if err != nil { return } + // tmp backup in case something goes wrong err = os.Rename(s.directory, s.directory+"-old") if err != nil && !os.IsNotExist(err) { return } + // move from in-progress to the final repo err = os.Rename(s.directory+"-in-progress", s.directory) if err != nil { return } + // cleanup tmp backup return os.RemoveAll(s.directory + "-old") } diff --git a/storage/filestorage_test.go b/storage/filestorage_test.go new file mode 100644 
index 0000000..66b6067 --- /dev/null +++ b/storage/filestorage_test.go @@ -0,0 +1,41 @@ +package storage + +import ( + "io" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + testdataDir = "testdata/testrepo" + inProgressDir = "testdata/testrepo-in-progress" + testFile = "test.txt" +) + +func TestNewReader(t *testing.T) { + tests := []struct { + name string + directory string + filename string + location Location + wantErr bool + }{ + {"Permanent location", testdataDir, testFile, Permanent, false}, + {"Temporary location", testdataDir, testFile, Temporary, false}, + {"Not existing file", testdataDir, "does-not-exist.txt", Permanent, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + storage := NewFileStorage(tt.directory) + + r, err := storage.NewReader(tt.filename, tt.location) + assert.EqualValues(t, tt.wantErr, (err != nil)) + if r != nil { + content, err := io.ReadAll(r) + assert.Nil(t, err) + assert.EqualValues(t, "Hello World", string(content)) + } + }) + } +} diff --git a/get/s3storage.go b/storage/s3storage.go similarity index 98% rename from get/s3storage.go rename to storage/s3storage.go index db8e031..80e8e7f 100644 --- a/get/s3storage.go +++ b/storage/s3storage.go @@ -1,4 +1,4 @@ -package get +package storage import ( "crypto" @@ -64,7 +64,7 @@ func configureBucket(region string, bucket string, svc *s3.S3) error { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeBucketAlreadyExists: - return errors.New("Bucket name already taken by another AWS user, please use a different name") + return errors.New("bucket name already taken by another AWS user, please use a different name") case s3.ErrCodeBucketAlreadyOwnedByYou: return nil default: diff --git a/get/storage.go b/storage/storage.go similarity index 66% rename from get/storage.go rename to storage/storage.go index efdd9a5..b1f6e5d 100644 --- a/get/storage.go +++ b/storage/storage.go @@ -1,9 +1,11 @@ -package get +package storage 
import ( "crypto" "errors" "io" + "net/url" + "path/filepath" "github.com/uyuni-project/minima/util" ) @@ -47,4 +49,21 @@ type Storage interface { } // ErrFileNotFound signals that the requested file was not found -var ErrFileNotFound = errors.New("File not found") +var ErrFileNotFound = errors.New("file not found") + +// ErrInvalidStorageType signals that the storage type is not supported +var ErrInvalidStorageType = errors.New("invalid storage type") + +// StorageFromConfig returns the Storage implementation defined in the .yaml configuration +// +// Returns an error if the storage type is not supported or it was not possible to initialize it correctly +func FromConfig(config StorageConfig, repoURL *url.URL) (Storage, error) { + switch config.Type { + case "file": + return NewFileStorage(filepath.Join(config.Path, filepath.FromSlash(repoURL.Path))), nil + case "s3": + return NewS3Storage(config.AccessKeyID, config.AccessKeyID, config.Region, config.Bucket+repoURL.Path) + default: + return nil, ErrInvalidStorageType + } +} diff --git a/storage/storage_test.go b/storage/storage_test.go new file mode 100644 index 0000000..120f1f7 --- /dev/null +++ b/storage/storage_test.go @@ -0,0 +1,54 @@ +package storage + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFromConfig(t *testing.T) { + tests := []struct { + name string + config StorageConfig + repoURL string + want Storage + wantErr bool + }{ + { + "Invalid Storage type", + StorageConfig{ + Type: "memory", + Path: "/srv/mirror/", + }, + "http://test/some-product/", + nil, + true, + }, + { + "File Storage", + StorageConfig{ + Type: "file", + Path: "/srv/mirror/", + }, + "http://test/some-product/", + &FileStorage{ + directory: "/srv/mirror/some-product", + }, + false, + }, + // TODO: S3 Storage + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repoURL, err := url.Parse(tt.repoURL) + if err != nil { + t.Fatalf("Failed to parse repo URL %s: %v", tt.repoURL, 
err) + } + + got, err := FromConfig(tt.config, repoURL) + assert.EqualValues(t, tt.wantErr, (err != nil)) + assert.EqualValues(t, tt.want, got) + }) + } +} diff --git a/storage/testdata/testrepo-in-progress/test.txt b/storage/testdata/testrepo-in-progress/test.txt new file mode 100644 index 0000000..5e1c309 --- /dev/null +++ b/storage/testdata/testrepo-in-progress/test.txt @@ -0,0 +1 @@ +Hello World \ No newline at end of file diff --git a/storage/testdata/testrepo/test.txt b/storage/testdata/testrepo/test.txt new file mode 100644 index 0000000..5e1c309 --- /dev/null +++ b/storage/testdata/testrepo/test.txt @@ -0,0 +1 @@ +Hello World \ No newline at end of file diff --git a/updates/obs.go b/updates/obs.go deleted file mode 100644 index d1f5fd5..0000000 --- a/updates/obs.go +++ /dev/null @@ -1,98 +0,0 @@ -// Lib's Co-Author: Felix Gerling, fgerling@suse.com, https://github.com/fgerling -package updates - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" -) - -type OBS struct { - Username string - Password string -} - -func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) { - rel := &url.URL{Path: path} - u := c.BaseURL.ResolveReference(rel) - var buf io.ReadWriter - if body != nil { - buf = new(bytes.Buffer) - err := xml.NewEncoder(buf).Encode(body) - if err != nil { - return nil, err - } - } - req, err := http.NewRequest(method, u.String(), buf) - if err != nil { - return nil, err - } - req.SetBasicAuth(c.Username, c.Password) - if body != nil { - req.Header.Set("Content-Type", "application/xml") - } - req.Header.Set("Accept", "application/xml") - return req, nil -} - -func (c *Client) GetReleaseRequests(group string, state string) ([]ReleaseRequest, error) { - req, err := c.NewRequest("GET", "/request", nil) - if err != nil { - return nil, err - } - q := req.URL.Query() - q.Add("view", "collection") - q.Add("group", group) - q.Add("states", state) - req.URL.RawQuery = q.Encode() - - var collection 
Collection - _, err = c.do(req, &collection) - return collection.ReleaseRequests, err -} - -func (c *Client) GetPatchinfo(rr ReleaseRequest) (*Patchinfo, error) { - project := rr.Actions[0].Source.Project - patchinfo_url := fmt.Sprintf("/source/%v/patchinfo/_patchinfo", project) - req, err := c.NewRequest("GET", patchinfo_url, nil) - if err != nil { - return nil, err - } - var patchinfo Patchinfo - _, err = c.do(req, &patchinfo) - return &patchinfo, err -} - -func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { - resp, err := c.HttpClient.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return nil, fmt.Errorf("got status code: %v for %q", resp.StatusCode, req.URL) - } - defer resp.Body.Close() - err = xml.NewDecoder(resp.Body).Decode(v) - return resp, err -} - -func NewClient(username string, password string) *Client { - return &Client{ - BaseURL: &url.URL{Host: baseUrl, Scheme: "https"}, - Username: username, - Password: password, - HttpClient: &http.Client{}, - } -} - -func CheckWebPageExists(client *http.Client, repoURL string) (bool, error) { - resp, err := client.Head(repoURL) - if err != nil { - return false, err - } - - return resp.Status == "200 OK", nil -} diff --git a/util/debian.go b/util/debian.go index b09fea7..becd3a3 100644 --- a/util/debian.go +++ b/util/debian.go @@ -1,60 +1,60 @@ package util import ( - "bufio" - "fmt" - "io" - "strings" + "bufio" + "fmt" + "io" + "strings" ) func ProcessPropertiesFile(reader io.Reader) (entries []map[string]string, err error) { - entries = make([]map[string]string, 0) - currentEntry := make(map[string]string) - var key, value string + entries = make([]map[string]string, 0) + currentEntry := make(map[string]string) + var key, value string - scanner := bufio.NewScanner(reader) + scanner := bufio.NewScanner(reader) - for scanner.Scan() { - line := scanner.Text() - if len(line) == 0 { - // Empty line means end of previous entry, create 
a new one - if key != "" { - currentEntry[key] = value - key = "" - value = "" - } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 { + // Empty line means end of previous entry, create a new one + if key != "" { + currentEntry[key] = value + key = "" + value = "" + } - entries = append(entries, currentEntry) - currentEntry = make(map[string]string) - } else { - if line[0] != ' ' { - // Line with no space: parse the key and value - if key != "" { - currentEntry[key] = value - key = "" - value = "" - } + entries = append(entries, currentEntry) + currentEntry = make(map[string]string) + } else { + if line[0] != ' ' { + // Line with no space: parse the key and value + if key != "" { + currentEntry[key] = value + key = "" + value = "" + } - parts := strings.SplitN(line, ":", 2) - if len(parts) != 2 { - err = fmt.Errorf("Invalid line: '%s'", line) - return - } - key = strings.Trim(parts[0], " ") - value = strings.Trim(parts[1], " ") - } else { - // Line with space: append to current value - value = strings.Trim(value + "\n" + strings.Trim(line, " "), "\n") - } - } - } - // Ensure we have the last key/value pair and entry - if key != "" { - currentEntry[key] = value - } + parts := strings.SplitN(line, ":", 2) + if len(parts) != 2 { + err = fmt.Errorf("Invalid line: '%s'", line) + return + } + key = strings.Trim(parts[0], " ") + value = strings.Trim(parts[1], " ") + } else { + // Line with space: append to current value + value = strings.Trim(value+"\n"+strings.Trim(line, " "), "\n") + } + } + } + // Ensure we have the last key/value pair and entry + if key != "" { + currentEntry[key] = value + } - if len(currentEntry) > 0 { - entries = append(entries, currentEntry) - } - return + if len(currentEntry) > 0 { + entries = append(entries, currentEntry) + } + return } diff --git a/util/debian_test.go b/util/debian_test.go index f5a0e8f..4e267ed 100644 --- a/util/debian_test.go +++ b/util/debian_test.go @@ -1,13 +1,13 @@ package util import ( - "bytes" - 
"testing" - "github.com/stretchr/testify/assert" + "bytes" + "github.com/stretchr/testify/assert" + "testing" ) func TestProcessPropertiesFile(t *testing.T) { - data := []byte(`item1.key1: value1 + data := []byte(`item1.key1: value1 item1.key2: value2 item1.key3: value3.1 value3.2 @@ -19,26 +19,26 @@ item2.key3: value3.1 value3.2`) - expected := []map[string]string{ - { - "item1.key1": "value1", - "item1.key2": "value2", - "item1.key3": "value3.1\nvalue3.2\nvalue3.3", - }, - { - "item2.key1": "value1", - "item2.key2": "value2", - "item2.key3": "value3.1\nvalue3.2", - }, - } + expected := []map[string]string{ + { + "item1.key1": "value1", + "item1.key2": "value2", + "item1.key3": "value3.1\nvalue3.2\nvalue3.3", + }, + { + "item2.key1": "value1", + "item2.key2": "value2", + "item2.key3": "value3.1\nvalue3.2", + }, + } - actual, err := ProcessPropertiesFile(bytes.NewReader(data)) - assert.EqualValues(t, expected, actual) - assert.Nil(t, err) + actual, err := ProcessPropertiesFile(bytes.NewReader(data)) + assert.EqualValues(t, expected, actual) + assert.Nil(t, err) - badData := []byte(`item1.key1: value1 + badData := []byte(`item1.key1: value1 bad line`) - actual, err = ProcessPropertiesFile(bytes.NewReader(badData)) - assert.Error(t, err) + actual, err = ProcessPropertiesFile(bytes.NewReader(badData)) + assert.Error(t, err) } diff --git a/util/io.go b/util/io.go index e3c4da5..48fddb0 100644 --- a/util/io.go +++ b/util/io.go @@ -6,7 +6,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" ) // ReaderConsumer consumes bytes from a Reader @@ -76,7 +75,7 @@ var discardBuffer = make([]byte, 4*1024*1024) // Close closes the internal reader and writer func (t *TeeReadCloser) Close() (err error) { // read any remaining bytes from the teeReader (discarding them) - _, err = io.CopyBuffer(ioutil.Discard, t.teeReader, discardBuffer) + _, err = io.CopyBuffer(io.Discard, t.teeReader, discardBuffer) if err != nil { t.reader.Close() t.writer.Close()