diff --git a/.github/ISSUE_TEMPLATE/download-or-update-issue.md b/.github/ISSUE_TEMPLATE/download-or-update-issue.md index c6bf66d..2c2b6f9 100644 --- a/.github/ISSUE_TEMPLATE/download-or-update-issue.md +++ b/.github/ISSUE_TEMPLATE/download-or-update-issue.md @@ -9,8 +9,8 @@ assignees: '' **What project were you trying to use? (e.g. Blender, Arch Linux, Gentoo):** -**What failed? (e.g. ISO download failed, `apt upgrade` failed, project is out of sync, Blender download failed):** +**What failed? (e.g. ISO download failed, `apt upgrade` failed, project is out of sync):** -**When did you first experience the issue:** +**Are you an official project maintainer?:** -**Please provide any other information:** +Feel free to add any other details you think are relevant :smile: diff --git a/.github/ISSUE_TEMPLATE/mirror-request.md b/.github/ISSUE_TEMPLATE/mirror-request.md index 69ebf2a..287e441 100644 --- a/.github/ISSUE_TEMPLATE/mirror-request.md +++ b/.github/ISSUE_TEMPLATE/mirror-request.md @@ -7,10 +7,8 @@ assignees: '' --- -## Include full project name, a short name, and the home page url +## Include full project name, a short name, and the home page url. -## Are you an official maintainer of this project? How would we become an "Official" mirror +## Please provide documentation on how to mirror the project -## Is there a link to documentation on how to mirror the project - -## (Optional) Around how much disk space is required to host the project +## Are you an official maintainer of this project? 
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 7f7e988..c918b58 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -2,24 +2,58 @@ name: Go on: push: - branches: [ master ] + branches: [ "main" ] pull_request: - branches: [ master ] + branches: [ "main" ] jobs: - build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: - go-version: 1.18 + go-version: '1.21' - name: Build run: go build -v ./... + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.21' + + - name: Verify modfile + run: go mod verify + + - name: Vet + run: go vet ./... + + - name: Install golint + run: go install golang.org/x/lint/golint@latest + + - name: Lint + run: golint -set_exit_status ./... + + - name: Check code formatting using gofmt + uses: Jerome1337/gofmt-action@v1.0.5 + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.21' + - name: Test - run: go test -v ./... + run: go test -v ./... \ No newline at end of file diff --git a/README.md b/README.md index 83d2443..65db21a 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ # Mirror -Monolithic software for our [mirror](https://mirror.clarkson.edu) that handles the website, tracking, and scheduling systems. We use an influxdb time series database for storage. +Monolithic software for our [mirror](https://mirror.clarkson.edu) that handles the website, tracking, and scheduling systems. We use an influxdb time series database for storage. 
![preview](./preview.png) ## Setup -``` +```cli git clone --recurse_submodule git@github.com:COSI-Lab/Mirror.git ``` @@ -15,17 +15,8 @@ git clone --recurse_submodule git@github.com:COSI-Lab/Mirror.git Secrets and some configuration is managed through creating a `.env` file. ```text -# "adm" group id. check with "getent group admin" -# the user running this script should be in the "adm" group -# so that they can read and write log files. -ADM_GROUP= - -# Discord Webhook URL and id to ping when things panic -# Omit either and the bot will not communicate with discord -HOOK_URL= -PING_ID= - -# Maxmind DB token to update the database, omit and we'll only use a local copy if it exists +# Maxmind DB token to update the database. Omit and we'll only use a local copy if it exists +# Note: The maxmind DB license requires we use an up-to-date copy MAXMIND_LICENSE_KEY= # InfluxDB Token @@ -54,9 +45,6 @@ SYNC_DRY_RUN=true # Directory to store the rsync log files, if empty then we don't keep logs. It will be created if it doesn't exist. 
RSYNC_LOGS= -# "true" if we should cache the result of executing templates -WEB_SERVER_CACHE=false - # Secret pull token PULL_TOKEN=token ``` diff --git a/_typos.toml b/_typos.toml index b552116..7fbfb14 100644 --- a/_typos.toml +++ b/_typos.toml @@ -1,5 +1,6 @@ [default.extend-words] templeos = "templeos" +WRONLY = "WRONLY" [files] extend-exclude = ["scripts"] diff --git a/aggregator.go b/aggregator.go new file mode 100644 index 0000000..fadd1d4 --- /dev/null +++ b/aggregator.go @@ -0,0 +1,50 @@ +package main + +import ( + "time" + + "github.com/influxdata/influxdb-client-go/v2/api" +) + +// Aggregator is an interface for aggregating a metric `T` +type Aggregator[T any] interface { + // Initialize the aggregator with a starting value from influxdb + Init(reader api.QueryAPI) (lastUpdated time.Time, err error) + + // Aggregate adds metric T into the aggregator + Aggregate(entry T) + + // Send the aggregated statistics to influxdb + Send(writer api.WriteAPI) +} + +// StartAggregator starts the aggregator with the given Aggregator implementation, channel of type T, influxdb QueryAPI and WriteAPI. +// It returns the lastUpdated time and an error if any occurred during initialization. 
+func StartAggregator[T any](aggregator Aggregator[T], c <-chan T, reader api.QueryAPI, writer api.WriteAPI) (lastUpdated time.Time, err error) { + lastUpdated, err = aggregator.Init(reader) + if err != nil { + return lastUpdated, err + } + + go func() { + ticker := time.NewTicker(time.Minute) + + for { + select { + case <-ticker.C: + aggregator.Send(writer) + case entry := <-c: + aggregator.Aggregate(entry) + } + } + }() + + return lastUpdated, nil +} + +// NetStat is a commonly used struct for aggregating network statistics +type NetStat struct { + BytesSent int64 + BytesRecv int64 + Requests int64 +} diff --git a/aggregator_nginx.go b/aggregator_nginx.go new file mode 100644 index 0000000..8011dcc --- /dev/null +++ b/aggregator_nginx.go @@ -0,0 +1,371 @@ +package main + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "log" + "net" + "net/url" + "os" + "path" + "regexp" + "strconv" + "strings" + "time" + + "github.com/COSI-Lab/Mirror/logging" + "github.com/IncSW/geoip2" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api" + "github.com/nxadm/tail" +) + +// It is critical that NGINX uses the following log format: +/* + * log_format config '"$time_local" "$remote_addr" "$request" "$status" "$body_bytes_sent" "$request_length" "$http_user_agent"'; + * access_log /var/log/nginx/access.log config; + */ + +// ProjectStatistics is a shorthand for map[string]*NetStat +type ProjectStatistics map[string]*NetStat + +// NGINXProjectAggregator measures the popularity of each project (bytes sent, bytes received, and number of requests) +// +// It is given a stream of NGINXLogEntries and aggregates the statistics for each project +type NGINXProjectAggregator struct { + stats map[string]ProjectStatistics + + // filter function + filters map[string]func(NGINXLogEntry) bool +} + +// NewNGINXProjectAggregator creates a new NGINXProjectAggregator with no measurements +func NewNGINXProjectAggregator() 
*NGINXProjectAggregator { + return &NGINXProjectAggregator{ + stats: map[string]ProjectStatistics{}, + filters: map[string]func(NGINXLogEntry) bool{}, + } +} + +// AddMeasurement adds a measurement to the aggregator +// measurement is the name of the measurement in influxdb (e.g. "nginx" or "clarkson") +// filter is a function that checks if an entry should be counted for this measurement +func (aggregator *NGINXProjectAggregator) AddMeasurement(measurement string, filter func(NGINXLogEntry) bool) { + aggregator.stats[measurement] = make(ProjectStatistics) + aggregator.filters[measurement] = filter +} + +// Init initializes the aggregator by querying the database for the latest statistics +func (aggregator *NGINXProjectAggregator) Init(reader api.QueryAPI) (lastUpdated time.Time, err error) { + for measurement := range aggregator.filters { + // You can paste this into the influxdb data explorer + // Replace MEASUREMENT with "nginx" or "clarkson" + /* + from(bucket: "stats") + |> range(start: 0, stop: now()) + |> filter(fn: (r) => r["_measurement"] == "MEASUREMENT") + |> filter(fn: (r) => r["_field"] == "bytes_sent" or r["_field"] == "bytes_recv" or r["_field"] == "requests") + |> last() + |> group(columns: ["Project"], mode: "by") + */ + request := fmt.Sprintf("from(bucket: \"stats\") |> range(start: 0, stop: now()) |> filter(fn: (r) => r[\"_measurement\"] == \"%s\") |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\" or r[\"_field\"] == \"bytes_recv\" or r[\"_field\"] == \"requests\") |> last() |> group(columns: [\"Project\"], mode: \"by\")", measurement) + + // try the query at most 5 times + var result *api.QueryTableResult + for i := 0; i < 5; i++ { + result, err = reader.Query(context.Background(), request) + + if err != nil { + logging.Warn("Failed to querying influxdb nginx statistics", err) + time.Sleep(time.Second) + continue + } + + break + } + + if err != nil { + return lastUpdated, errors.New("error querying influxdb") + } + + stats := 
make(ProjectStatistics) + + for result.Next() { + if result.Err() != nil { + logging.Warn("QueryProjectStatistics Flux Query Error", result.Err()) + continue + } + + dp := result.Record() + lastUpdated = dp.Time() + + // Get the Project short name + Project, ok := dp.ValueByKey("Project").(string) + if !ok { + logging.Warn("Error getting Project short name") + fmt.Printf("%T %v\n", Project, Project) + continue + } + + // Create a new NetStat for the project if it doesn't exist + if _, ok := stats[Project]; !ok { + stats[Project] = &NetStat{} + } + + field, ok := dp.ValueByKey("_field").(string) + if !ok { + logging.Warn("Error getting field") + fmt.Printf("%T %v\n", field, field) + continue + } + + switch field { + case "bytes_sent": + sent, ok := dp.ValueByKey("_value").(int64) + if !ok { + logging.Warn("Error getting bytes sent") + fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) + continue + } + stats[Project].BytesSent = sent + case "bytes_recv": + received, ok := dp.ValueByKey("_value").(int64) + if !ok { + logging.Warn("Error getting bytes recv") + fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) + continue + } + stats[Project].BytesRecv = received + case "requests": + requests, ok := dp.ValueByKey("_value").(int64) + if !ok { + logging.Warn("Error getting requests") + fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) + continue + } + stats[Project].Requests = requests + } + } + result.Close() + + // Add "other" and "total" to the stats if they don't exist + if _, ok := stats["other"]; !ok { + stats["other"] = &NetStat{} + } + + if _, ok := stats["total"]; !ok { + stats["total"] = &NetStat{} + } + } + + return lastUpdated, nil +} + +// Aggregate adds a NGINXLogEntry to the aggregator +func (aggregator *NGINXProjectAggregator) Aggregate(entry NGINXLogEntry) { + for measurement, filter := range aggregator.filters { + if !filter(entry) { + return + } + + stat := aggregator.stats[measurement] + + 
if _, ok := aggregator.stats[entry.Project]; ok { + stat[entry.Project].BytesSent += entry.BytesSent + stat[entry.Project].BytesRecv += entry.BytesRecv + stat[entry.Project].Requests++ + } else { + stat["other"].BytesSent += entry.BytesSent + stat["other"].BytesRecv += entry.BytesRecv + stat["other"].Requests++ + } + + stat["total"].BytesSent += entry.BytesSent + stat["total"].BytesRecv += entry.BytesRecv + stat["total"].Requests++ + } +} + +func (aggregator *NGINXProjectAggregator) Send(writer api.WriteAPI) { + t := time.Now() + + for measurement, stats := range aggregator.stats { + for short, stat := range stats { + p := influxdb2.NewPoint(measurement, + map[string]string{"Project": short}, + map[string]interface{}{ + "bytes_sent": stat.BytesSent, + "bytes_recv": stat.BytesRecv, + "requests": stat.Requests, + }, t) + writer.WritePoint(p) + } + } +} + +// NGINXLogEntry is a struct that represents a parsed nginx log entry +type NGINXLogEntry struct { + IP net.IP + City *geoip2.CityResult + Time time.Time + Method string + Project string + Url string + Version string + Status int + BytesSent int64 + BytesRecv int64 + Agent string +} + +var reQuotes = regexp.MustCompile(`"(.*?)"`) + +// TailNGINXLogFile tails a log file and sends the parsed log entries to the specified channels +func TailNGINXLogFile(logFile string, lastUpdated time.Time, channels []chan<- NGINXLogEntry) { + start := time.Now() + + f, err := os.Open(logFile) + if err != nil { + logging.Error(err) + return + } + + // Performs a linear scan of the log file to find the offset to continue tailing from + offset := int64(0) + s := bufio.NewScanner(f) + for s.Scan() { + tm, err := parseNginxDate(s.Text()) + if err == nil && tm.After(lastUpdated) { + break + } + offset += int64(len(s.Text()) + 1) + } + logging.Info("Found nginx log offset in", time.Since(start)) + + // Tail the log file `tail -F` starting at the offset + seek := tail.SeekInfo{ + Offset: offset, + Whence: io.SeekStart, + } + tail, err := 
tail.TailFile(logFile, tail.Config{Follow: true, ReOpen: true, MustExist: true, Location: &seek}) + if err != nil { + logging.Error("Failed to start tailing `nginx.log`:", err) + return + } + + logging.Success("Tailing nginx log file") + + // Parse each line as we receive it + for line := range tail.Lines { + entry, err := parseNginxLine(line.Text) + + if err == nil { + for ch := range channels { + channels[ch] <- entry + } + } + } +} + +// parseNginxDate parses a single line of the nginx log file and returns the time.Time of the line +func parseNginxDate(line string) (time.Time, error) { + return time.Parse("\"02/Jan/2006:15:04:05 -0700\"", reQuotes.FindString(line)) +} + +// parseNginxLine parses a single line of the nginx log file +// It's critical the log file uses the correct format found at the top of this file +// If the log file is not in the correct format or if some other part of the parsing fails +// this function will return an error +func parseNginxLine(line string) (entry NGINXLogEntry, err error) { + // "$time_local" "$remote_addr" "$request" "$status" "$body_bytes_sent" "$request_length" "$http_user_agent"; + quoteList := reQuotes.FindAllString(line, -1) + + if len(quoteList) != 7 { + return NGINXLogEntry{}, errors.New("invalid number of parameters in log entry") + } + + // Trim quotation marks + for i := 0; i < len(quoteList); i++ { + quoteList[i] = quoteList[i][1 : len(quoteList[i])-1] + } + + // Time + entry.Time, err = time.Parse("02/Jan/2006:15:04:05 -0700", quoteList[0]) + if err != nil { + return entry, err + } + + // IPv4 or IPv6 address + entry.IP = net.ParseIP(quoteList[1]) + if entry.IP == nil { + return entry, errors.New("failed to parse ip") + } + + // Optional GeoIP lookup + if geoipHandler != nil { + city, err := geoipHandler.Lookup(entry.IP) + if err != nil { + entry.City = nil + } else { + entry.City = city + } + } else { + entry.City = nil + } + + // method url http version + split := strings.Split(quoteList[2], " ") + if 
len(split) != 3 { + // this should never fail + return entry, errors.New("invalid number of strings in request") + } + entry.Method = split[0] + entry.Url = split[1] + entry.Version = split[2] + + // Project is the top part of the URL path + u, err := url.Parse(entry.Url) + if err != nil { + log.Fatal(err) + } + // Parse the path + path := path.Clean(u.EscapedPath()) + // Return the first part of the path + if pathSplit := strings.Split(path, "/"); len(pathSplit) > 1 { + project := pathSplit[1] + entry.Project = project + } else { + return entry, errors.New("could not parse project name") + } + + // HTTP response status + status, err := strconv.Atoi(quoteList[3]) + if err != nil { + // this should never fail + return entry, errors.New("could not parse http response status") + } + entry.Status = status + + // Bytes sent int64 + bytesSent, err := strconv.ParseInt(quoteList[4], 10, 64) + if err != nil { + // this should never fail + return entry, errors.New("could not parse bytes_sent") + } + entry.BytesSent = bytesSent + + // Bytes received + bytesRecv, err := strconv.ParseInt(quoteList[5], 10, 64) + if err != nil { + return entry, errors.New("could not parse bytes_recv") + } + entry.BytesRecv = bytesRecv + + // User agent + entry.Agent = quoteList[6] + + return entry, nil +} diff --git a/aggregator_rsyncd.go b/aggregator_rsyncd.go new file mode 100644 index 0000000..3f95212 --- /dev/null +++ b/aggregator_rsyncd.go @@ -0,0 +1,246 @@ +package main + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/COSI-Lab/Mirror/logging" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api" + "github.com/nxadm/tail" +) + +type RsyncdAggregator struct { + stat NetStat +} + +func NewRSYNCProjectAggregator() *RsyncdAggregator { + return &RsyncdAggregator{} +} + +func (a *RsyncdAggregator) Init(reader api.QueryAPI) (lastUpdated time.Time, err error) { + // You can 
paste this into the influxdb data explorer + /* + from(bucket: "stats") + |> range(start: 0, stop: now()) + |> filter(fn: (r) => r["_measurement"] == "rsyncd") + |> filter(fn: (r) => r["_field"] == "bytes_sent" or r["_field"] == "bytes_recv" or r["_field"] == "requests") + |> last() + */ + const request = "from(bucket: \"stats\") |> range(start: 0, stop: now()) |> filter(fn: (r) => r[\"_measurement\"] == \"rsyncd\") |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\" or r[\"_field\"] == \"bytes_recv\") |> last()" + + // try the query at most 5 times + var result *api.QueryTableResult + for i := 0; i < 5; i++ { + result, err = reader.Query(context.Background(), request) + + if err != nil { + logging.Warn("Failed to querying influxdb rsyncd statistics", err) + time.Sleep(time.Second) + continue + } + + break + } + + if result == nil { + return time.Time{}, errors.New("Error querying influxdb for rsyncd stat") + } + + for result.Next() { + if result.Err() == nil { + // Get the data point + dp := result.Record() + lastUpdated = dp.Time() + + // Get the field + field, ok := dp.ValueByKey("_field").(string) + if !ok { + logging.Warn("Error getting field") + fmt.Printf("%T %v\n", field, field) + continue + } + + // Switch on the field + switch field { + case "bytes_sent": + sent, ok := dp.ValueByKey("_value").(int64) + if !ok { + logging.Warn("Error getting bytes sent") + fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) + continue + } + a.stat.BytesSent = sent + case "bytes_recv": + received, ok := dp.ValueByKey("_value").(int64) + if !ok { + logging.Warn("Error getting bytes recv") + fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) + continue + } + a.stat.BytesRecv = received + case "requests": + requests, ok := dp.ValueByKey("_value").(int64) + if !ok { + logging.Warn("Error getting requests") + fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) + continue + } + a.stat.Requests = requests + } + } else { 
+ logging.Warn("Error querying influxdb for rsyncd stat", result.Err()) + } + } + + return lastUpdated, nil +} + +func (a *RsyncdAggregator) Aggregate(entry RsyncdLogEntry) { + a.stat.BytesSent += entry.sent + a.stat.BytesRecv += entry.recv + a.stat.Requests++ +} + +func (a *RsyncdAggregator) Send(writer api.WriteAPI) { + t := time.Now() + + p := influxdb2.NewPoint("rsyncd", map[string]string{}, map[string]interface{}{ + "bytes_sent": a.stat.BytesSent, + "bytes_recv": a.stat.BytesRecv, + "requests": a.stat.Requests, + }, t) + writer.WritePoint(p) +} + +type RsyncdLogEntry struct { + time time.Time + sent int64 + recv int64 +} + +func TailRSYNCLogFile(logFile string, lastUpdated time.Time, channels []chan<- RsyncdLogEntry) { + // Find the offset of the line where the date is past lastUpdated + start := time.Now() + + f, err := os.Open(logFile) + if err != nil { + logging.Error(err) + return + } + + // Performs a linear scan of the log file to find the offset to continue tailing from + offset := int64(0) + s := bufio.NewScanner(f) + for s.Scan() { + tm, err := parseRSYNCDate(s.Text()) + if err == nil && tm.After(lastUpdated) { + break + } + offset += int64(len(s.Text()) + 1) + } + logging.Info("Found rsyncd log offset in", time.Since(start)) + + // Tail the log file `tail -F` starting at the offset + seek := tail.SeekInfo{ + Offset: offset, + Whence: io.SeekStart, + } + tail, err := tail.TailFile(logFile, tail.Config{Follow: true, ReOpen: true, MustExist: true, Location: &seek}) + if err != nil { + logging.Error("Failed to start tailing `rsyncd.log`:", err) + return + } + + logging.Success("Tailing rsyncd log file") + + // Parse each line as we receive it + for line := range tail.Lines { + entry, err := parseRsyncdLine(line.Text) + + if err == nil { + for ch := range channels { + channels[ch] <- entry + } + } + } +} + +type ParseLineError struct{} + +func (e ParseLineError) Error() string { + return "Failed to parse line" +} + +func parseRSYNCDate(line string) 
(time.Time, error) { + // Split the line over whitespace + parts := strings.Split(line, " ") + + if len(parts) < 2 { + return time.Time{}, ParseLineError{} + } + + // The 1st part is the date + dt := parts[0] + // 2nd part is the time + tm := parts[1] + + // make the time.Time object + t, err := time.Parse("2006/01/02 15:04:05", dt+" "+tm) + if err != nil { + return time.Time{}, err + } + + return t, nil +} + +func parseRsyncdLine(line string) (entry RsyncdLogEntry, err error) { + // 2022/04/20 20:00:10 [pid] sent XXX bytes received XXX bytes total size XXX + + // Split the line over whitespace + parts := strings.Split(line, " ") + + // the line we want has 14 parts + if len(parts) != 14 { + return entry, ParseLineError{} + } + + // the 4th part is "sent" + if parts[3] != "sent" { + return entry, ParseLineError{} + } + + // The 1st part is the date + dt := parts[0] + // 2nd part is the time + tm := parts[1] + + // make the time.Time object + entry.time, err = time.Parse("2006/01/02 15:04:05", dt+" "+tm) + if err != nil { + return entry, err + } + + // part 5 is the number of bytes sent + entry.sent, err = strconv.ParseInt(parts[4], 10, 64) + if err != nil { + fmt.Println(err) + return entry, ParseLineError{} + } + + // part 9 is the number of bytes received + entry.recv, err = strconv.ParseInt(parts[8], 10, 64) + if err != nil { + fmt.Println(err) + return entry, ParseLineError{} + } + + return entry, nil +} diff --git a/api.go b/api.go deleted file mode 100644 index e9e7171..0000000 --- a/api.go +++ /dev/null @@ -1,9 +0,0 @@ -package main - -import "github.com/gorilla/mux" - -func HandleAPI(r *mux.Router) { - -} - -// api/stats/{distro} diff --git a/cache.go b/cache.go deleted file mode 100644 index 2fe8bf8..0000000 --- a/cache.go +++ /dev/null @@ -1,108 +0,0 @@ -package main - -import ( - "bytes" - "net/http" - "sync" - "time" - - "github.com/COSI-Lab/logging" -) - -type ProxyWriter struct { - header http.Header - buffer bytes.Buffer - status int -} - -func (p 
*ProxyWriter) Header() http.Header { - return p.header -} - -func (p *ProxyWriter) Write(bytes []byte) (int, error) { - return p.buffer.Write(bytes) -} - -func (p *ProxyWriter) WriteHeader(status int) { - p.status = status -} - -type CacheEntry struct { - header http.Header - body []byte - status int - time time.Time -} - -func (c *CacheEntry) WriteTo(w http.ResponseWriter) (int, error) { - header := w.Header() - - for k, v := range c.header { - header[k] = v - } - - if c.status != 0 { - w.WriteHeader(c.status) - } - - return w.Write(c.body) -} - -// Caches the responses from the webserver -var cache = map[string]*CacheEntry{} -var cacheLock = &sync.RWMutex{} - -func cachingMiddleware(next func(w http.ResponseWriter, r *http.Request)) http.Handler { - if !webServerCache { - logging.Info("Caching disabled") - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logging.Info(r.Method, r.URL.Path) - next(w, r) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - start := time.Now() - - // Check if the request is cached - cacheLock.RLock() - if entry, ok := cache[r.RequestURI]; ok && r.Method == "GET" { - // Check that the cache entry is still valid - if time.Since(entry.time) < time.Hour { - // Send the cached response - entry.WriteTo(w) - cacheLock.RUnlock() - logging.Info(r.Method, r.RequestURI, "in", time.Since(start), "; cached") - return - } - } - cacheLock.RUnlock() - - // Create a new response writer - proxyWriter := &ProxyWriter{ - header: make(http.Header), - } - - // Call the next handler - next(proxyWriter, r) - - // Create the response cache entry - entry := &CacheEntry{ - header: proxyWriter.header, - body: proxyWriter.buffer.Bytes(), - status: proxyWriter.status, - time: time.Now(), - } - - // Cache the response - go func() { - cacheLock.Lock() - cache[r.RequestURI] = entry - cacheLock.Unlock() - }() - - // Send the response to client - entry.WriteTo(w) - logging.Info(r.Method, r.RequestURI, "in", 
time.Since(start)) - }) -} diff --git a/config.go b/config.go deleted file mode 100644 index a7e1a32..0000000 --- a/config.go +++ /dev/null @@ -1,322 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "os" - "regexp" - "sort" - "strings" - "text/template" - - "github.com/COSI-Lab/logging" - "github.com/xeipuuv/gojsonschema" -) - -type ConfigFile struct { - Schema string `json:"$schema"` - Mirrors map[string]*Project `json:"mirrors"` - Torrents []*Torrent `json:"torrents"` -} - -type Torrent struct { - Url string `json:"url"` - Delay int `json:"delay"` - Depth int `json:"depth"` -} - -// Returns a slice of all projects sorted by id -func (config *ConfigFile) GetProjects() []Project { - var projects []Project - - for _, project := range config.Mirrors { - projects = append(projects, *project) - } - - sort.Slice(projects, func(i, j int) bool { - return projects[i].Id < projects[j].Id - }) - - return projects -} - -type ProjectsGrouped struct { - Distributions []Project - Software []Project - Miscellaneous []Project -} - -// Returns 3 slices of projects grouped by Page and sorted by Human name -func (config *ConfigFile) GetProjectsByPage() ProjectsGrouped { - // "Distributions", "Software", "Miscellaneous" - var distributions, software, misc []Project - - for _, project := range config.GetProjects() { - switch project.Page { - case "Distributions": - distributions = append(distributions, project) - case "Software": - software = append(software, project) - case "Miscellaneous": - misc = append(misc, project) - } - } - - sort.Slice(distributions, func(i, j int) bool { - return strings.ToLower(distributions[i].Name) < strings.ToLower(distributions[j].Name) - }) - - sort.Slice(software, func(i, j int) bool { - return strings.ToLower(software[i].Name) < strings.ToLower(software[j].Name) - }) - - sort.Slice(misc, func(i, j int) bool { - return strings.ToLower(misc[i].Name) < strings.ToLower(misc[j].Name) - }) - - return 
ProjectsGrouped{ - Distributions: distributions, - Software: software, - Miscellaneous: misc, - } -} - -type Project struct { - Name string `json:"name"` - Short string // Copied from key - Id byte // Id is given out in alphabetical order of short (only 255 are supported) - SyncStyle string // "script" "rsync" or "static" - Script struct { - // Map of envirment variables to be set before calling the command - Env map[string]string `json:"env"` - Command string `json:"command"` - Arguments []string `json:"arguments"` - SyncsPerDay int `json:"syncs_per_day"` - } - Rsync struct { - Options string `json:"options"` // cmdline options for first stage - Second string `json:"second"` // cmdline options for second stage - Third string `json:"third"` // cmdline options for third stage - User string `json:"user"` - Host string `json:"host"` - Src string `json:"src"` - Dest string `json:"dest"` - SyncFile string `json:"sync_file"` - SyncsPerDay int `json:"syncs_per_day"` - PasswordFile string `json:"password_file"` - Password string // Loaded from password file - } `json:"rsync"` - Static struct { - Location string `json:"location"` - Source string `json:"source"` - Description string `json:"description"` - } `json:"static"` - Color string `json:"color"` - Official bool `json:"official"` - Page string `json:"page"` - HomePage string `json:"homepage"` - PublicRsync bool `json:"publicRsync"` - Icon string `json:"icon"` - Alternative string `json:"alternative"` - AccessToken string // Loaded from the access tokens file - Torrents string `json:"torrents"` -} - -func ParseConfig(configFile, schemaFile, tokensFile string) (config ConfigFile) { - // Parse the schema file - schemaBytes, err := ioutil.ReadFile(schemaFile) - if err != nil { - log.Fatal("Could not read schema file: ", configFile, err.Error()) - } - schemaLoader := gojsonschema.NewBytesLoader(schemaBytes) - - // Parse the config file - configBytes, err := ioutil.ReadFile(configFile) - if err != nil { - log.Fatal("Could 
not read config file: ", configFile, err.Error()) - } - documentLoader := gojsonschema.NewBytesLoader(configBytes) - - // Validate the config against the schema - result, err := gojsonschema.Validate(schemaLoader, documentLoader) - if err != nil { - log.Fatal("Config file did not match the schema: ", err.Error()) - } - - // Report errors - if !result.Valid() { - fmt.Printf("The document is not valid. see errors :\n") - for _, desc := range result.Errors() { - fmt.Printf("- %s\n", desc) - } - os.Exit(1) - } - - // Finally parse the config - err = json.Unmarshal(configBytes, &config) - if err != nil { - log.Fatal("Could not parse the config file even though it fits the schema file: ", err.Error()) - } - - // Parse passwords & copy key as short & determine style - var i uint8 = 0 - for short, project := range config.Mirrors { - if project.Rsync.Dest != "" { - project.SyncStyle = "rsync" - } else if project.Static.Location != "" { - project.SyncStyle = "static" - } else { - project.SyncStyle = "script" - } - - if project.Rsync.PasswordFile != "" { - project.Rsync.Password = getPassword("configs/" + project.Rsync.PasswordFile) - } - project.Short = short - project.Id = i - - // add 1 and check for overflow - if i == 255 { - log.Fatal("Too many projects, 255 is the maximum because of the live map") - } - i++ - } - - // Parse access tokens - if tokensFile != "" { - // Read line by line - file, err := os.Open(tokensFile) - if err != nil { - log.Fatal("Could not open access tokens file: ", tokensFile, err.Error()) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - - // The format is "project:token" and we ignore lines that don't match - reg := regexp.MustCompile(`^([^:]+):([^:]+)$`) - if reg.MatchString(line) { - // Get the project name and the token - projectName := reg.FindStringSubmatch(line)[1] - token := reg.FindStringSubmatch(line)[2] - - // Add the token to the project - 
config.Mirrors[projectName].AccessToken = token - } - } - } - - return config -} - -func getPassword(filename string) string { - bytes, err := os.ReadFile(filename) - - if err != nil { - log.Fatal("Could not read password file: ", filename, " ", err.Error()) - } - - // trim whitespace from the end - password := strings.TrimSpace(string(bytes)) - - return string(password) -} - -func createRsyncdConfig(config *ConfigFile) { - tmpl := `# This is a generated file. Do not edit manually. -uid = nobody -gid = nogroup -use chroot = yes -max connections = 0 -pid file = /var/run/rsyncd.pid -motd file = /etc/rsyncd.motd -log file = /var/log/rsyncd.log -log format = %t %o %a %m %f %b -dont compress = *.gz *.tgz *.zip *.z *.Z *.rpm *.deb *.bz2 *.tbz2 *.xz *.txz *.rar -refuse options = checksum delete -{{ range . }} -[{{ .Short }}] - comment = {{ .Name }} - path = /storage/{{ .Short }} - exclude = lost+found/ - read only = true - ignore nonreadable = yes{{ end }} -` - - var filteredProjects []*Project - for _, project := range config.Mirrors { - if project.PublicRsync { - filteredProjects = append(filteredProjects, project) - } - } - - t := template.Must(template.New("rsyncd.conf").Parse(tmpl)) - var buf bytes.Buffer - err := t.Execute(&buf, filteredProjects) - - if err != nil { - log.Fatal("Could not create rsyncd.conf: ", err.Error()) - } - - // save to rsyncd.conf - err = os.WriteFile("/etc/rsyncd.conf", buf.Bytes(), 0644) - if err != nil { - logging.Error("Could not write rsyncd.conf: ", err.Error()) - } -} - -// In case of emergencies this can generate a nginx config file to redirect to alternative mirrors -func createNginxRedirects(config *ConfigFile) { - tmpl := `# This is a generated file. Do not edit manually. 
-server { - listen 80 default; - listen [::]:80 default; - server_name _; - - # SSL configuration - listen 443 ssl; - listen [::]:443 ssl; - ssl_certificate /etc/letsencrypt/live/mirror.clarkson.edu/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/mirror.clarkson.edu/privkey.pem; - - # Linuxmint redirection - rewrite ^/linuxmint/iso/images/(.*)$ /linuxmint-images/$1 permanent; - rewrite ^/linuxmint/packages/(.*)$ /linuxmint-packages/$1 permanent; - - # Redirect all projects to other mirrors{{ range . }} - rewrite ^/{{ .Short }}/(.*)$ {{ .Alternative }}$1 redirect;{{ end }} - - # Other wise redirect 404 to 503.html - error_page 404 403 500 503 /503.html; - location = /503.html { - root /var/www; - } -} -` - var filteredProjects []*Project - for _, project := range config.Mirrors { - if project.Alternative != "" { - filteredProjects = append(filteredProjects, project) - } - } - - t := template.Must(template.New("nginx.conf").Parse(tmpl)) - var buf bytes.Buffer - err := t.Execute(&buf, filteredProjects) - - if err != nil { - log.Fatal("Could not create nginx.conf: ", err.Error()) - } - - // save to /tmp/nginx.conf - err = os.WriteFile("/tmp/nginx.conf", buf.Bytes(), 0644) - if err != nil { - logging.Error("Could not write nginx.conf: ", err.Error()) - } -} diff --git a/config/README.md b/config/README.md new file mode 100644 index 0000000..a1695db --- /dev/null +++ b/config/README.md @@ -0,0 +1,3 @@ +# Config + +This module handles parsing, validating, and processing all of mirror's configuration files. 
\ No newline at end of file diff --git a/config/configFile.go b/config/configFile.go new file mode 100644 index 0000000..376a60e --- /dev/null +++ b/config/configFile.go @@ -0,0 +1,247 @@ +package config + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + "sort" + "strings" + "text/template" + + "github.com/xeipuuv/gojsonschema" +) + +// File is the struct that represents the main mirror.json config file +type File struct { + Schema string `json:"$schema"` + // Torrents is a list of upstreams to scrape .torrent files from + Torrents []*ScrapeTarget `json:"torrents"` + // Subnets defines a map of subnets we track usage from (e.g. "clarkson" -> ['128.153.0.0/16']) + Subnets map[string][]string `json:"subnets"` + // Projects is a map short names to project definitions + Projects map[string]*Project `json:"mirrors"` +} + +// ParseConfig reads the main mirrors.json file and checks that it matches the schema +func ReadProjectConfig(cfg, schema io.Reader) (config *File, err error) { + // read cfg and schema into byte arrays + cfgBytes, err := io.ReadAll(cfg) + if err != nil { + return nil, err + } + + schemaBytes, err := io.ReadAll(schema) + if err != nil { + return nil, err + } + + schemaLoader := gojsonschema.NewBytesLoader(schemaBytes) + documentLoader := gojsonschema.NewBytesLoader(cfgBytes) + + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + if err != nil { + log.Fatal("Config file did not match the schema: ", err.Error()) + } + + // Report errors + if !result.Valid() { + fmt.Printf("The document is not valid. 
see errors :\n") + for _, desc := range result.Errors() { + fmt.Printf("- %s\n", desc) + } + os.Exit(1) + } + + // Finally parse the config + err = json.Unmarshal(cfgBytes, &config) + if err != nil { + return nil, err + } + + // Post processing for the config + var i uint8 = 0 + for short, project := range config.Projects { + // Self reference the short name + project.Short = short + + // Determine the sync style + if project.Rsync != nil { + project.SyncStyle = "rsync" + } else if project.Static != nil { + project.SyncStyle = "static" + } else { + project.SyncStyle = "script" + } + + // Set the project id for the live map + if i == 255 { + return nil, fmt.Errorf("too many projects, max is 255") + } + project.ID = i + i++ + } + + return config, nil +} + +// GetProjects returns a slice of projects sorted by short name +func (config *File) GetProjects() []Project { + var projects []Project + + for _, project := range config.Projects { + projects = append(projects, *project) + } + + sort.Slice(projects, func(i, j int) bool { + return strings.ToLower(projects[i].Short) < strings.ToLower(projects[j].Short) + }) + + return projects +} + +// CreateRsyncdConfig writes a rsyncd.conf file to the given writer based on the config +// +// Consider passing a bufio.Write to this function +func (config *File) CreateRsyncdConfig(w io.Writer) error { + tmpl := `# This is a generated file. Do not edit manually. + uid = nobody + gid = nogroup + use chroot = yes + max connections = 0 + pid file = /var/run/rsyncd.pid + motd file = /etc/rsyncd.motd + log file = /var/log/rsyncd.log + log format = %t %o %a %m %f %b + dont compress = *.gz *.tgz *.zip *.z *.Z *.rpm *.deb *.bz2 *.tbz2 *.xz *.txz *.rar + refuse options = checksum delete + {{ range . 
}} + [{{ .Short }}] + comment = {{ .Name }} + path = /storage/{{ .Short }} + exclude = lost+found/ + read only = true + ignore nonreadable = yes{{ end }} + ` + + var filteredProjects []*Project + for _, project := range config.Projects { + if project.PublicRsync { + filteredProjects = append(filteredProjects, project) + } + } + + t := template.Must(template.New("rsyncd.conf").Parse(tmpl)) + err := t.Execute(w, filteredProjects) + if err != nil { + return err + } + + return nil +} + +// Validate checks the config file for a few properties +// +// - All projects have a unique long name, case insensitive +// - All projects have a unique short name, case insensitive +// - The sync style flag is set correctly +func (config *File) Validate() error { + // Check that all projects have a unique long name + longNames := make(map[string]bool) + for _, project := range config.Projects { + if _, ok := longNames[strings.ToLower(project.Name)]; ok { + return fmt.Errorf("duplicate long name: %s", project.Name) + } + longNames[project.Name] = true + } + + // Check that all projects have a unique short name, case insensitive + shortNames := make(map[string]bool) + for _, project := range config.Projects { + if _, ok := shortNames[strings.ToLower(project.Short)]; ok { + return fmt.Errorf("duplicate short name: %s", project.Short) + } + shortNames[strings.ToLower(project.Short)] = true + } + + // Check that the sync style flag is set correctly + for _, project := range config.Projects { + switch project.SyncStyle { + case "rsync": + if project.Rsync == nil { + return fmt.Errorf("sync style is 'rsync' but rsync config is nil for project %s", project.Short) + } + + if project.Static != nil { + return fmt.Errorf("sync style is 'rsync' but static config is not nil for project %s", project.Short) + } + + if project.Script != nil { + return fmt.Errorf("sync style is 'rsync' but script config is not nil for project %s", project.Short) + } + case "static": + if project.Rsync != nil { + return 
fmt.Errorf("sync style is 'static' but rsync config is not nil for project %s", project.Short) + } + + if project.Static == nil { + return fmt.Errorf("sync style is 'static' but static config is nil for project %s", project.Short) + } + + if project.Script != nil { + return fmt.Errorf("sync style is 'static' but script config is not nil for project %s", project.Short) + } + case "script": + if project.Rsync != nil { + return fmt.Errorf("sync style is 'script' but rsync config is not nil for project %s", project.Short) + } + + if project.Static != nil { + return fmt.Errorf("sync style is 'script' but static config is not nil for project %s", project.Short) + } + + if project.Script == nil { + return fmt.Errorf("sync style is 'script' but script config is nil for project %s", project.Short) + } + default: + return fmt.Errorf("unknown sync style '%s' for project %s", project.SyncStyle, project.Short) + } + } + + return nil +} + +// ProjectsGrouped is a simple 3-tuple of slices of projects +type ProjectsGrouped struct { + // Distributions are projects with "Distributions" as their page + Distributions []Project + // Software are projects with "Software" as their page + Software []Project + // Miscellaneous are projects with "Miscellaneous" as their page + Miscellaneous []Project +} + +// GetProjectsByPage returns a ProjectsGrouped struct with the projects grouped by page. 
+// Within each group the projects are sorted by short +func (config *File) GetProjectsByPage() ProjectsGrouped { + // "Distributions", "Software", "Miscellaneous" + var distributions, software, misc []Project + + for _, project := range config.GetProjects() { + switch project.Page { + case "Distributions": + distributions = append(distributions, project) + case "Software": + software = append(software, project) + case "Miscellaneous": + misc = append(misc, project) + } + } + + return ProjectsGrouped{ + Distributions: distributions, + Software: software, + Miscellaneous: misc, + } +} diff --git a/config/configFile_test.go b/config/configFile_test.go new file mode 100644 index 0000000..85ac6a7 --- /dev/null +++ b/config/configFile_test.go @@ -0,0 +1,43 @@ +package config + +import ( + "os" + "testing" +) + +// Verify that ../configs/mirrors.json is valid +func TestMirrorJSON(t *testing.T) { + // Open the config file + file, err := os.Open("../configs/mirrors.json") + if err != nil { + t.Error("Could not open mirrors.json", err.Error()) + } + + // Open the schema file + schema, err := os.Open("../configs/mirrors.schema.json") + if err != nil { + t.Error("Could not open mirrors.schema.json", err.Error()) + } + + // Parse the config + config, err := ReadProjectConfig(file, schema) + if err != nil { + t.Error("Could not parse mirrors.json", err.Error()) + } + + // Verify that something was parsed + if config == nil { + t.Error("Config was nil") + return + } + if len(config.Projects) == 0 { + t.Error("Config had no projects") + } + + // Verify that the config is valid + err = config.Validate() + if err != nil { + t.Error("Config did not validate", err.Error()) + } + +} diff --git a/config/project.go b/config/project.go new file mode 100644 index 0000000..af89463 --- /dev/null +++ b/config/project.go @@ -0,0 +1,53 @@ +package config + +// Project is the struct that represents a single project in the mirror.json config file +// These make up the bulk of the config file 
+type Project struct { + // short is the key in the map, e.g. "debian" + Short string + + Name string `json:"name"` + Color string `json:"color"` + Official bool `json:"official"` + Page string `json:"page"` + HomePage string `json:"homepage"` + PublicRsync bool `json:"publicRsync"` + Icon string `json:"icon"` + Alternative string `json:"alternative"` + Torrents string `json:"torrents"` + + // SyncStyle isn't found in the file, instead it's inferred from the presence of "script", "rsync", or "static" keys + SyncStyle string + Script *Script `json:"script"` + Rsync *Rsync `json:"rsync"` + Static *Static `json:"static"` + + // ID is a unique identifier for the project + ID uint8 +} + +// Rsync is the struct that represents a project that is synced with rsync +type Rsync struct { + Stages []string `json:"stages"` + User string `json:"user"` + Host string `json:"host"` + Src string `json:"src"` + Dest string `json:"dest"` + SyncsPerDay uint `json:"syncs_per_day"` + PasswordFile string `json:"password_file"` +} + +// Script is the struct that represents a project that is synced with a script +type Script struct { + Env map[string]string `json:"env"` + Command string `json:"command"` + Arguments []string `json:"arguments"` + SyncsPerDay uint `json:"syncs_per_day"` +} + +// Static is the struct that represents a project that is never synced +type Static struct { + Location string `json:"location"` + Source string `json:"source"` + Description string `json:"description"` +} diff --git a/config/tokens.go b/config/tokens.go new file mode 100644 index 0000000..f6e1f08 --- /dev/null +++ b/config/tokens.go @@ -0,0 +1,54 @@ +package config + +import ( + "io" + + "github.com/pelletier/go-toml/v2" +) + +// Tokens is what we unmarshal the tokens.toml file into +type Tokens struct { + Tokens []Token `toml:"tokens"` +} + +func ReadTokens(r io.Reader) (tokens *Tokens, err error) { + err = toml.NewDecoder(r).Decode(&tokens) + if err != nil { + return nil, err + } + return tokens, nil 
+} + +// GetToken returns the token struct by token string +func (tokens *Tokens) GetToken(token string) *Token { + for _, t := range tokens.Tokens { + if t.Token == token { + return &t + } + } + return nil +} + +// Token is the struct that represents a single access token in the tokens.txt file +// +// The token is able to trigger manual syncs for particular projects, if the project list is empty then all projects are allowed +type Token struct { + Name string `toml:"name"` + Token string `toml:"token"` + Projects []string `toml:"projects"` +} + +func (token *Token) HasProject(project string) bool { + // Empty project list means all projects + if len(token.Projects) == 0 { + return true + } + + for _, p := range token.Projects { + if p == project { + return true + } + } + + return false +} diff --git a/config/tokens_test.go b/config/tokens_test.go new file mode 100644 index 0000000..c1534ec --- /dev/null +++ b/config/tokens_test.go @@ -0,0 +1,89 @@ +package config_test + +import ( + "strings" + "testing" + + "github.com/COSI-Lab/Mirror/config" +) + +// Test tokens.txt parsing +func TestTokens(t *testing.T) { + example := ` + [[tokens]] + name = "Example" + token = "1234" + projects = ["archlinux", "archlinux32"] + + [[tokens]] + name = "All" + token = "5678" + projects = [] + ` + + // Reader from example string + reader := strings.NewReader(example) + tokens, err := config.ReadTokens(reader) + if err != nil { + t.Errorf("Expected no error, got %s", err) + } + + // Check that we have 2 tokens + if len(tokens.Tokens) != 2 { + t.Errorf("Expected 2 tokens, got %d", len(tokens.Tokens)) + } + + // Check that the first token is correct + if tokens.Tokens[0].Name != "Example" { + t.Errorf("Expected token name Example, got %s", tokens.Tokens[0].Name) + } + + if tokens.Tokens[0].Token != "1234" { + t.Errorf("Expected token 1234, got %s", tokens.Tokens[0].Token) + } + + if len(tokens.Tokens[0].Projects) != 2 { + t.Errorf("Expected 2 projects, got %d", 
len(tokens.Tokens[0].Projects)) + } + + for _, project := range tokens.Tokens[0].Projects { + if project != "archlinux" && project != "archlinux32" { + t.Errorf("Expected project archlinux or archlinux32, got %s", project) + } + } + + // Check that the second token is correct + if tokens.Tokens[1].Name != "All" { + t.Errorf("Expected token name All, got %s", tokens.Tokens[1].Name) + } + + if tokens.Tokens[1].Token != "5678" { + t.Errorf("Expected token 5678, got %s", tokens.Tokens[1].Token) + } + + if len(tokens.Tokens[1].Projects) != 0 { + t.Errorf("Expected 0 projects, got %d", len(tokens.Tokens[1].Projects)) + } + + // Check that GetToken works + if tokens.GetToken("0000") != nil { + t.Errorf("Expected nil token, got %s", tokens.GetToken("0000").Token) + } + + // Check that HasProject works + if !tokens.GetToken("1234").HasProject("archlinux") { + t.Errorf("Expected token to have project archlinux") + } + + if !tokens.GetToken("1234").HasProject("archlinux32") { + t.Errorf("Expected token to have project archlinux32") + } + + if tokens.GetToken("1234").HasProject("blender") { + t.Errorf("Expected token to not have project blender") + } + + if !tokens.GetToken("5678").HasProject("archlinux") { + t.Errorf("Expected token to have project archlinux") + } +} diff --git a/config/torrent.go b/config/torrent.go new file mode 100644 index 0000000..e8f28ca --- /dev/null +++ b/config/torrent.go @@ -0,0 +1,8 @@ +package config + +// ScrapeTarget is the struct that represents a single upstream to scrape .torrent files from +type ScrapeTarget struct { + URL string `json:"url"` + Delay int `json:"delay"` + Depth int `json:"depth"` +} diff --git a/configs/.gitignore b/configs/.gitignore index 0846d12..73b8005 100644 --- a/configs/.gitignore +++ b/configs/.gitignore @@ -1,2 +1,3 @@ *.secret -tokens.txt \ No newline at end of file +tokens.txt +tokens.toml \ No newline at end of file diff --git a/configs/mirrors.json b/configs/mirrors.json index db15c45..139914f 100644 --- 
a/configs/mirrors.json +++ b/configs/mirrors.json @@ -1,5 +1,11 @@ { "$schema": "./mirrors.schema.json", + "subnets": { + "clarkson": [ + "128.153.0.0/16", + "2605:6480::/32" + ] + }, "torrents": [ { "url": "https://linuxmint.com/torrents/", @@ -64,7 +70,6 @@ "src": "alpine", "dest": "/storage/alpine", "options": "-avzrHy --no-perms --no-group --no-owner --delete --delete-delay --delay-updates --ignore-errors --exclude \".~tmp~\"", - "sync_file": "last-updated", "syncs_per_day": 4 }, "official": true, diff --git a/configs/mirrors.schema.json b/configs/mirrors.schema.json index 7ae2c33..dfd8c79 100644 --- a/configs/mirrors.schema.json +++ b/configs/mirrors.schema.json @@ -20,7 +20,9 @@ "env": { "description": "Map of envirment variables to be set before calling the command", "type": "object", - "additionalProperties": { "type": "string" } + "additionalProperties": { + "type": "string" + } }, "command": { "description": "Command to execute", @@ -29,7 +31,9 @@ "arguments": { "description": "Arguments to pass to the command", "type": "array", - "items": { "type": "string" } + "items": { + "type": "string" + } }, "syncs_per_day": { "description": "How many times a day to sync", @@ -37,7 +41,10 @@ "minimum": 1, "maximum": 24 }, - "required": ["command", "syncs_per_day"] + "required": [ + "command", + "syncs_per_day" + ] }, "rsync": { "description": "Instructions for how to run rsync", @@ -74,10 +81,6 @@ "description": "Location on disk to save to", "type": "string" }, - "sync_file": { - "description": "A file that tracks if the mirror is in sync with the upstream", - "type": "string" - }, "syncs_per_day": { "description": "How many times a day to sync", "type": "number", @@ -89,7 +92,13 @@ "type": "string" } }, - "required": ["options", "host", "src", "dest", "syncs_per_day"] + "required": [ + "options", + "host", + "src", + "dest", + "syncs_per_day" + ] }, "static": { "description": "Host a repository that never changes", @@ -108,7 +117,11 @@ "description": "Description 
of the project to be displayed on the website" } }, - "required": ["location", "source", "description"] + "required": [ + "location", + "source", + "description" + ] }, "official": { "description": "Are we an official mirror for this software", @@ -117,7 +130,11 @@ "page": { "description": "Which page should the project be displayed on the website", "type": "string", - "enum": ["Distributions", "Software", "Miscellaneous"] + "enum": [ + "Distributions", + "Software", + "Miscellaneous" + ] }, "homepage": { "description": "URL to the homepage for the mirrored project", @@ -156,13 +173,19 @@ ], "oneOf": [ { - "required": ["rsync"] + "required": [ + "rsync" + ] }, { - "required": ["static"] + "required": [ + "static" + ] }, { - "required": ["script"] + "required": [ + "script" + ] } ], "additionalProperties": false @@ -171,19 +194,31 @@ "torrents": { "type": "array", "description": "list of remote sources to pull torrents from using HTTP", - "items": { + "items": { "type": "object", "depth": { "type": "number", "default": 1 }, - "url": { "type": "string" }, + "url": { + "type": "string" + }, "delay": { "type": "number", "default": 1, "description": "Number of seconds to wait between each request. Locally 0 seconds is fine, globally 1 is normally safe." } } + }, + "subnets": { + "type": "object", + "description": "List of subnets to keep separate nginx metrics for", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } } } -} +} \ No newline at end of file diff --git a/configs/tokens-example.toml b/configs/tokens-example.toml new file mode 100644 index 0000000..16b8d62 --- /dev/null +++ b/configs/tokens-example.toml @@ -0,0 +1,17 @@ +# These are access tokens that can be used to trigger manual syncs of projects +# +# Tokens have 3 fields. A human name describing who/what they have been created for, +# the token itself, and a list of projects that the token can be used to sync. 
+# If the project list is empty the token can be used to sync all projects +# +# The format is toml, here is an example + +[[tokens]] +name = "chris" +token = "1234567890" +projects = [] + +[[tokens]] +name = "blender team" +token = "0987654321" +projects = ["blender"] diff --git a/daily_health.go b/daily_health.go index f4af184..44ffea8 100644 --- a/daily_health.go +++ b/daily_health.go @@ -7,31 +7,12 @@ import ( "sort" "time" - "github.com/COSI-Lab/logging" + "github.com/COSI-Lab/Mirror/logging" "github.com/influxdata/influxdb-client-go/v2/api" "github.com/wcharczuk/go-chart/v2" "github.com/wcharczuk/go-chart/v2/drawing" ) -// Every day creates ands sends a progress report to the discord channel regarding the health of the server -func HandleCheckIn() { - for { - // Target sending report at 7:00 AM local time - now := time.Now() - target := time.Date(now.Year(), now.Month(), now.Day(), 7, 0, 0, 0, time.Local) - if now.After(target) { - target = target.Add(24 * time.Hour) - } - - // Sleep until target time - time.Sleep(target.Sub(now)) - - // Post the daily progress report to the discord channel (dd-mm-yyyy) - todays_date := time.Now().Format("02-01-2006") - logging.InfoToDiscord(fmt.Sprintf("Daily progress report: https://mirror.clarkson.edu/stats/total/daily_sent?data=%s", todays_date)) - } -} - // You can paste this into the influxdb data explorer /* from(bucket: "public") diff --git a/datarithms/README.md b/datarithms/README.md new file mode 100644 index 0000000..c879053 --- /dev/null +++ b/datarithms/README.md @@ -0,0 +1,3 @@ +# datastructures + +This module contains any generic data structures that we find useful in this project. 
// CircularQueue is a thread-safe FIFO queue with a fixed capacity.
// When full, Push overwrites the oldest element.
type CircularQueue[T any] struct {
	lock     sync.RWMutex
	queue    []T
	capacity int
	start    int // index of the oldest element
	end      int // index one past the newest element
	length   int // number of live elements; start == end when empty OR full
}

// NewCircularQueue creates a new CircularQueue with the given capacity.
func NewCircularQueue[T any](capacity int) *CircularQueue[T] {
	return &CircularQueue[T]{
		queue:    make([]T, capacity),
		capacity: capacity,
	}
}

// Push adds an element to the end of the queue.
// If the queue is full, the oldest element is overwritten.
func (q *CircularQueue[T]) Push(element T) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.queue[q.end] = element
	q.end = (q.end + 1) % q.capacity
	if q.length == q.capacity {
		// Queue was full: the oldest element was just overwritten.
		q.start = (q.start + 1) % q.capacity
	} else {
		q.length++
	}
}

// Pop removes and returns the element at the front of the queue.
// If the queue is empty it returns the zero value of T and an error.
// (The old comment claimed "returns nil", which was never true.)
func (q *CircularQueue[T]) Pop() (element T, err error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	if q.length == 0 {
		return element, fmt.Errorf("circularQueue is empty")
	}
	element = q.queue[q.start]
	q.start = (q.start + 1) % q.capacity
	q.length--
	return element, nil
}

// Front returns the element at the start of the queue.
// NOTE(review): on an empty queue this returns whatever is stored at the
// start slot (zero value or a stale element), matching the prior behavior.
func (q *CircularQueue[T]) Front() T {
	q.lock.RLock()
	defer q.lock.RUnlock()
	return q.queue[q.start]
}

// Len returns the number of elements in the queue.
func (q *CircularQueue[T]) Len() int {
	q.lock.RLock()
	defer q.lock.RUnlock()
	return q.length
}

// Capacity returns the capacity of the queue.
func (q *CircularQueue[T]) Capacity() int {
	q.lock.RLock()
	defer q.lock.RUnlock()
	return q.capacity
}

// All builds a slice of all elements in the queue, oldest first.
//
// BUG FIX: the previous loop `for i := start; i != end; ...` executed zero
// times when the queue was full (start == end), so All returned an empty
// slice instead of every element. Iterating by count fixes that.
func (q *CircularQueue[T]) All() []T {
	q.lock.RLock()
	defer q.lock.RUnlock()
	result := make([]T, 0, q.length)
	for i := 0; i < q.length; i++ {
		result = append(result, q.queue[(q.start+i)%q.capacity])
	}
	return result
}

// Fold folds the queue into a single value, applying f to each element from
// oldest to newest, starting from init.
//
// BUG FIX: same full-queue bug as All — the old `i != end` loop skipped
// every element when start == end; iterating by count visits all of them.
func Fold[T any, R any](q *CircularQueue[T], f func(R, T) R, init R) R {
	q.lock.RLock()
	defer q.lock.RUnlock()
	result := init
	for i := 0; i < q.length; i++ {
		result = f(result, q.queue[(q.start+i)%q.capacity])
	}
	return result
}
0, got", q.Len()) + } + + // Pop the fourth element + if element, err = q.Pop(); err == nil && element == 0 { + t.Error("Expected nil, got", element) + } + + // Check the length + if q.Len() != 0 { + t.Error("Expected 0, got", q.Len()) + } + + // Push more elements than capacity + for i := 0; i < 10; i++ { + q.Push(i) + } + + // Check the length + if q.Len() != 5 { + t.Error("Expected 5, got", q.Len()) + } + + // Pop the first element + if element, err = q.Pop(); err != nil && element != 5 { + t.Error("Expected 5, got", element) + } + + // Check the length + if q.Len() != 4 { + t.Error("Expected 4, got", q.Len()) + } + + // Pop the second element + if element, err = q.Pop(); err != nil && element != 6 { + t.Error("Expected 6, got", element) + } + + // Check the length + if q.Len() != 3 { + t.Error("Expected 3, got", q.Len()) + } + + // Pop the third element + if element, err = q.Pop(); err != nil && element != 7 { + t.Error("Expected 7, got", element) + } + + // Check the length + if q.Len() != 2 { + t.Error("Expected 2, got", q.Len()) + } + + // Pop the fourth element + if element, err = q.Pop(); err != nil && element != 8 { + t.Error("Expected 8, got", element) + } + + // Check the length + if q.Len() != 1 { + t.Error("Expected 1, got", q.Len()) + } + + // Pop the fifth element + if element, err = q.Pop(); err != nil && element != 9 { + t.Error("Expected 9, got", element) + } + + // Check the length + if q.Len() != 0 { + t.Error("Expected 0, got", q.Len()) + } +} diff --git a/go.mod b/go.mod index 79f6cb6..9d496a8 100644 --- a/go.mod +++ b/go.mod @@ -1,74 +1,109 @@ -module github.com/COSI_Lab/Mirror +module github.com/COSI-Lab/Mirror -go 1.20 +go 1.21 require ( - github.com/COSI-Lab/datarithms v1.0.2 github.com/COSI-Lab/geoip v1.0.0 - github.com/COSI-Lab/logging v1.0.3 github.com/IncSW/geoip2 v0.1.2 github.com/gocolly/colly v1.2.0 + github.com/gofrs/flock v0.8.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 
github.com/influxdata/influxdb-client-go/v2 v2.12.3 github.com/joho/godotenv v1.5.1 github.com/nxadm/tail v1.4.8 - github.com/wcharczuk/go-chart/v2 v2.1.0 + github.com/pelletier/go-toml/v2 v2.1.0 + github.com/wcharczuk/go-chart/v2 v2.1.1 github.com/xeipuuv/gojsonschema v1.2.0 ) require ( + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect + github.com/CloudyKit/jet/v6 v6.2.0 // indirect + github.com/Joker/jade v1.1.3 // indirect github.com/PuerkitoBio/goquery v1.8.1 // indirect + github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 // indirect + github.com/andybalholm/brotli v1.0.5 // indirect github.com/andybalholm/cascadia v1.3.2 // indirect github.com/antchfx/htmlquery v1.3.0 // indirect - github.com/antchfx/xmlquery v1.3.16 // indirect + github.com/antchfx/xmlquery v1.3.17 // indirect github.com/antchfx/xpath v1.2.4 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/bytedance/sonic v1.9.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect - github.com/deepmap/oapi-codegen v1.13.0 // indirect + github.com/aymerick/douceur v0.2.0 // indirect + github.com/blend/go-sdk v1.20220411.3 // indirect + github.com/bytedance/sonic v1.10.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.0 // indirect + github.com/deepmap/oapi-codegen v1.14.0 // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/flosch/pongo2/v4 v4.0.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/gin-gonic/gin v1.9.1 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.14.1 // indirect + 
github.com/go-playground/validator/v10 v10.15.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/gorilla/css v1.0.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect + github.com/iris-contrib/schema v0.0.6 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kataras/blocks v0.0.7 // indirect + github.com/kataras/golog v0.1.9 // indirect + github.com/kataras/iris/v12 v12.2.5 // indirect + github.com/kataras/pio v0.0.12 // indirect + github.com/kataras/sitemap v0.0.6 // indirect + github.com/kataras/tunnel v0.0.4 // indirect github.com/kennygrant/sanitize v1.2.4 // indirect + github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect - github.com/labstack/echo/v4 v4.10.2 // indirect + github.com/labstack/echo/v4 v4.11.1 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect + github.com/mailgun/raymond/v2 v2.0.48 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect + github.com/microcosm-cc/bluemonday v1.0.25 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect 
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect + github.com/schollz/closestmatch v2.1.0+incompatible // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/tdewolff/minify/v2 v2.12.9 // indirect + github.com/tdewolff/parse/v2 v2.6.8 // indirect github.com/temoto/robotstxt v1.1.2 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.11 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.10.0 // indirect - golang.org/x/image v0.8.0 // indirect - golang.org/x/net v0.11.0 // indirect - golang.org/x/sys v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + github.com/yosssi/ace v0.0.5 // indirect + golang.org/x/arch v0.5.0 // indirect + golang.org/x/crypto v0.13.0 // indirect + golang.org/x/image v0.12.0 // indirect + golang.org/x/net v0.15.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 4d1be46..94192f9 100644 --- a/go.sum +++ b/go.sum @@ -1,46 +1,64 @@ -github.com/COSI-Lab/datarithms v1.0.2 h1:lBCmJs6f1HBT8WpkPdVkz2jIIz+4SmhWapPmJDodMUw= -github.com/COSI-Lab/datarithms v1.0.2/go.mod 
h1:RbztXJr04lNIwJxbc6X5a4RUJTng9tuR0C0htlzhOrk= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/COSI-Lab/geoip v1.0.0 h1:wZEWHfw6mex+Vfk3BPKpVP1Ia1Y+ARhqwgs7QqckP9o= github.com/COSI-Lab/geoip v1.0.0/go.mod h1:/XlojAhPMNnxoBhTSk3o/dFAW+BsywOHJffEmLQOMjo= -github.com/COSI-Lab/logging v1.0.3 h1:D/ritIpI0o8WWgywF9/nQlJbGTu1lLQGAeophjPkLuU= -github.com/COSI-Lab/logging v1.0.3/go.mod h1:7JdoE7ECtg/nWA5FjTAL8f/amR6lvp4cYy+wmXbEYYw= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v6 v6.2.0 h1:EpcZ6SR9n28BUGtNJSvlBqf90IpjeFr36Tizxhn/oME= +github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= github.com/IncSW/geoip2 v0.1.2 h1:v7iAyDiNZjHES45P1JPM3SMvkw0VNeJtz0XSVxkRwOY= github.com/IncSW/geoip2 v0.1.2/go.mod h1:adcasR40vXiUBjtzdaTTKL/6wSf+fgO4M8Gve/XzPUk= -github.com/PuerkitoBio/goquery v1.8.0 h1:PJTF7AmFCFKk1N6V6jmKfrNH9tV5pNE6lZMkG0gta/U= -github.com/PuerkitoBio/goquery v1.8.0/go.mod h1:ypIiRMtY7COPGk+I/YbZLbxsxn9g5ejnI2HSMtkjZvI= +github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.1.3 h1:Qbeh12Vq6BxURXT1qZBRHsDxeURB8ztcL6f3EXSGeHk= +github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM= github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/andybalholm/cascadia v1.3.1 
h1:nhxRkql1kdYCc8Snf7D5/D3spOX+dBgjA6u8x004T2c= +github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 h1:KkH3I3sJuOLP3TjA/dfr4NAY8bghDwnXiU7cTKxQqo0= +github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM= +github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= +github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA= github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= github.com/antchfx/htmlquery v1.3.0 h1:5I5yNFOVI+egyia5F2s/5Do2nFWxJz41Tr3DyfKD25E= github.com/antchfx/htmlquery v1.3.0/go.mod h1:zKPDVTMhfOmcwxheXUsx4rKJy8KEY/PU6eXr/2SebQ8= -github.com/antchfx/xmlquery v1.3.15 h1:aJConNMi1sMha5G8YJoAIF5P+H+qG1L73bSItWHo8Tw= -github.com/antchfx/xmlquery v1.3.15/go.mod h1:zMDv5tIGjOxY/JCNNinnle7V/EwthZ5IT8eeCGJKRWA= -github.com/antchfx/xmlquery v1.3.16 h1:OCevguHq93z9Y4vb9xpRmU4Cc9lMVoiMkMbBNZVDeBM= -github.com/antchfx/xmlquery v1.3.16/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7TunF7noreA= -github.com/antchfx/xpath v1.2.3 h1:CCZWOzv5bAqjVv0offZ2LVgVYFbeldKQVuLNbViZdes= +github.com/antchfx/xmlquery v1.3.17 h1:d0qWjPp/D+vtRw7ivCwT5ApH/3CkQU8JOeo3245PpTk= +github.com/antchfx/xmlquery v1.3.17/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7TunF7noreA= github.com/antchfx/xpath v1.2.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antchfx/xpath v1.2.4 h1:dW1HB/JxKvGtJ9WyVGJ0sIoEcqftV3SqIstujI+B9XY= github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/apapsch/go-jsonmerge/v2 v2.0.0 
h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/blend/go-sdk v1.20220411.3 h1:GFV4/FQX5UzXLPwWV03gP811pj7B8J2sbuq+GJQofXc= +github.com/blend/go-sdk v1.20220411.3/go.mod h1:7lnH8fTi6U4i1fArEXRyOIY2E1X4MALg09qsQqY1+ak= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.0 h1:qtNZduETEIWJVIyDl01BeNxur2rW9OwTQ/yBqFRkKEk= +github.com/bytedance/sonic v1.10.0/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deepmap/oapi-codegen v1.12.4 h1:pPmn6qI9MuOtCz82WY2Xaw46EQjgvxednXXrP7g5Q2s= -github.com/deepmap/oapi-codegen v1.12.4/go.mod h1:3lgHGMu6myQ2vqbbTXH2H1o4eXFTGnFiDaOaKKl5yas= -github.com/deepmap/oapi-codegen v1.13.0 h1:cnFHelhsRQbYvanCUAbRSn/ZpkUb1HPRlQcu8YqSORQ= -github.com/deepmap/oapi-codegen v1.13.0/go.mod h1:Amy7tbubKY9qkZOXqymI3Z6xSbndmu+atMJheLdyg44= +github.com/deepmap/oapi-codegen v1.14.0 h1:b51/kQwH69rjN5pu+8j/Q5fUGD/rUclLAcGLQWQwa3E= +github.com/deepmap/oapi-codegen v1.14.0/go.mod h1:QcEpzjVDwJEH3Fq6I7XYkI0M/JwvoL82ToYveaeVMAw= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw= +github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= @@ -50,61 +68,97 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-playground/assert/v2 
v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= -github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.15.3 h1:S+sSpunYjNPDuXkWbK+x+bA7iXiW296KG4dL3X7xUZo= +github.com/go-playground/validator/v10 v10.15.3/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gocolly/colly v1.2.0 h1:qRz9YAn8FIH0qzgNUw+HT9UN7wm1oF9OBAilwEWpyrI= github.com/gocolly/colly v1.2.0/go.mod h1:Hof5T3ZswNVsOHYmba1u03W65HDWgpV5HifSuueE0EA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12 h1:uK3X/2mt4tbSGoHvbLBHUny7CKiuwUip3MArtukol4E= +github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= 
+github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/influxdata/influxdb-client-go/v2 v2.12.2 h1:uYABKdrEKlYm+++qfKdbgaHKBPmoWR5wpbmj6MBB/2g= -github.com/influxdata/influxdb-client-go/v2 v2.12.2/go.mod h1:YteV91FiQxRdccyJ2cHvj2f/5sq4y4Njqu1fQzsQCOU= +github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/influxdata/influxdb-client-go/v2 v2.12.3 h1:28nRlNMRIV4QbtIUvxhWqaxn0IpXeMSkY/uJa/O/vC4= github.com/influxdata/influxdb-client-go/v2 v2.12.3/go.mod h1:IrrLUbCjjfkmRuaCiGQg4m2GbkaeJDcuWoxiWdQEbA0= github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU= github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/iris-contrib/httpexpect/v2 v2.15.1 h1:G2/TW0EZ5UhNNdljNDBBQDfdfumLlV6ljRqdTk3cAmc= +github.com/iris-contrib/httpexpect/v2 v2.15.1/go.mod h1:cUwf1Mm5CWs5ahZNHtDq82WuGOitAWBg/eMGevX9ilg= +github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw= +github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4= +github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I= +github.com/kataras/golog v0.1.9 h1:vLvSDpP7kihFGKFAvBSofYo7qZNULYSHOH2D7rPTKJk= +github.com/kataras/golog v0.1.9/go.mod h1:jlpk/bOaYCyqDqH18pgDHdaJab72yBE6i0O3s30hpWY= +github.com/kataras/iris/v12 v12.2.5 h1:R5UzUW4MIByBM6tKMG3UqJ7hL1JCEE+dkqQ8L72f6PU= +github.com/kataras/iris/v12 v12.2.5/go.mod h1:bf3oblPF8tQmRgyPCzPZr0mLazvEDFgImdaGZYuN4hw= +github.com/kataras/pio v0.0.12 h1:o52SfVYauS3J5X08fNjlGS5arXHjW/ItLkyLcKjoH6w= +github.com/kataras/pio v0.0.12/go.mod h1:ODK/8XBhhQ5WqrAhKy+9lTPS7sBf6O3KcLhc9klfRcY= +github.com/kataras/sitemap v0.0.6 h1:w71CRMMKYMJh6LR2wTgnk5hSgjVNB9KL60n5e2KHvLY= +github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4= +github.com/kataras/tunnel v0.0.4 h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA= +github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw= github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o= github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/labstack/echo/v4 v4.10.2 
h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M= -github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/labstack/echo/v4 v4.11.1 h1:dEpLU2FLg4UVmvCGPuk/APjlH6GDpbEPti61srUUUs4= +github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ= github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw= +github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -112,33 +166,58 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 
+github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg= +github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA= github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= +github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= +github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= +github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tdewolff/minify/v2 v2.12.9 h1:dvn5MtmuQ/DFMwqf5j8QhEVpPX6fi3WGImhv8RUB4zA= +github.com/tdewolff/minify/v2 v2.12.9/go.mod h1:qOqdlDfL+7v0/fyymB+OP497nIxJYSvX4MQWA8OoiXU= +github.com/tdewolff/parse/v2 v2.6.8 h1:mhNZXYCx//xG7Yq2e/kVLNZw4YfYmeHbhx+Zc0OvFMA= +github.com/tdewolff/parse/v2 v2.6.8/go.mod h1:XHDhaU6IBgsryfdnpzUXBlT6leW/l25yrFBTEb4eIyM= +github.com/tdewolff/test v1.0.9 h1:SswqJCmeN4B+9gEAi/5uqT0qpi1y2/2O47V/1hhGZT0= +github.com/tdewolff/test v1.0.9/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= github.com/temoto/robotstxt v1.1.2 h1:W2pOjSJ6SWvldyEuiFXNxz3xZ8aiWX5LbfDiOFd7Fxg= github.com/temoto/robotstxt v1.1.2/go.mod h1:+1AmkuG3IYkh1kv0d2qEB9Le88ehNO0zwOr3ujewlOo= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= @@ -150,8 +229,12 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/wcharczuk/go-chart/v2 v2.1.0 
h1:tY2slqVQ6bN+yHSnDYwZebLQFkphK4WNrVwnt7CJZ2I= -github.com/wcharczuk/go-chart/v2 v2.1.0/go.mod h1:yx7MvAVNcP/kN9lKXM/NTce4au4DFN99j6i1OwDclNA= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/wcharczuk/go-chart/v2 v2.1.1 h1:2u7na789qiD5WzccZsFz4MJWOJP72G+2kUuJoSNqWnE= +github.com/wcharczuk/go-chart/v2 v2.1.1/go.mod h1:CyCAUt2oqvfhCl6Q5ZvAZwItgpQKZOkCJGb+VGv6l14= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -159,91 +242,117 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA= +github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0= +github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= +github.com/yudai/gojsondiff v1.0.0/go.mod 
h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= +golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.5.0 h1:5JMiNunQeQw++mMOz48/ISeNu3Iweh/JaZU8ZLqHRrI= -golang.org/x/image v0.5.0/go.mod h1:FVC7BI/5Ym8R25iw5OLsgshdUBbT1h5jZTpA+mvAdZ4= -golang.org/x/image v0.8.0 h1:agUcRXV/+w6L9ryntYYsF2x9fQTMd4T8fiiYXAVW6Jg= -golang.org/x/image v0.8.0/go.mod h1:PwLxp3opCYg4WR2WO9P0L6ESnsD6bLTWcw8zanLMVFM= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/image v0.11.0/go.mod 
h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8= +golang.org/x/image v0.12.0 h1:w13vZbU4o5rKOFFR8y7M+c4A5jXDC0uXTdHYRP8X2DQ= +golang.org/x/image v0.12.0/go.mod h1:Lu90jvHG7GfemOIcldsh9A2hS01ocl6oNO7ype5mEnk= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod 
h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= 
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= +moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/influx.go b/influx.go index 901dd1c..639453a 100644 --- a/influx.go +++ b/influx.go @@ -6,14 +6,11 @@ import ( "fmt" "sort" - "github.com/COSI-Lab/logging" + "github.com/COSI-Lab/Mirror/logging" influxdb2 "github.com/influxdata/influxdb-client-go/v2" "github.com/influxdata/influxdb-client-go/v2/api" - "github.com/influxdata/influxdb-client-go/v2/api/write" ) -type DataPoint *write.Point - var writer api.WriteAPI var reader api.QueryAPI diff --git a/logging/README.md b/logging/README.md new file mode 100644 index 0000000..724eeed --- /dev/null +++ b/logging/README.md @@ -0,0 +1,21 @@ +# logging + +This module provides thread-safe logging. 
+ +![Screenshot](screenshot.png) + +## Usage + +```go +package main + +import ( + "github.com/COSI-Lab/Mirror/logging" +) + +func main() { + logging.Info("Hello, world!") + logging.Warn("Warning world didn't say hello back!") + logging.Error("Error world is broken!") +} +``` diff --git a/logging/logging.go b/logging/logging.go new file mode 100644 index 0000000..b78eb5b --- /dev/null +++ b/logging/logging.go @@ -0,0 +1,181 @@ +package logging + +import ( + "fmt" + "sync" + "time" +) + +type threadSafeLogger struct { + sync.Mutex +} + +var logger = threadSafeLogger{} + +type messageType int + +const ( + // InfoT is used for logging informational [INFO] messages + InfoT messageType = iota + // WarnT is used for logging warning [WARN] messages + WarnT + // ErrorT is used for logging error [ERROR] messages + ErrorT + // PanicT is used for logging panic [PANIC] messages + PanicT + // SuccessT is used for logging success [SUCCESS] messages + SuccessT +) + +const tm = "2006/01/02 15:04:05" + +func (mt messageType) String() string { + switch mt { + case InfoT: + return "\033[1m[INFO] \033[0m| " + case WarnT: + return "\033[1m\033[33m[WARN] \033[0m| " + case ErrorT: + return "\033[1m\033[31m[ERROR] \033[0m| " + case PanicT: + return "\033[1m\033[34m[PANIC] \033[0m| " + case SuccessT: + return "\033[1m\033[32m[SUCCESS] \033[0m| " + default: + return "" + } +} + +// LogEntryT enables programmatic creation of log entries +type LogEntryT struct { + typ messageType + time time.Time + message string +} + +// NewLogEntry creates a new LogEntryT with the current time +func NewLogEntry(mt messageType, message string) LogEntryT { + return LogEntryT{ + typ: mt, + time: time.Now(), + message: message, + } +} + +// InfoLogEntry creates a new LogEntryT with the current time and [INFO] tag +func InfoLogEntry(message string) LogEntryT { + return NewLogEntry(InfoT, message) +} + +// WarnLogEntry creates a new LogEntryT with the current time and [WARN] tag +func WarnLogEntry(message string) 
LogEntryT { + return NewLogEntry(WarnT, message) +} + +// ErrorLogEntry creates a new LogEntryT with the current time and [ERROR] tag +func ErrorLogEntry(message string) LogEntryT { + return NewLogEntry(ErrorT, message) +} + +// PanicLogEntry creates a new LogEntryT with the current time and [PANIC] tag +func PanicLogEntry(message string) LogEntryT { + return NewLogEntry(PanicT, message) +} + +// SuccessLogEntry creates a new LogEntryT with the current time and [SUCCESS] tag +func SuccessLogEntry(message string) LogEntryT { + return NewLogEntry(SuccessT, message) +} + +func (le LogEntryT) String() string { + if le.message[len(le.message)-1] != '\n' { + return fmt.Sprintf("%s %s %s\n", time.Now().Format(tm), le.typ.String(), le.message) + } + + return fmt.Sprintf("%s %s %s", time.Now().Format(tm), le.typ.String(), le.message) +} + +func logf(mt messageType, format string, v ...interface{}) { + logger.Lock() + if format[len(format)-1] != '\n' { + fmt.Printf("%s %s %s\n", time.Now().Format(tm), mt.String(), fmt.Sprintf(format, v...)) + } else { + fmt.Printf("%s %s %s", time.Now().Format(tm), mt.String(), fmt.Sprintf(format, v...)) + } + logger.Unlock() +} + +func logln(mt messageType, v ...interface{}) { + logger.Lock() + fmt.Printf("%s %s %s\n", time.Now().Format(tm), mt.String(), fmt.Sprint(v...)) + logger.Unlock() +} + +// Infof formats a message and logs it with [INFO] tag, it adds a newline if the message didn't end with one +func Infof(format string, v ...interface{}) { + logf(InfoT, format, v...) +} + +// Info logs a message with [INFO] tag and a newline +func Info(v ...interface{}) { + logln(InfoT, v...) +} + +// WarnF formats a message and logs it with [WARN] tag, it adds a newline if the message didn't end with one +func WarnF(format string, v ...interface{}) { + logf(WarnT, format, v...) +} + +// Warn logs a message with [WARN] tag and a newline +func Warn(v ...interface{}) { + logln(WarnT, v...)
+} + +// Errorf formats a message and logs it with [ERROR] tag, it adds a newline if the message didn't end with one +func Errorf(format string, v ...interface{}) { + logf(ErrorT, format, v...) +} + +// Error logs a message with [ERROR] tag and a newline +func Error(v ...interface{}) { + logln(ErrorT, v...) +} + +// Panicf formats a message and logs it with [PANIC] tag, it adds a newline if the message didn't end with one +// Note: this function does not call panic() or otherwise stops the program +func Panicf(format string, v ...interface{}) { + logf(PanicT, format, v...) +} + +// Panic logs a message with [PANIC] tag and a newline +// Note: this function does not call panic() or otherwise stops the program +func Panic(v ...interface{}) { + logln(PanicT, v...) +} + +// Successf formats a message and logs it with [SUCCESS] tag, it adds a newline if the message didn't end with one +func Successf(format string, v ...interface{}) { + logf(SuccessT, format, v...) +} + +// Success logs a message with [SUCCESS] tag and a newline +func Success(v ...interface{}) { + logln(SuccessT, v...) +} + +// Logf formats a message and logs it with provided tag, it adds a newline if the message didn't end with one +func Logf(mt messageType, format string, v ...interface{}) { + logf(mt, format, v...) +} + +// Log logs a message with provided tag and a newline +func Log(mt messageType, v ...interface{}) { + logln(mt, v...) 
+} + +// LogEntry logs a LogEntryT +func LogEntry(le LogEntryT) { + logger.Lock() + fmt.Print(le.String()) + logger.Unlock() +} diff --git a/logging/screenshot.png b/logging/screenshot.png new file mode 100644 index 0000000..48ae04d Binary files /dev/null and b/logging/screenshot.png differ diff --git a/main.go b/main.go index 99bbbe1..6ad9819 100644 --- a/main.go +++ b/main.go @@ -1,17 +1,19 @@ package main import ( + "context" + "errors" "fmt" + "net" "os" "os/signal" "runtime" - "runtime/debug" - "strconv" - "syscall" "time" + "github.com/COSI-Lab/Mirror/config" + "github.com/COSI-Lab/Mirror/logging" "github.com/COSI-Lab/geoip" - "github.com/COSI-Lab/logging" + "github.com/gofrs/flock" "github.com/joho/godotenv" ) @@ -19,8 +21,6 @@ var geoipHandler *geoip.GeoIPHandler // .env variables var ( - // ADM_GROUP - admGroup int = 0 // HOOK_URL and PING_URL and handled in the logging packages // MAXMIND_LICENSE_KEY maxmindLicenseKey string @@ -53,9 +53,6 @@ var ( ) func init() { - // Print it's process ID - logging.Info("PID:", os.Getpid()) - // Load the environment variables err := godotenv.Load() if err != nil { @@ -75,33 +72,9 @@ func init() { hookURL = os.Getenv("HOOK_URL") pingID = os.Getenv("PING_ID") pullToken = os.Getenv("PULL_TOKEN") - admGroupStr := os.Getenv("ADM_GROUP") torrentDir = os.Getenv("TORRENT_DIR") downloadDir = os.Getenv("DOWNLOAD_DIR") - if admGroupStr != "" { - admGroup, err = strconv.Atoi(admGroupStr) - - if err != nil { - logging.Warn("environment variable ADM_GROUP", err) - } else { - // Verify adm is in our list of groups - groups, err := os.Getgroups() - if err != nil { - logging.Warn("Could not get groups") - } - var foundAdmGroup bool - for _, group := range groups { - if group == admGroup { - foundAdmGroup = true - } - } - if !foundAdmGroup { - logging.Warn("ADM_GROUP is not in the list of usable groups") - } - } - } - // Check if the environment variables are set if maxmindLicenseKey == "" { logging.Warn("No MAXMIND_LICENSE_KEY 
environment variable found. GeoIP database will not be updated") @@ -161,27 +134,92 @@ func init() { } } -func loadConfig() *ConfigFile { - config := ParseConfig("configs/mirrors.json", "configs/mirrors.schema.json", "configs/tokens.txt") - return &config +func loadConfig() (*config.File, error) { + configFile, err := os.Open("configs/mirrors.json") + if err != nil { + return nil, errors.New("Could not open mirrors.json: " + err.Error()) + } + defer configFile.Close() + + schemaFile, err := os.Open("configs/mirrors.schema.json") + if err != nil { + return nil, errors.New("Could not open mirrors.schema.json: " + err.Error()) + } + defer schemaFile.Close() + + config, err := config.ReadProjectConfig(configFile, schemaFile) + if err != nil { + return nil, err + } + + return config, config.Validate() } -var restartCount int +func loadTokens() (*config.Tokens, error) { + tokensFile, err := os.Open("configs/tokens.toml") + if err != nil { + return nil, errors.New("Could not open tokens.toml: " + err.Error()) + } -func main() { - defer func() { - if r := recover(); r != nil { - restartCount++ - if restartCount > 3 { - logging.PanicWithAttachment(debug.Stack(), "Program panicked more than 3 times in an hour! 
Exiting.") - os.Exit(1) + tokens, err := config.ReadTokens(tokensFile) + if err != nil { + return nil, err + } + + return tokens, nil +} + +func startNGINX(config *config.File) (chan<- NGINXLogEntry, time.Time, error) { + nginxAg := NewNGINXProjectAggregator() + nginxAg.AddMeasurement("nginx", func(re NGINXLogEntry) bool { + return true + }) + + // Add subnet aggregators + for name, subnetStrings := range config.Subnets { + subnets := make([]*net.IPNet, 0) + for _, subnetString := range subnetStrings { + _, subnet, err := net.ParseCIDR(subnetString) + if err != nil { + logging.Warn("Failed to parse subnet", subnetString, "for", name) + continue } + subnets = append(subnets, subnet) + } - logging.PanicWithAttachment(debug.Stack(), "Program panicked and attempted to restart itself. Someone should ssh in and check it out.") - main() + if len(subnets) == 0 { + logging.Warn("No valid subnets for", name) + continue } - }() + nginxAg.AddMeasurement(name, func(re NGINXLogEntry) bool { + for _, subnet := range subnets { + if subnet.Contains(re.IP) { + return true + } + } + return false + }) + + logging.Info("Added subnet aggregator for", name) + } + + nginxMetrics := make(chan NGINXLogEntry) + nginxLastUpdated, err := StartAggregator[NGINXLogEntry](nginxAg, nginxMetrics, reader, writer) + + return nginxMetrics, nginxLastUpdated, err +} + +func startRSYNC() (chan<- RsyncdLogEntry, time.Time, error) { + rsyncAg := NewRSYNCProjectAggregator() + + rsyncMetrics := make(chan RsyncdLogEntry) + rsyncLastUpdated, err := StartAggregator[RsyncdLogEntry](rsyncAg, rsyncMetrics, reader, writer) + + return rsyncMetrics, rsyncLastUpdated, err +} + +func main() { // Enforce we are running linux or macos if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { fmt.Println("This program is only meant to be run on *nix systems") @@ -193,21 +231,42 @@ func main() { fmt.Println("This program should no longer be run as root") } - // Setup logging - logging.Setup(hookURL, pingID) + // Manage lock 
file to prevent multiple instances from running simultaneously + f := flock.New(os.TempDir() + "/mirror.lock") + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + go func() { + <-interrupt + if f.Locked() { + f.Unlock() + } + os.Exit(0) + }() - // Parse the config file - config := loadConfig() + locked, err := f.TryLock() + if err != nil { + logging.Error(f.Path(), " could not be locked: ", err) + os.Exit(1) + } + if !locked { + logging.Error(f.Path(), " is already locked") + os.Exit(1) + } - // Update the rsyncd.conf file based on the config file - createRsyncdConfig(config) - // createNginxRedirects(config) + // Parse the config file + cfg, err := loadConfig() + if err != nil { + logging.Error("Failed to load config file:", err) + os.Exit(1) + } - // We will always run the mirror map - map_entries := make(chan *NginxLogEntry, 100) + tokens, err := loadTokens() + if err != nil { + logging.Error("Failed to load tokens file:", err) + os.Exit(1) + } // GeoIP lookup - var err error if maxmindLicenseKey != "" { geoipHandler, err = geoip.NewGeoIPHandler(maxmindLicenseKey) if err != nil { @@ -215,115 +274,65 @@ func main() { } } - // Connect to the database - if influxToken == "" { - if nginxTail != "" { - // zero date - var zero time.Time - go TailNginxLogFile(nginxTail, zero, map_entries) - } else { - // if nginxTail is empty we attempt to read a local access log for testing - go ReadNginxLogFile("access.log", map_entries) - } - } else { - SetupInfluxClients(influxToken) - logging.Success("Connected to InfluxDB") + // Update rsyncd.conf file based on the config file + rsyncd_conf, err := os.OpenFile("/etc/rsyncd.conf", os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logging.Error("Could not open rsyncd.conf: ", err.Error()) + } + err = cfg.CreateRsyncdConfig(rsyncd_conf) + if err != nil { + logging.Error("Failed to create rsyncd.conf: ", err.Error()) + } - // Stats handling - nginxEntries := make(chan *NginxLogEntry, 100) - 
rsyncdEntries := make(chan *RsyncdLogEntry, 100) + nginxChannels := make([]chan<- NGINXLogEntry, 0) + nginxLastUpdated := time.Now() + rsyncChannels := make([]chan<- RsyncdLogEntry, 0) + rsyncLastUpdated := time.Now() - lastUpdated, err := InitStatistics(config.Mirrors) + if influxToken != "" { + // Setup reader and writer for influxdb + SetupInfluxClients(influxToken) + // Start the nginx aggregator + nginxMetrics, lastupdated, err := startNGINX(cfg) if err != nil { - logging.Error("Failed to initialize statistics. Not tracking statistics", err) + logging.Error("Failed to start nginx aggregator:", err) + nginxLastUpdated = time.Now() } else { - logging.Success("Initialized statistics") - go HandleStatistics(nginxEntries, rsyncdEntries) - - if nginxTail != "" { - go TailNginxLogFile(nginxTail, lastUpdated, nginxEntries, map_entries) - } else { - // if nginxTail is empty we attempt to read a local file for testing - go ReadNginxLogFile("access.log", nginxEntries, map_entries) - } + nginxChannels = append(nginxChannels, nginxMetrics) + nginxLastUpdated = lastupdated + } - if rsyncdTail != "" { - go TailRSyncdLogFile(rsyncdTail, lastUpdated, rsyncdEntries) - } else { - // if rsyncdTail is empty we attempt to read a local file for testing - go ReadRsyncdLogFile("rsyncd.log", rsyncdEntries) - } + // Start the rsync aggregator + rsyncMetrics, lastupdated, err := startRSYNC() + if err != nil { + logging.Error("Failed to start rsync aggregator:", err) + rsyncLastUpdated = time.Now() + } else { + rsyncChannels = append(rsyncChannels, rsyncMetrics) + rsyncLastUpdated = lastupdated } } - // Listen for sighup - sighup := make(chan os.Signal, 1) - signal.Notify(sighup, syscall.SIGHUP) - - var manual chan string - - if schedulerPaused { - go func() { - for { - <-sighup - logging.Info("Received SIGHUP") - - config = loadConfig() - logging.Info("Reloaded config") - - WebserverLoadConfig(config) - logging.Info("Reloaded projects page") - } - }() - } else { - // rsync scheduler - 
stop := make(chan struct{}) - manual = make(chan string) - rsyncStatus := make(RSYNCStatus) - go handleSyncs(config, rsyncStatus, manual, stop) - - go func() { - for { - <-sighup - logging.Info("Received SIGHUP") - - config = loadConfig() - logging.Info("Reloaded config") - - WebserverLoadConfig(config) - logging.Info("Reloaded projects page") - - // stop the rsync scheduler - stop <- struct{}{} - <-stop - - // restart the rsync scheduler - rsyncStatus := make(RSYNCStatus) - go handleSyncs(config, rsyncStatus, manual, stop) - } - }() - } + manual := make(chan string) + scheduler := NewScheduler(cfg, context.Background()) + go scheduler.Start(manual) // torrent scheduler - // TODO: handle reload if torrentDir != "" && downloadDir != "" { - go HandleTorrents(config, torrentDir, downloadDir) + go HandleTorrents(cfg, torrentDir, downloadDir) } - // Webserver - WebserverLoadConfig(config) - go HandleWebserver(manual, map_entries) + // WebServer + mapEntries := make(chan NGINXLogEntry) + nginxChannels = append(nginxChannels, mapEntries) - go HandleCheckIn() + WebServerLoadConfig(cfg, tokens) + go HandleWebServer(manual, mapEntries) - go checkOldLogs() + go TailNGINXLogFile(nginxTail, nginxLastUpdated, nginxChannels) + go TailRSYNCLogFile(rsyncdTail, rsyncLastUpdated, rsyncChannels) - for { - logging.Info(runtime.NumGoroutine(), "goroutines") - time.Sleep(time.Hour) - - // Reset the restart count - restartCount = 0 - } + // Wait forever + select {} } diff --git a/map.go b/map.go index 70d2b3e..27485d9 100644 --- a/map.go +++ b/map.go @@ -4,7 +4,7 @@ import ( "net" "net/http" - "github.com/COSI-Lab/logging" + "github.com/COSI-Lab/Mirror/logging" "github.com/gorilla/mux" "github.com/gorilla/websocket" ) @@ -13,7 +13,7 @@ var upgrader = websocket.Upgrader{} var h hub // Upgrade the connection to a websocket and start the client -func HandleWebsocket(w http.ResponseWriter, r *http.Request) { +func handleWebsocket(w http.ResponseWriter, r *http.Request) { // Upgrade the 
connection to a websocket conn, err := upgrader.Upgrade(w, r, nil) if err != nil { @@ -100,7 +100,7 @@ func (c *client) write() { } func MapRouter(r *mux.Router, broadcast chan []byte) { - r.HandleFunc("/ws", HandleWebsocket) + r.HandleFunc("/ws", handleWebsocket) r.HandleFunc("/health", handleHealth) // Create a new hub @@ -115,7 +115,7 @@ func MapRouter(r *mux.Router, broadcast chan []byte) { go h.run() } -func entriesToMessages(entries chan *NginxLogEntry, messages chan []byte) { +func entriesToMessages(entries <-chan NGINXLogEntry, messages chan<- []byte) { // Send groups of 8 messages ch := make(chan []byte) go func() { @@ -131,27 +131,23 @@ func entriesToMessages(entries chan *NginxLogEntry, messages chan []byte) { // Track the previous IP to avoid sending duplicate data prevIP := net.IPv4(0, 0, 0, 0) for { entry := <-entries - // If the lookup failed, skip this entry - if entry == nil || entry.City == nil { + // Skip the entry if it's an immediate duplicate + if prevIP.Equal(entry.IP) { continue } + prevIP = entry.IP - // Skip if the IP is the same as the previous one - if prevIP.Equal(entry.IP) { + if entry.City == nil { continue } - // Update the previous IP - prevIP = entry.IP - - // Get the distro - project, ok := projects[entry.Distro] - if !ok { + // NOTE(review): condition looks inverted — this `continue`s when the project IS present in the map, and the nil-map lookup below would then be dereferenced for unknown projects; should this be `== nil`? Confirm against the old `project, ok := ...; if !ok` logic being replaced + if projects[entry.Project] != nil { continue } + id := projects[entry.Project].ID // Get the location lat_ := entry.City.Location.Latitude @@ -169,7 +165,7 @@ func entriesToMessages(entries chan *NginxLogEntry, messages chan []byte) { // Create a new message msg := make([]byte, 5) // First byte is the project ID - msg[0] = project.Id + msg[0] = id // Second and Third byte are the latitude msg[1] = byte(lat >> 8) msg[2] = byte(lat & 0xFF) diff --git a/nginx.go b/nginx.go deleted file mode 100644 index 63b30e6..0000000 --- a/nginx.go +++ /dev/null @@ -1,206 +0,0 @@ -package main - -import ( - "bufio" - "errors" - "io" - "net" - "os" -
"regexp" - "strconv" - "strings" - "time" - - "github.com/COSI-Lab/datarithms" - "github.com/COSI-Lab/logging" - "github.com/IncSW/geoip2" - "github.com/nxadm/tail" -) - -// It is critical that NGINX uses the following log format: -/* - * log_format config '"$time_local" "$remote_addr" "$request" "$status" "$body_bytes_sent" "$request_length" "$http_user_agent"'; - * access_log /var/log/nginx/access.log config; - */ - -// NginxLogEntry is a struct that represents a parsed nginx log entry -type NginxLogEntry struct { - IP net.IP - City *geoip2.CityResult - Time time.Time - Method string - Distro string - Url string - Version string - Status int - BytesSent int64 - BytesRecv int64 - Agent string -} - -var reQuotes = regexp.MustCompile(`"(.*?)"`) - -// ReadNginxLogFile is a testing function that simulates tailing a log file by reading it line by line with some delay between lines -func ReadNginxLogFile(logFile string, channels ...chan *NginxLogEntry) (err error) { - for { - f, err := os.Open(logFile) - if err != nil { - return err - } - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - entry, err := parseNginxLine(scanner.Text()) - if err == nil { - // Send a pointer to the entry down each channel - for ch := range channels { - channels[ch] <- entry - } - } - - time.Sleep(100 * time.Millisecond) - } - - f.Close() - } -} - -// TailNginxLogFile tails a log file and sends the parsed log entries to the specified channels -func TailNginxLogFile(logFile string, lastUpdated time.Time, channels ...chan *NginxLogEntry) { - // Find the offset of the line where the date is past lastUpdated - start := time.Now() - offset, err := datarithms.BinarySearchFileByDate(logFile, lastUpdated, parseNginxDate) - if err != nil { - logging.Error(err) - return - } - logging.Info("Found nginx log offset in", time.Since(start)) - - // Tail the log file `tail -F` starting at the offset - seek := tail.SeekInfo{ - Offset: offset, - Whence: io.SeekStart, - } - tail, err := 
tail.TailFile(logFile, tail.Config{Follow: true, ReOpen: true, MustExist: true, Location: &seek}) - if err != nil { - logging.Error("Failed to start tailing `nginx.log`:", err) - return - } - - logging.Success("Tailing nginx log file") - - // Parse each line as we receive it - for line := range tail.Lines { - entry, err := parseNginxLine(line.Text) - - if err == nil { - // Send a pointer to the entry down each channel - for ch := range channels { - channels[ch] <- entry - } - } - } -} - -// parseNginxDate parses a single line of the nginx log file and returns the time.Time of the line -func parseNginxDate(line string) (time.Time, error) { - tm, err := time.Parse("\"02/Jan/2006:15:04:05 -0700\"", reQuotes.FindString(line)) - if err != nil { - return time.Time{}, err - } - return tm, nil -} - -// parseNginxLine parses a single line of the nginx log file -// It's critical the log file uses the correct format found at the top of this file -// If the log file is not in the correct format or if some other part of the parsing fails -// this function will return an error -func parseNginxLine(line string) (*NginxLogEntry, error) { - // "$time_local" "$remote_addr" "$request" "$status" "$body_bytes_sent" "$request_length" "$http_user_agent"; - quoteList := reQuotes.FindAllString(line, -1) - - if len(quoteList) != 7 { - return nil, errors.New("invalid number of parameters in log entry") - } - - // Trim quotation marks - for i := 0; i < len(quoteList); i++ { - quoteList[i] = quoteList[i][1 : len(quoteList[i])-1] - } - - var entry NginxLogEntry - var err error - - // Time - t := "02/Jan/2006:15:04:05 -0700" - tm, err := time.Parse(t, quoteList[0]) - if err != nil { - return nil, err - } - entry.Time = tm - - // IPv4 or IPv6 address - entry.IP = net.ParseIP(quoteList[1]) - if entry.IP == nil { - return nil, errors.New("failed to parse ip") - } - - // Optional GeoIP lookup - if geoipHandler != nil { - city, err := geoipHandler.Lookup(entry.IP) - if err != nil { - entry.City = nil 
- } else { - entry.City = city - } - } else { - entry.City = nil - } - - // Method url http version - split := strings.Split(quoteList[2], " ") - if len(split) != 3 { - // this should never fail - return nil, errors.New("invalid number of strings in request") - } - entry.Method = split[0] - entry.Url = split[1] - entry.Version = split[2] - - // Distro is the top level of the URL path - split = strings.Split(entry.Url, "/") - - if len(split) >= 2 { - entry.Distro = split[1] - } else { - return nil, errors.New("invalid number of parts in url") - } - - // HTTP response status - status, err := strconv.Atoi(quoteList[3]) - if err != nil { - // this should never fail - return nil, errors.New("could not parse http response status") - } - entry.Status = status - - // Bytes sent int64 - bytesSent, err := strconv.ParseInt(quoteList[4], 10, 64) - if err != nil { - // this should never fail - return nil, errors.New("could not parse bytes_sent") - } - entry.BytesSent = bytesSent - - // Bytes received - bytesRecv, err := strconv.ParseInt(quoteList[5], 10, 64) - if err != nil { - return nil, errors.New("could not parse bytes_recv") - } - entry.BytesRecv = bytesRecv - - // User agent - entry.Agent = quoteList[6] - - return &entry, nil -} diff --git a/rsyncd.go b/rsyncd.go deleted file mode 100644 index 74bc056..0000000 --- a/rsyncd.go +++ /dev/null @@ -1,148 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" - "time" - - "github.com/COSI-Lab/datarithms" - "github.com/COSI-Lab/logging" - "github.com/nxadm/tail" -) - -type RsyncdLogEntry struct { - time time.Time - sent int64 - recv int64 -} - -func ReadRsyncdLogFile(logFile string, ch chan *RsyncdLogEntry) (err error) { - for { - f, err := os.Open(logFile) - if err != nil { - return err - } - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - entry, err := parseRsyncdLine(scanner.Text()) - if err == nil { - // Send a pointer to the entry down the channel - ch <- entry - } - - 
time.Sleep(10 * time.Millisecond) - } - - f.Close() - } -} - -func TailRSyncdLogFile(logFile string, lastUpdated time.Time, ch chan *RsyncdLogEntry) { - // Find the offset of the line where the date is past lastUpdated - start := time.Now() - offset, err := datarithms.BinarySearchFileByDate(logFile, lastUpdated, parseRsyncdDate) - if err != nil { - logging.Error(err) - return - } - logging.Info("Found rsyncd log offset in", time.Since(start)) - - // Tail the log file `tail -F` starting at the offset - seek := tail.SeekInfo{ - Offset: offset, - Whence: io.SeekStart, - } - tail, err := tail.TailFile(logFile, tail.Config{Follow: true, ReOpen: true, MustExist: true, Location: &seek}) - if err != nil { - logging.Error("Failed to start tailing `rsyncd.log`:", err) - return - } - - logging.Success("Tailing rsyncd log file") - - // Parse each line as we receive it - for line := range tail.Lines { - entry, err := parseRsyncdLine(line.Text) - - if err == nil { - // Send a pointer to the entry down each channel - ch <- entry - } - } -} - -type ParseLineError struct{} - -func (e ParseLineError) Error() string { - return "Failed to parse line" -} - -func parseRsyncdDate(line string) (time.Time, error) { - // Split the line over whitespace - parts := strings.Split(line, " ") - - if len(parts) < 2 { - return time.Time{}, ParseLineError{} - } - - // The 1st part is the date - dt := parts[0] - // 2nd part is the time - tm := parts[1] - - // make the time.Time object - t, err := time.Parse("2006/01/02 15:04:05", dt+" "+tm) - if err != nil { - return time.Time{}, err - } - - return t, nil -} - -func parseRsyncdLine(line string) (*RsyncdLogEntry, error) { - // 2022/04/20 20:00:10 [pid] sent XXX bytes received XXX bytes total size XXX - - // Split the line over whitespace - parts := strings.Split(line, " ") - - // the line we want has 14 parts - if len(parts) != 14 { - return nil, ParseLineError{} - } - - // the 4th part is "sent" - if parts[3] != "sent" { - return nil, 
ParseLineError{} - } - - // The 1st part is the date - dt := parts[0] - // 2nd part is the time - tm := parts[1] - - // make the time.Time object - t, err := time.Parse("2006/01/02 15:04:05", dt+" "+tm) - if err != nil { - return nil, err - } - - // part 5 is the number of bytes sent - sent, err := strconv.ParseInt(parts[4], 10, 64) - if err != nil { - fmt.Println(err) - return nil, ParseLineError{} - } - - recv, err := strconv.ParseInt(parts[8], 10, 64) - if err != nil { - fmt.Println(err) - return nil, ParseLineError{} - } - - return &RsyncdLogEntry{sent: sent, recv: recv, time: t}, nil -} diff --git a/scheduler/calander.go b/scheduler/calander.go new file mode 100644 index 0000000..784beba --- /dev/null +++ b/scheduler/calander.go @@ -0,0 +1,115 @@ +package scheduler + +import "time" + +// Calendar is a struct that holds a list of tasks and their corresponding time to run [0, 1) +// +// The invariant is that the time must be increasing. +// So the algorithm is trivial. Run the task, sleep until the next start time, repeat. 
+type Calendar[T any] struct { + tasks []T + times []float32 + iterator int +} + +// Returns the job to run and how long to sleep until the next job +func (s *Calendar[T]) NextJob() (task T, dt time.Duration) { + // Calculate the time since midnight + now := time.Now().UTC() + pos := time.Since(time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)) + + // Convert time to position in the schedule (0.0 <= t <= 1.0) + t := float32(pos) / float32(24*time.Hour) + + // Find the first job that is greater than the current time + for s.iterator < len(s.tasks) && s.times[s.iterator] <= t { + s.iterator++ + } + + // If we are at the end of the schedule, sleep until midnight + if s.iterator == len(s.tasks) { + s.iterator = 0 + dt = time.Until(time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, time.UTC)) + return s.tasks[len(s.tasks)-1], dt + } + + // Time to sleep until the next job + dt = time.Duration((s.times[s.iterator] - t) * float32(24*time.Hour)) + + return s.tasks[s.iterator-1], dt +} + +// Scheduling algorithm +func BuildCalendar[T any](tasks []T, timesPerDay []uint) Calendar[T] { + total_jobs := uint(0) + for _, n := range timesPerDay { + total_jobs += n + } + + // Compute least common multiple of all sync frequencies + lcm := uint(1) + for _, n := range timesPerDay { + // compute the greatest common divisor of best known LCM and sync frequency of the current task + var ( + a uint + b uint + ) + + if lcm > n { + a = lcm + b = n + } else { + a = n + b = lcm + } + for b != 0 { + rem := a % b + a = b + b = rem + } + + // now a is the GCD; we can compute the next LCM + // TODO: check for overflow in multiplication + lcm = lcm * n / a + } + + jobs := make([]T, total_jobs) + times := make([]float32, total_jobs) + + var interval float32 = 1.0 / float32(total_jobs) + c := 0 + for i := uint(0); i < lcm; i++ { + for idx, task := range tasks { + n := timesPerDay[idx] + if i%(lcm/n) == 0 { + // emit a job + tasks[c] = task + times[c] = interval * 
float32(c) + c += 1 + } + } + } + + return Calendar[T]{ + tasks: jobs, + times: times, + iterator: 0, + } +} + +// Applies a function to each task in the calendar +func (s *Calendar[T]) ForEach(f func(*T)) { + for i := range s.tasks { + f(&s.tasks[i]) + } +} + +// Finds the first task that satisfies the predicate +func (s *Calendar[T]) Find(f func(T) bool) *T { + for i := range s.tasks { + if f(s.tasks[i]) { + return &s.tasks[i] + } + } + return nil +} diff --git a/sync.go b/sync.go index 9c9afe4..09b2f11 100644 --- a/sync.go +++ b/sync.go @@ -1,344 +1,219 @@ package main import ( + "bufio" + "context" "fmt" + "io" "os" - "os/exec" - "strings" "sync" "time" - "github.com/COSI-Lab/datarithms" - "github.com/COSI-Lab/logging" + "github.com/COSI-Lab/Mirror/config" + "github.com/COSI-Lab/Mirror/datarithms" + "github.com/COSI-Lab/Mirror/logging" + "github.com/COSI-Lab/Mirror/scheduler" ) -type Status struct { - StartTime int64 `json:"startTime"` - EndTime int64 `json:"endTime"` - ExitCode int `json:"exitCode"` -} -type RSYNCStatus map[string]*datarithms.CircularQueue[Status] - -var rsyncErrorCodes map[int]string -var syncLock sync.Mutex -var syncLocks = make(map[string]bool) - -func init() { - rsyncErrorCodes = make(map[int]string) - rsyncErrorCodes[0] = "Success" - rsyncErrorCodes[1] = "Syntax or usage error" - rsyncErrorCodes[2] = "Protocol incompatibility" - rsyncErrorCodes[3] = "Errors selecting input/output files, dirs" - rsyncErrorCodes[4] = "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them; or an option was specified that is supported by the client and not by the server." 
- rsyncErrorCodes[5] = "Error starting client-server protocol" - rsyncErrorCodes[6] = "Daemon unable to append to log-file" - rsyncErrorCodes[10] = "Error in socket I/O" - rsyncErrorCodes[11] = "Error in file I/O" - rsyncErrorCodes[12] = "Error in rsync protocol data stream" - rsyncErrorCodes[13] = "Errors with program diagnostics" - rsyncErrorCodes[14] = "Error in IPC code" - rsyncErrorCodes[20] = "Received SIGUSR1 or SIGINT" - rsyncErrorCodes[21] = "Some error returned by waitpid()" - rsyncErrorCodes[22] = "Error allocating core memory buffers" - rsyncErrorCodes[23] = "Partial transfer due to error" - rsyncErrorCodes[24] = "Partial transfer due to vanished source files" - rsyncErrorCodes[25] = "The --max-delete limit stopped deletions" - rsyncErrorCodes[30] = "Timeout in data send/receive" - rsyncErrorCodes[35] = "Timeout waiting for daemon connection" - - // Create the log directory - if syncLogs != "" { - err := os.MkdirAll(syncLogs, 0755) +// TaskStatus is an enum of possible return statuses for a task +type TaskStatus int + +const ( + // TaskStatusSuccess indicates that the task completed successfully + TaskStatusSuccess TaskStatus = iota + // TaskStatusFailure indicates that the task failed to complete + TaskStatusFailure + // TaskStatusTimeout indicates that the task failed to complete within the allotted time + TaskStatusTimeout + // TaskStatusStopped indicates that the task was stopped before it could complete by the scheduler + TaskStatusStopped +) - if err != nil { - logging.Warn("failed to create RSYNC_LOGS directory", syncLogs, err, "not saving rsync logs") - syncLogs = "" - } else { - logging.Success("opened RSYNC_LOGS directory", syncLogs) - } - } +// Tasks are the units of work to be preformed by the scheduler +// +// Each task runs in its own go-routine and the scheduler ensures that only one instance of task `Run` will be called at a time +type Task interface { + Run(stdout io.Writer, stderr io.Writer, status chan<- logging.LogEntryT, context 
context.Context) TaskStatus } -func rsync(project *Project, options string) ([]byte, *os.ProcessState) { - // split up the options TODO maybe precompute this? - // actually in hindsight this whole thing can be precomputed - args := strings.Split(options, " ") - - // Run with dry run if specified - if syncDryRun { - args = append(args, "--dry-run") - logging.Info("Syncing", project.Short, "with --dry-run") - } - - // Set the source and destination - if project.Rsync.User != "" { - args = append(args, fmt.Sprintf("%s@%s::%s", project.Rsync.User, project.Rsync.Host, project.Rsync.Src)) - } else { - args = append(args, fmt.Sprintf("%s::%s", project.Rsync.Host, project.Rsync.Src)) - } - args = append(args, project.Rsync.Dest) - - command := exec.Command("rsync", args...) - - // Add the password environment variable if needed - if project.Rsync.Password != "" { - command.Env = append(os.Environ(), "RSYNC_PASSWORD="+project.Rsync.Password) - } - - logging.Info(command) - - output, _ := command.CombinedOutput() - - return output, command.ProcessState +type sync_result_t struct { + start time.Time + end time.Time + status TaskStatus } -func appendToLogFile(short string, data []byte) { - // Get month - month := fmt.Sprintf("%02d", time.Now().UTC().Month()) - - // Open the log file - path := syncLogs + "/" + short + "-" + month + ".log" - file, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0640) - if err != nil { - logging.Warn("failed to open log file ", path, err) - } +// Scheduler is the main task scheduler. 
It's passed a context that can be used to stop all associated tasks +type Scheduler struct { + ctx context.Context - if admGroup != 0 { - // Set the file to be owned by the adm group - err = file.Chown(os.Getuid(), admGroup) - if err != nil { - logging.Warn("failed to set log file ownership", path, err) - } - } - - // Write to the log file - _, err = file.Write(data) - if err != nil { - logging.Warn("failed to write to log file ", path, err) - } + calendar scheduler.Calendar[*SchedulerTask] } -func syncProject(config *ConfigFile, status RSYNCStatus, short string) { - logging.Info("Running job: SYNC", short) - - // Lock the project - syncLock.Lock() - if syncLocks[short] { - syncLock.Unlock() - logging.Warn("Sync is already running for ", short) - return - } - syncLocks[short] = true - syncLock.Unlock() +// SchedulerTask wraps a `task` to provide storage for stdout, stderr, and a channel for logging +type SchedulerTask struct { + sync.Mutex + running bool - start := time.Now() + short string - if config.Mirrors[short].SyncStyle == "rsync" { - // 1 stage syncs are the norm - output1, state1 := rsync(config.Mirrors[short], config.Mirrors[short].Rsync.Options) - status[short].Push(Status{StartTime: start.Unix(), EndTime: time.Now().Unix(), ExitCode: state1.ExitCode()}) + queue *datarithms.CircularQueue[logging.LogEntryT] + results *datarithms.CircularQueue[sync_result_t] - // append stage 1 to its log file - if syncLogs != "" { - appendToLogFile(short, []byte("\n\n"+start.Format(time.RFC1123)+"\n")) - appendToLogFile(short, output1) - } - - checkRSYNCState(short, state1, output1) - - // 2 stage syncs happen sometimes - if config.Mirrors[short].Rsync.Second != "" { - start = time.Now() - output2, state2 := rsync(config.Mirrors[short], config.Mirrors[short].Rsync.Second) - status[short].Push(Status{StartTime: start.Unix(), EndTime: time.Now().Unix(), ExitCode: state2.ExitCode()}) - - if syncLogs != "" { - appendToLogFile(short, 
[]byte("\n\n"+start.Format(time.RFC1123)+"\n")) - appendToLogFile(short, output2) - } + channel chan logging.LogEntryT + stdout *bufio.Writer + stderr *bufio.Writer + task Task +} - checkRSYNCState(short, state2, output2) +func NewScheduler(config *config.File, ctx context.Context) Scheduler { + failed := false + month := time.Now().UTC().Month() + + tasks := make([]*SchedulerTask, 0, len(config.Projects)) + timesPerDay := make([]uint, 0, len(config.Projects)) + + for short, project := range config.Projects { + var task Task + var syncsPerDay uint + + switch project.SyncStyle { + case "rsync": + task = NewRsyncTask(project.Rsync, short) + syncsPerDay = project.Rsync.SyncsPerDay + case "script": + task = NewScriptTask(project.Script, short) + syncsPerDay = project.Script.SyncsPerDay + default: + continue } - // A few mirrors are 3 stage syncs - if config.Mirrors[short].Rsync.Third != "" { - start = time.Now() - output3, state3 := rsync(config.Mirrors[short], config.Mirrors[short].Rsync.Third) - status[short].Push(Status{StartTime: start.Unix(), EndTime: time.Now().Unix(), ExitCode: state3.ExitCode()}) + q := datarithms.NewCircularQueue[logging.LogEntryT](64) + results := datarithms.NewCircularQueue[sync_result_t](64) - if syncLogs != "" { - appendToLogFile(short, []byte("\n\n"+start.Format(time.RFC1123)+"\n")) - appendToLogFile(short, output3) + channel := make(chan logging.LogEntryT, 64) + go func() { + for { + select { + case <-ctx.Done(): + return + case entry := <-channel: + q.Push(entry) + } } + }() - checkRSYNCState(short, state3, output3) - } - } else if config.Mirrors[short].SyncStyle == "script" { - if syncDryRun { - logging.Info("Did not sync", short, "because --dry-run was specified") - return - } - - // Execute the script - logging.Info(config.Mirrors[short].Script.Command, config.Mirrors[short].Script.Arguments) - command := exec.Command(config.Mirrors[short].Script.Command, config.Mirrors[short].Script.Arguments...) 
- output, _ := command.CombinedOutput() - - if syncLogs != "" { - appendToLogFile(short, []byte("\n\n"+start.Format(time.RFC1123)+"\n")) - appendToLogFile(short, output) + stdout, err := os.OpenFile(fmt.Sprintf("/var/log/mirror/%s-%s.log", short, month), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logging.Error("Failed to open stdout file for project ", short, ": ", err) + failed = true } - } - - // Unlock the project - syncLock.Lock() - syncLocks[short] = false - syncLock.Unlock() -} - -// handleSyncs is the main scheduler -// It builds a schedule of when to sync projects in such a way they are equally spaced across the day -// tasks are run in a separate goroutine and there is a lock to prevent the same project from being synced simultaneously -// the stop channel gracefully stops the scheduler after all active rsync tasks have completed -// the manual channel is used to manually sync a project, assuming it is not already currently syncing -func handleSyncs(config *ConfigFile, status RSYNCStatus, manual <-chan string, stop chan struct{}) { - for _, mirror := range config.Mirrors { - if mirror.Rsync.SyncsPerDay > 0 { - // Store a weeks worth of status messages in memory - status[mirror.Short] = datarithms.CircularQueueInit[Status](7 * mirror.Rsync.SyncsPerDay) + stderr, err := os.OpenFile(fmt.Sprintf("/var/log/mirror/%s-%s.err", short, month), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logging.Error("Failed to open stderr file for project ", short, ": ", err) + failed = true } - } - // prepare the tasks - tasks := make([]datarithms.Task, 0, len(config.Mirrors)) - for _, mirror := range config.Mirrors { - if mirror.SyncStyle == "rsync" { - tasks = append(tasks, datarithms.Task{ - Short: mirror.Short, - Syncs: mirror.Rsync.SyncsPerDay, - }) - } else if mirror.SyncStyle == "script" { - tasks = append(tasks, datarithms.Task{ - Short: mirror.Short, - Syncs: mirror.Script.SyncsPerDay, - }) - } + tasks = append(tasks, 
&SchedulerTask{ + running: false, + short: short, + queue: q, + results: results, + channel: channel, + stdout: bufio.NewWriter(stdout), + stderr: bufio.NewWriter(stderr), + task: task, + }) + timesPerDay = append(timesPerDay, syncsPerDay) } - // build the schedule - schedule := datarithms.BuildSchedule(tasks) - - // error checking on the schedule - if !datarithms.Verify(schedule, tasks) { - // A "warn" should do because a human should always be watching this when it's called - logging.Warn("RSYNC schedule fails verification") + if failed { + logging.Error("One or more errors occurred while setting up the scheduler") + os.Exit(1) } - // a project can only be syncing once at a time - syncLock = sync.Mutex{} - syncLocks = make(map[string]bool) - for _, project := range config.Mirrors { - syncLocks[project.Short] = false + return Scheduler{ + ctx: ctx, + calendar: scheduler.BuildCalendar[*SchedulerTask](tasks, timesPerDay), } +} - // skip the first job - _, sleep := schedule.NextJob() - timer := time.NewTimer(sleep) - - logging.Success("RSYNC scheduler started, next sync in", sleep) +// Start begins the scheduler and blocks until the context is canceled +// +// manual is a channel that can be used to manually trigger a project sync +func (sc *Scheduler) Start(manual <-chan string) { + timer := time.NewTimer(0) + month := time.NewTimer(waitMonth()) - // run the schedule for { select { - case <-stop: - logging.Info("RSYNC scheduler stopping...") - timer.Stop() - - // Wait for all the rsync tasks to finish - for { - // Check if all the rsync tasks are done - syncLock.Lock() - allDone := true - for _, running := range syncLocks { - if running { - allDone = false - break - } - } - syncLock.Unlock() - - // If all the rsync tasks are done, break - if allDone { - break - } - - time.Sleep(time.Second) - } - - // Respond to the stop signal - stop <- struct{}{} + case <-sc.ctx.Done(): return + case <-month.C: + month.Reset(waitMonth()) + month := time.Now().Local().Month() + 
sc.calendar.ForEach( + func(task **SchedulerTask) { + t := *task + t.Lock() + t.stdout.Flush() + t.stderr.Flush() + // Create new files for the next month + stdout, err := os.OpenFile(fmt.Sprintf("/var/log/mirror/%s-%s.log", t.short, month), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logging.Error("Failed to open stdout file for project ", t.short, ": ", err) + } else { + t.stdout.Reset(stdout) + } + stderr, err := os.OpenFile(fmt.Sprintf("/var/log/mirror/%s-%s.err", t.short, month), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logging.Error("Failed to open stderr file for project ", t.short, ": ", err) + } else { + t.stderr.Reset(stderr) + } + t.Unlock() + }) case <-timer.C: - short, sleep := schedule.NextJob() - timer.Reset(sleep + time.Second) - - go syncProject(config, status, short) + t, dt := sc.calendar.NextJob() + timer.Reset(dt) + t.runTask(sc.ctx) case short := <-manual: - go syncProject(config, status, short) - } - } -} - -func checkRSYNCState(short string, state *os.ProcessState, output []byte) { - if state != nil && state.Success() { - logging.Success("Job rsync:", short, "finished successfully") - } else { - if state.ExitCode() == 23 || state.ExitCode() == 24 { - // states 23 "Partial transfer due to error" and 24 "Partial transfer" are not considered important enough to message discord - logging.Error("Job rsync: ", short, " failed. Exit code: ", state.ExitCode(), " ", rsyncErrorCodes[state.ExitCode()]) - } else { - // We have some human readable error descriptions - if meaning, ok := rsyncErrorCodes[state.ExitCode()]; ok { - logging.ErrorWithAttachment(output, "Job rsync: ", short, " failed. Exit code: ", state.ExitCode(), " ", meaning) - } else { - logging.ErrorWithAttachment(output, "Job rsync: ", short, " failed. 
Exit code: ", state.ExitCode()) - } + t := *sc.calendar.Find(func(t *SchedulerTask) bool { + return t.short == short + }) + t.runTask(sc.ctx) } } } -// On start up then once a week checks and deletes all logs older than 3 months -func checkOldLogs() { - ticker := time.NewTicker(168 * time.Hour) - deleteOldLogs() - - for range ticker.C { - deleteOldLogs() +// runTask handles locking and unlocking the task and logging the results +func (t *SchedulerTask) runTask(ctx context.Context) { + t.Lock() + if t.running { + t.Unlock() + return } + t.running = true + t.Unlock() + + go func() { + start := time.Now() + status := t.task.Run(t.stdout, t.stderr, t.channel, ctx) + t.stdout.Flush() + t.stderr.Flush() + end := time.Now() + t.results.Push(sync_result_t{ + start: start, + end: end, + status: status, + }) + t.Lock() + t.running = false + t.Unlock() + }() } -// deletes all logs older than 3 months -func deleteOldLogs() { - logFiles, err := os.ReadDir(syncLogs) - if err != nil { - logging.Error(err) - } else { - for _, logFile := range logFiles { - path := syncLogs + "/" + logFile.Name() - fileStat, err := os.Stat(path) - if err != nil { - logging.Warn(err) - } else { - modTime := fileStat.ModTime() - if modTime.Before(time.Now().Add(-2160 * time.Hour)) { - err = os.Remove(path) - if err != nil { - logging.Warn(err) - } else { - logging.Info("removed " + path) - } - } - } - } - } +// waitMonth returns a timer that will fire at the beginning of the next month +func waitMonth() time.Duration { + now := time.Now().UTC() + return time.Until(time.Date(now.Year(), now.Month()+1, 1, 0, 0, 0, 0, time.Local)) } diff --git a/sync_rsync.go b/sync_rsync.go new file mode 100644 index 0000000..1bc6ccd --- /dev/null +++ b/sync_rsync.go @@ -0,0 +1,154 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + + "github.com/COSI-Lab/Mirror/config" + "github.com/COSI-Lab/Mirror/logging" +) + +var rsyncErrorCodes map[int]string + +func init() { + rsyncErrorCodes = 
make(map[int]string) + rsyncErrorCodes[0] = "Success" + rsyncErrorCodes[1] = "Syntax or usage error" + rsyncErrorCodes[2] = "Protocol incompatibility" + rsyncErrorCodes[3] = "Errors selecting input/output files, dirs" + rsyncErrorCodes[4] = "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them; or an option was specified that is supported by the client and not by the server." + rsyncErrorCodes[5] = "Error starting client-server protocol" + rsyncErrorCodes[6] = "Daemon unable to append to log-file" + rsyncErrorCodes[10] = "Error in socket I/O" + rsyncErrorCodes[11] = "Error in file I/O" + rsyncErrorCodes[12] = "Error in rsync protocol data stream" + rsyncErrorCodes[13] = "Errors with program diagnostics" + rsyncErrorCodes[14] = "Error in IPC code" + rsyncErrorCodes[20] = "Received SIGUSR1 or SIGINT" + rsyncErrorCodes[21] = "Some error returned by waitpid()" + rsyncErrorCodes[22] = "Error allocating core memory buffers" + rsyncErrorCodes[23] = "Partial transfer due to error" + rsyncErrorCodes[24] = "Partial transfer due to vanished source files" + rsyncErrorCodes[25] = "The --max-delete limit stopped deletions" + rsyncErrorCodes[30] = "Timeout in data send/receive" + rsyncErrorCodes[35] = "Timeout waiting for daemon connection" +} + +func RsyncErrorCodeToString(code int) string { + if msg, ok := rsyncErrorCodes[code]; ok { + return msg + } + + return "Unknown" +} + +// RsyncTask implements the Task interface from `scheduler` +type RsyncTask struct { + // Project `short` name + short string + args []string + stages []string + password string +} + +// NewRsyncTask creates a new RsyncTask from a config.Rsync +func NewRsyncTask(declaration *config.Rsync, short string) *RsyncTask { + args := make([]string, 0) + + if declaration.User != "" { + args = append(args, fmt.Sprintf("%s@%s::%s", declaration.User, declaration.Host, declaration.Src)) + } else { + args = append(args, fmt.Sprintf("%s::%s", 
declaration.Host, declaration.Src)) + } + args = append(args, declaration.Dest) + + // Add the password if it exists + var password []byte + var err error + if declaration.PasswordFile != "" { + password, err = os.ReadFile(declaration.PasswordFile) + if err != nil { + logging.Error("Failed to read password file:", err) + } + + return &RsyncTask{ + short: short, + args: args, + stages: declaration.Stages, + password: string(password), + } + } else { + return &RsyncTask{ + short: short, + args: args, + stages: declaration.Stages, + password: "", + } + } +} + +// Run runs the script, blocking until it finishes +// See: Aggregator.Run +func (r *RsyncTask) Run(stdout, stderr io.Writer, status chan<- logging.LogEntryT, ctx context.Context) TaskStatus { + status <- logging.InfoLogEntry(fmt.Sprintf("%s: Starting rsync", r.short)) + + for i := 0; i < len(r.stages); i++ { + status := r.RunStage(stdout, stderr, status, ctx, i) + if status != TaskStatusSuccess { + return status + } + } + + return TaskStatusSuccess +} + +// RunStage runs a single stage of the rsync task +func (r *RsyncTask) RunStage(stdout, stderr io.Writer, status chan<- logging.LogEntryT, ctx context.Context, stage int) TaskStatus { + // join r.args and r.stages[stage] + args := make([]string, len(r.args)) + copy(args, r.args) + args = append(args, r.stages[stage]) + + cmd := exec.Command("rsync", args...) 
+ cmd.Stdout = stdout + cmd.Stderr = stderr + + if r.password != "" { + cmd.Env = append(os.Environ(), "RSYNC_PASSWORD="+r.password) + } + + status <- logging.InfoLogEntry("Running: " + cmd.String()) + + err := cmd.Start() + if err != nil { + status <- logging.ErrorLogEntry(fmt.Sprintf("%s: Stage %d failed to start: %s", r.short, stage, err.Error())) + return TaskStatusFailure + } + + c := make(chan struct{}) + go func() { + cmd.Wait() + close(c) + }() + + select { + case <-c: + break + case <-ctx.Done(): + cmd.Process.Kill() + status <- logging.InfoLogEntry(fmt.Sprintf("%s: Stage %d stopped", r.short, stage)) + return TaskStatusStopped + } + + // Report the exit code + if cmd.ProcessState.Success() { + status <- logging.SuccessLogEntry(fmt.Sprintf("%s: Stage %d completed successfully", r.short, stage)) + return TaskStatusSuccess + } + + status <- logging.ErrorLogEntry(fmt.Sprintf("%s: Stage %d failed with exit code %d (%s)", r.short, stage, cmd.ProcessState.ExitCode(), RsyncErrorCodeToString(cmd.ProcessState.ExitCode()))) + return TaskStatusFailure +} diff --git a/sync_script.go b/sync_script.go new file mode 100644 index 0000000..ea8cb92 --- /dev/null +++ b/sync_script.go @@ -0,0 +1,77 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + + "github.com/COSI-Lab/Mirror/config" + "github.com/COSI-Lab/Mirror/logging" +) + +// ScriptTask is a task that runs a script project +type ScriptTask struct { + short string + env map[string]string + command string + arguments []string +} + +// NewScriptTask creates a new ScriptTask from a config.Script +func NewScriptTask(declaration *config.Script, short string) *ScriptTask { + return &ScriptTask{ + short: short, + env: declaration.Env, + command: declaration.Command, + arguments: declaration.Arguments, + } +} + +// Run runs the script, blocking until it finishes +// See: Aggregator.Run +func (s *ScriptTask) Run(stdout, stderr io.Writer, status chan<- logging.LogEntryT, ctx context.Context) TaskStatus { 
+ status <- logging.InfoLogEntry(fmt.Sprintf("%s: Starting script", s.short)) + cmd := exec.Command(s.command, s.arguments...) + cmd.Stdout = stdout + cmd.Stderr = stderr + + // set environment variables + env := os.Environ() + for key, value := range s.env { + env = append(env, fmt.Sprintf("%s=%s", key, value)) + } + cmd.Env = env + + status <- logging.InfoLogEntry("Running: " + cmd.String()) + + err := cmd.Start() + if err != nil { + status <- logging.ErrorLogEntry(fmt.Sprintf("%s: Failed to start script: %s", s.short, err.Error())) + return TaskStatusFailure + } + + c := make(chan struct{}) + go func() { + cmd.Wait() + close(c) + }() + + select { + case <-c: + break + case <-ctx.Done(): + cmd.Process.Kill() + status <- logging.InfoLogEntry(fmt.Sprintf("%s: Script stopped", s.short)) + return TaskStatusStopped + } + + if cmd.ProcessState.Success() { + status <- logging.SuccessLogEntry(fmt.Sprintf("%s: Script finished successfully", s.short)) + return TaskStatusSuccess + } + + status <- logging.ErrorLogEntry(fmt.Sprintf("%s: Script failed with exit code %d", s.short, cmd.ProcessState.ExitCode())) + return TaskStatusFailure +} diff --git a/torrent.go b/torrent.go index 989bfc1..930bec0 100644 --- a/torrent.go +++ b/torrent.go @@ -11,12 +11,13 @@ import ( "syscall" "time" - "github.com/COSI-Lab/logging" + "github.com/COSI-Lab/Mirror/config" + "github.com/COSI-Lab/Mirror/logging" "github.com/gocolly/colly" ) // HandleTorrents periodically downloads remote torrents and extracts torrents from disk -func HandleTorrents(config *ConfigFile, torrentDir, downloadDir string) { +func HandleTorrents(cfg *config.File, torrentDir, downloadDir string) { err := os.MkdirAll(downloadDir, 0755) if err != nil { logging.Error("Failed to create torrents downloadDir: ", err) @@ -34,32 +35,32 @@ func HandleTorrents(config *ConfigFile, torrentDir, downloadDir string) { // - search disk for torrent files and corresponding downloads // - sync downloadDir // - sync torrentDir - go 
scrapeTorrents(config.Torrents, torrentDir) - go syncTorrents(config, torrentDir, downloadDir) + go scrapeTorrents(cfg.Torrents, torrentDir) + go syncTorrents(cfg, torrentDir, downloadDir) // Sleep until midnight now := time.Now() midnight := time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, time.Local) time.Sleep(time.Until(midnight)) - go scrapeTorrents(config.Torrents, torrentDir) - go syncTorrents(config, torrentDir, downloadDir) + go scrapeTorrents(cfg.Torrents, torrentDir) + go syncTorrents(cfg, torrentDir, downloadDir) ticker := time.NewTicker(24 * time.Hour) for range ticker.C { - go scrapeTorrents(config.Torrents, torrentDir) - go syncTorrents(config, torrentDir, downloadDir) + go scrapeTorrents(cfg.Torrents, torrentDir) + go syncTorrents(cfg, torrentDir, downloadDir) } } // syncTorrents goes over all projects, finds their torrent files, the corresponding source // files and then creates hardlinks in the download and torrent directories -func syncTorrents(config *ConfigFile, torrentDir, ourDir string) { - for _, project := range config.GetProjects() { +func syncTorrents(cfg *config.File, torrentDir, ourDir string) { + for _, project := range cfg.GetProjects() { if project.Torrents == "" { continue } - go func(project Project) { + go func(project config.Project) { // Find all torrent files using glob matches, err := filepath.Glob(project.Torrents + "*.torrent") @@ -94,7 +95,7 @@ func syncTorrents(config *ConfigFile, torrentDir, ourDir string) { } // Fetches a file from a glob and a name. 
Saves it to downloadDir -func addFile(project Project, downloadDir, fileName string) { +func addFile(project config.Project, downloadDir, fileName string) { // Search the glob for the corresponding file files, err := filepath.Glob(project.Torrents + fileName) if err != nil { @@ -165,9 +166,9 @@ func addFile(project Project, downloadDir, fileName string) { } // scrapeTorrents downloads all torrents from upstreams -func scrapeTorrents(torrents []*Torrent, downloadDir string) { +func scrapeTorrents(torrents []*config.ScrapeTarget, downloadDir string) { for _, upstream := range torrents { - go scrape(upstream.Depth, upstream.Delay, upstream.Url, downloadDir) + go scrape(upstream.Depth, upstream.Delay, upstream.URL, downloadDir) } } diff --git a/tracking.go b/tracking.go deleted file mode 100644 index e5293d7..0000000 --- a/tracking.go +++ /dev/null @@ -1,496 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "net" - "os" - "os/exec" - "strconv" - "strings" - "sync" - "time" - - "github.com/COSI-Lab/logging" - influxdb2 "github.com/influxdata/influxdb-client-go/v2" - "github.com/influxdata/influxdb-client-go/v2/api" -) - -type NetStat struct { - BytesSent int64 - BytesRecv int64 - Requests int64 -} -type DistroStatistics map[string]*NetStat -type TransmissionStatistics struct { - Uploaded int64 - Downloaded int64 - Torrents int - Ratio float64 -} -type Statistics struct { - sync.RWMutex - nginx DistroStatistics - clarkson DistroStatistics - transmission TransmissionStatistics - rsyncd NetStat -} - -var statistics Statistics -var clarksonIPv4net *net.IPNet -var clarksonIPv6net *net.IPNet - -// Prepare filters and regular expressions -func init() { - var err error - _, clarksonIPv4net, err = net.ParseCIDR("128.153.0.0/16") - if err != nil { - logging.Panic(err) - os.Exit(1) - } - _, clarksonIPv6net, err = net.ParseCIDR("2605:6480::/32") - if err != nil { - logging.Panic(err) - os.Exit(1) - } -} - -// HandleStatistics receives parsed log entries over 
channels and tracks the useful information -// The statistics object should be created before this function can be run. -func HandleStatistics(nginxEntries chan *NginxLogEntry, rsyncdEntries chan *RsyncdLogEntry) { - // We send the latest stats to influxdb every minute - ticker := time.NewTicker(1 * time.Minute) - - for { - select { - case <-ticker.C: - err := SetTransmissionStatistics() - if err != nil { - logging.Error(err) - } - Sendstatistics() - case entry := <-nginxEntries: - statistics.Lock() - // Track all distro usage - if _, ok := statistics.nginx[entry.Distro]; ok { - statistics.nginx[entry.Distro].BytesSent += entry.BytesSent - statistics.nginx[entry.Distro].BytesRecv += entry.BytesRecv - statistics.nginx[entry.Distro].Requests++ - } else { - statistics.nginx["other"].BytesSent += entry.BytesSent - statistics.nginx["other"].BytesRecv += entry.BytesRecv - statistics.nginx["other"].Requests++ - } - statistics.nginx["total"].BytesSent += entry.BytesSent - statistics.nginx["total"].BytesRecv += entry.BytesRecv - statistics.nginx["total"].Requests++ - - // Additionally track usage from within the clarkson network - if clarksonIPv4net.Contains(entry.IP) || clarksonIPv6net.Contains(entry.IP) { - if _, ok := statistics.clarkson[entry.Distro]; ok { - statistics.clarkson[entry.Distro].BytesSent += entry.BytesSent - statistics.clarkson[entry.Distro].BytesRecv += entry.BytesRecv - statistics.clarkson[entry.Distro].Requests++ - } else { - statistics.clarkson["other"].BytesSent += entry.BytesSent - statistics.clarkson["other"].BytesRecv += entry.BytesRecv - statistics.clarkson["other"].Requests++ - } - statistics.clarkson["total"].BytesSent += entry.BytesSent - statistics.clarkson["total"].BytesRecv += entry.BytesRecv - statistics.clarkson["total"].Requests++ - } - statistics.Unlock() - case entry := <-rsyncdEntries: - statistics.Lock() - statistics.rsyncd.BytesSent += entry.sent - statistics.rsyncd.BytesRecv += entry.recv - statistics.rsyncd.Requests++ - 
statistics.Unlock() - } - } -} - -// Start a command and allow it to cancel after a certain amount of time -func runCommand(cmd *exec.Cmd, d time.Duration) error { - cmd.Start() - - done := make(chan error, 1) - go func() { - done <- cmd.Wait() - }() - - select { - case <-time.After(d): - if err := cmd.Process.Kill(); err != nil { - return err - } - return errors.New("transmission-remote timed out") - case err := <-done: - if err != nil { - return err - } - } - - return nil -} - -// Get the latest statistics from Transmission -func SetTransmissionStatistics() error { - // Get the count by running transmission-remote -l - // The output is in the form of a table, so we can just count the lines - 2 for the head and tail - cmd := exec.Command("transmission-remote", "-ne", "-l") - cmd.Env = append(os.Environ(), "TR_AUTH=transmission:") - - err := runCommand(cmd, 5*time.Second) - if err != nil { - return err - } - - out, err := cmd.Output() - if err != nil { - return err - } - - lines := strings.Split(string(out), "\n") - torrents := len(lines) - 2 - - // Get the total upload and download by running transmission-remote -st - cmd = exec.Command("transmission-remote", "-ne", "-st") - cmd.Env = append(os.Environ(), "TR_AUTH=transmission:") - - err = runCommand(cmd, 5*time.Second) - if err != nil { - return err - } - - out, err = cmd.Output() - if err != nil { - return err - } - - // Get the TOTAL uploaded, downloaded and ratio - lines = strings.Split(string(out), "\n") - uploaded := strings.Split(lines[9], ":")[1] - downloaded := strings.Split(lines[10], ":")[1] - ratio := strings.Split(lines[11], ":")[1] - - // Convert the human readable sizes to bytes - uploadedBytes, err := HumanReadableSizeToBytes(uploaded) - if err != nil { - return err - } - downloadedBytes, err := HumanReadableSizeToBytes(downloaded) - if err != nil { - return err - } - - ratioFloat, err := strconv.ParseFloat(strings.TrimSpace(ratio), 64) - if err != nil { - return err - } - - // Set the statistics - 
statistics.Lock() - statistics.transmission.Torrents = torrents - statistics.transmission.Uploaded = uploadedBytes - statistics.transmission.Downloaded = downloadedBytes - statistics.transmission.Ratio = ratioFloat - statistics.Unlock() - - return nil -} - -// HumanReadableSizeToBytes converts a human readable size to bytes -// -// Examples: -// -// "1.0 KB" -> 1000 -// "1.0 MB" -> 1000000 -// "1.0 GB" -> 1000000000 -func HumanReadableSizeToBytes(size string) (int64, error) { - // Get the size and unit - size = strings.TrimSpace(size) - unit := size[len(size)-2:] - size = size[:len(size)-2] - - // Convert the size to an int - sizeFloat, err := strconv.ParseFloat(strings.TrimSpace(size), 64) - if err != nil { - return 0, err - } - - // Convert the unit to bytes - switch unit { - case "KB": - return int64(sizeFloat * 1000), nil - case "MB": - return int64(sizeFloat * 1000 * 1000), nil - case "GB": - return int64(sizeFloat * 1000 * 1000 * 1000), nil - case "TB": - return int64(sizeFloat * 1000 * 1000 * 1000 * 1000), nil - case "PB": - return int64(sizeFloat * 1000 * 1000 * 1000 * 1000 * 1000), nil - default: - return 0, fmt.Errorf("Unknown unit %s", unit) - } -} - -// Sends the latest statistics to the database -func Sendstatistics() { - if influxReadOnly { - logging.Info("INFLUX_READ_ONLY is set, not sending data to influx") - return - } - - t := time.Now() - - statistics.RLock() - for short, stat := range statistics.nginx { - p := influxdb2.NewPoint("nginx", - map[string]string{"distro": short}, - map[string]interface{}{ - "bytes_sent": stat.BytesSent, - "bytes_recv": stat.BytesRecv, - "requests": stat.Requests, - }, t) - writer.WritePoint(p) - } - for short, stat := range statistics.clarkson { - p := influxdb2.NewPoint("clarkson", - map[string]string{"distro": short}, - map[string]interface{}{ - "bytes_sent": stat.BytesSent, - "bytes_recv": stat.BytesRecv, - "requests": stat.Requests, - }, t) - writer.WritePoint(p) - } - p := influxdb2.NewPoint("transmission", 
map[string]string{}, map[string]interface{}{ - "downloaded": statistics.transmission.Downloaded, - "uploaded": statistics.transmission.Uploaded, - "torrents": statistics.transmission.Torrents, - "ratio": statistics.transmission.Ratio, - }, t) - writer.WritePoint(p) - p = influxdb2.NewPoint("rsyncd", map[string]string{}, map[string]interface{}{ - "bytes_sent": statistics.rsyncd.BytesSent, - "bytes_recv": statistics.rsyncd.BytesRecv, - "requests": statistics.rsyncd.Requests, - }, t) - writer.WritePoint(p) - - // To be safe we release the lock before logging because logging takes a seperate lock - statistics.RUnlock() - - logging.Info("Sent statistics") -} - -// InitStatistics queries the database for the all of the latest statistics -// In general everything in `statistics` should be monotonically increasing -// lastUpdated should be the same no matter where we check -func InitStatistics(projects map[string]*Project) (lastUpdated time.Time, err error) { - // Map from short names to bytes sent - statistics = Statistics{} - - lastUpdated, statistics.nginx, err = QueryDistroStatistics(projects, "nginx") - if err != nil { - return lastUpdated, err - } - lastUpdated, statistics.clarkson, err = QueryDistroStatistics(projects, "clarkson") - if err != nil { - return lastUpdated, err - } - - statistics.rsyncd, err = QueryRsyncdStatistics() - if err != nil { - return lastUpdated, err - } - - return lastUpdated, nil -} - -// measurement is the particular filter you want `DistroStatistics` from -// current "clarkson" and "nginx" (all) are supported -func QueryDistroStatistics(projects map[string]*Project, measurement string) (lastUpdated time.Time, stats DistroStatistics, err error) { - // You can paste this into the influxdb data explorer - // Replace MEASUREMENT with "nginx" or "clarkson" - /* - from(bucket: "stats") - |> range(start: 0, stop: now()) - |> filter(fn: (r) => r["_measurement"] == "MEASUREMENT") - |> filter(fn: (r) => r["_field"] == "bytes_sent" or r["_field"] == 
"bytes_recv" or r["_field"] == "requests") - |> last() - |> group(columns: ["distro"], mode: "by") - */ - request := fmt.Sprintf("from(bucket: \"stats\") |> range(start: 0, stop: now()) |> filter(fn: (r) => r[\"_measurement\"] == \"%s\") |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\" or r[\"_field\"] == \"bytes_recv\" or r[\"_field\"] == \"requests\") |> last() |> group(columns: [\"distro\"], mode: \"by\")", measurement) - - // try the query at most 5 times - var result *api.QueryTableResult - for i := 0; i < 5; i++ { - result, err = reader.Query(context.Background(), request) - - if err != nil { - logging.Warn("Failed to querying influxdb nginx statistics", err) - // Sleep for some time before retrying - time.Sleep(time.Duration(i) * time.Second) - continue - } - - break - } - - if err != nil { - return lastUpdated, stats, errors.New("Error querying influxdb") - } - - stats = make(DistroStatistics) - for short := range projects { - stats[short] = &NetStat{} - } - stats["other"] = &NetStat{} - stats["total"] = &NetStat{} - - for result.Next() { - if result.Err() == nil { - // Get the data point - dp := result.Record() - - // Update the time of the measurement - lastUpdated = dp.Time() - - // Get the distro short name - distro, ok := dp.ValueByKey("distro").(string) - if !ok { - logging.Warn("Error getting distro short name") - fmt.Printf("%T %v\n", distro, distro) - continue - } - - if stats[distro] == nil { - continue - } - - // Get the field - field, ok := dp.ValueByKey("_field").(string) - if !ok { - logging.Warn("Error getting field") - fmt.Printf("%T %v\n", field, field) - continue - } - - // Switch on the field - switch field { - case "bytes_sent": - sent, ok := dp.ValueByKey("_value").(int64) - if !ok { - logging.Warn("Error getting bytes sent") - fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) - continue - } - stats[distro].BytesSent = sent - case "bytes_recv": - received, ok := dp.ValueByKey("_value").(int64) - if !ok { - 
logging.Warn("Error getting bytes recv") - fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) - continue - } - stats[distro].BytesRecv = received - case "requests": - requests, ok := dp.ValueByKey("_value").(int64) - if !ok { - logging.Warn("Error getting requests") - fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) - continue - } - stats[distro].Requests = requests - } - } else { - logging.Warn("QueryDistroStatistics Flux Query Error", result.Err()) - } - } - result.Close() - - return lastUpdated, stats, nil -} - -func QueryRsyncdStatistics() (stat NetStat, err error) { - // You can paste this into the influxdb data explorer - /* - from(bucket: "stats") - |> range(start: 0, stop: now()) - |> filter(fn: (r) => r["_measurement"] == "rsyncd") - |> filter(fn: (r) => r["_field"] == "bytes_sent" or r["_field"] == "bytes_recv" or r["_field"] == "requests") - |> last() - */ - const request = "from(bucket: \"stats\") |> range(start: 0, stop: now()) |> filter(fn: (r) => r[\"_measurement\"] == \"rsyncd\") |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\" or r[\"_field\"] == \"bytes_recv\") |> last()" - - // try the query at most 5 times - var result *api.QueryTableResult - for i := 0; i < 5; i++ { - result, err = reader.Query(context.Background(), request) - - if err != nil { - logging.Warn("Failed to querying influxdb rsyncd statistics", err) - // Sleep for some time before retrying - time.Sleep(time.Duration(i) * time.Second) - continue - } - - break - } - - if result == nil { - return stat, errors.New("Error querying influxdb for rsyncd stat") - } - - for result.Next() { - if result.Err() == nil { - // Get the data point - dp := result.Record() - - // Get the field - field, ok := dp.ValueByKey("_field").(string) - if !ok { - logging.Warn("Error getting field") - fmt.Printf("%T %v\n", field, field) - continue - } - - // Switch on the field - switch field { - case "bytes_sent": - sent, ok := dp.ValueByKey("_value").(int64) - if 
!ok { - logging.Warn("Error getting bytes sent") - fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) - continue - } - statistics.rsyncd.BytesSent = sent - case "bytes_recv": - received, ok := dp.ValueByKey("_value").(int64) - if !ok { - logging.Warn("Error getting bytes recv") - fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) - continue - } - statistics.rsyncd.BytesRecv = received - case "requests": - requests, ok := dp.ValueByKey("_value").(int64) - if !ok { - logging.Warn("Error getting requests") - fmt.Printf("%T %v\n", dp.ValueByKey("_value"), dp.ValueByKey("_value")) - continue - } - statistics.rsyncd.Requests = requests - } - } else { - logging.Warn("InitNGINXStats Flux Query Error", result.Err()) - } - } - - return stat, nil -} diff --git a/webserver.go b/webserver.go index d7ab107..345a2c1 100644 --- a/webserver.go +++ b/webserver.go @@ -6,15 +6,17 @@ import ( "net/http" "sync" - "github.com/COSI-Lab/logging" + "github.com/COSI-Lab/Mirror/config" + "github.com/COSI-Lab/Mirror/logging" "github.com/gorilla/mux" "github.com/wcharczuk/go-chart/v2" ) var tmpls *template.Template -var projects map[string]*Project -var projectsById []Project -var projectsGrouped ProjectsGrouped +var projects map[string]*config.Project +var projectsById []config.Project +var projectsGrouped config.ProjectsGrouped +var tokens *config.Tokens var dataLock = &sync.RWMutex{} func init() { @@ -123,6 +125,7 @@ func handleStatistics(w http.ResponseWriter, r *http.Request) { // handleManualSyncs is a endpoint that allows a privileged user to manually cause a project to sync // Access token is included in the query string. The http method is not considered. 
+// // /sync/{project}?token={token} func handleManualSyncs(manual chan<- string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { @@ -142,40 +145,25 @@ func handleManualSyncs(manual chan<- string) http.HandlerFunc { return } - if projectName == "all" { - // Trigger a sync for every project - if token == pullToken { - // Return a success message - fmt.Fprintf(w, "Sync requested for all projects") - - // Sync the project - logging.InfoToDiscord("Manual sync requested for all projects") + // Check if the token has permission for projectName + t := tokens.GetToken(token) + if t == nil { + http.Error(w, "Invalid access token", http.StatusForbidden) + return + } - for name := range projects { - manual <- name - } - } else { - http.Error(w, "Invalid access token", http.StatusForbidden) - } - } else { - // Trigger a sync for a single project - project, ok := projects[projectName] - if !ok { - http.NotFound(w, r) - return - } + // Check if the token has permission for projectName + if !t.HasProject(projectName) { + http.Error(w, "Invalid access token", http.StatusForbidden) + return + } - if token == pullToken || token == project.AccessToken { - // Return a success message - fmt.Fprintf(w, "Sync requested for project: %s", projectName) + // Return a success message + fmt.Fprintf(w, "Sync requested for project: %s", projectName) - // Sync the project - logging.InfoToDiscord("Manual sync requested for project: _", projectName, "_") - manual <- projectName - } else { - http.Error(w, "Invalid access token", http.StatusForbidden) - } - } + // Sync the project + logging.Info("Manual sync requested for project: _", projectName, "_") + manual <- projectName } } @@ -185,24 +173,23 @@ func handleHealth(w http.ResponseWriter, r *http.Request) { } // Reload distributions and software arrays -func WebserverLoadConfig(config *ConfigFile) { +func WebServerLoadConfig(cfg *config.File, t *config.Tokens) { dataLock.Lock() - projectsById = config.GetProjects() - 
projectsGrouped = config.GetProjectsByPage() - projects = config.Mirrors + projectsById = cfg.GetProjects() + projectsGrouped = cfg.GetProjectsByPage() + projects = cfg.Projects + tokens = t dataLock.Unlock() } // HandleWebserver starts the webserver and listens for incoming connections // manual is a channel that project short names are sent down to manually trigger a projects rsync // entries is a channel that contains log entries that are disabled by the mirror map -func HandleWebserver(manual chan<- string, entries chan *NginxLogEntry) { +func HandleWebServer(manual chan<- string, entries <-chan NGINXLogEntry) { r := mux.NewRouter() - cache = make(map[string]*CacheEntry) - // Setup the map - r.Handle("/map", cachingMiddleware(handleMap)) + r.HandleFunc("/map", handleMap) mapMessages := make(chan []byte) go entriesToMessages(entries, mapMessages) MapRouter(r.PathPrefix("/map").Subrouter(), mapMessages) @@ -210,17 +197,18 @@ func HandleWebserver(manual chan<- string, entries chan *NginxLogEntry) { // Handlers for the other pages // redirect / to /home r.Handle("/", http.RedirectHandler("/home", http.StatusTemporaryRedirect)) - r.Handle("/home", cachingMiddleware(handleHome)) - r.Handle("/projects", cachingMiddleware(handleProjects)) - r.Handle("/history", cachingMiddleware(handleHistory)) - r.Handle("/stats/{project}/{statistic}", cachingMiddleware(handleStatistics)) - r.Handle("/stats", cachingMiddleware(handleStats)) - r.Handle("/sync/{project}", handleManualSyncs(manual)) + r.HandleFunc("/home", handleHome) + r.HandleFunc("/projects", handleProjects) + r.HandleFunc("/history", handleHistory) + r.HandleFunc("/stats/{project}/{statistic}", handleStatistics) + r.HandleFunc("/stats", handleStats) + r.HandleFunc("/sync/{project}", handleManualSyncs(manual)) r.HandleFunc("/health", handleHealth) - r.HandleFunc("/ws", HandleWebsocket) // Static files - r.PathPrefix("/").Handler(cachingMiddleware(http.FileServer(http.Dir("static")).ServeHTTP)) + 
r.PathPrefix("/").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.FileServer(http.Dir("static")).ServeHTTP(w, r) + })) // Serve on 8080 l := &http.Server{