From 461c0ab769cd07038f231c6ae05623aed9fd469f Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 24 Sep 2024 17:37:02 +0000 Subject: [PATCH 01/10] support for large Postgres files Large Postgres files may pose a challenge for some systems. This change stores the file on disk, compares the filesize with the remote, and adds the local file to the tar. It has error handling to allow a support export to be generated when there are issues with gathering PG logs. --- internal/cmd/export.go | 253 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 234 insertions(+), 19 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index f9ca1a2..dab44b8 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -23,6 +23,8 @@ import ( "io" "os" "os/exec" + "path/filepath" + "strconv" "strings" "text/tabwriter" "time" @@ -47,6 +49,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + remotecommand "k8s.io/client-go/tools/remotecommand" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator-client/internal" @@ -449,7 +452,7 @@ Collecting PGO CLI logs... 
// All Postgres Logs on the Postgres Instances (primary and replicas) if numLogs > 0 { if err == nil { - err = gatherPostgresLogsAndConfigs(ctx, clientset, restConfig, namespace, clusterName, numLogs, tw, cmd) + err = gatherPostgresLogsAndConfigs(ctx, clientset, restConfig, namespace, clusterName, outputDir, numLogs, tw, cmd) } } @@ -955,6 +958,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, config *rest.Config, namespace string, clusterName string, + outputDir string, numLogs int, tw *tar.Writer, cmd *cobra.Command, @@ -1025,29 +1029,66 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, logFiles := strings.Split(strings.TrimSpace(stdout), "\n") for _, logFile := range logFiles { - writeDebug(cmd, fmt.Sprintf("LOG FILE: %s\n", logFile)) - var buf bytes.Buffer - - stdout, stderr, err := Executor(exec).catFile(logFile) + // get the file size to stream + fileSize, err := getRemoteFileSize(clientset, config, namespace, pod.Name, util.ContainerDatabase, logFile) if err != nil { - if apierrors.IsForbidden(err) { - writeInfo(cmd, err.Error()) - // Continue and output errors for each log file - // Allow the user to see and address all issues at once - continue - } - return err + writeDebug(cmd, fmt.Sprintf("could not get file size for %s: %v\n", logFile, err)) + continue } - buf.Write([]byte(stdout)) - if stderr != "" { - str := fmt.Sprintf("\nError returned: %s\n", stderr) - buf.Write([]byte(str)) + // longFileName is namespace/podname:path/to/file + longFileName := fmt.Sprintf("%s/%s:%s", namespace, pod.Name, logFile) + writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", longFileName, ConvertBytes(fileSize))) + + // flags to switch behavior between storing the file in memory versus disk + doCat := false + doStream := true + + if doCat { + path := fmt.Sprintf("%s/pods/%s/%s", clusterName, pod.Name, logFile) + var buf bytes.Buffer + + stdout, stderr, err := Executor(exec).catFile(logFile) + if err != nil { + if apierrors.IsForbidden(err) { + writeInfo(cmd, 
err.Error()) + // Continue and output errors for each log file + // Allow the user to see and address all issues at once + continue + } + return err + } + + buf.Write([]byte(stdout)) + if stderr != "" { + str := fmt.Sprintf("\nError returned: %s\n", stderr) + buf.Write([]byte(str)) + } + + if err := writeTar(tw, buf.Bytes(), path, cmd); err != nil { + return err + } } - path := clusterName + fmt.Sprintf("/pods/%s/", pod.Name) + logFile - if err := writeTar(tw, buf.Bytes(), path, cmd); err != nil { - return err + if doStream { + err = streamFileFromPod(clientset, config, tw, + outputDir, clusterName, namespace, pod.Name, util.ContainerDatabase, logFile) + + if err != nil { + writeInfo(cmd, fmt.Sprintf("\tError streaming file %s: %v", logFile, err)) + // "failed to remove" errors are not remedied with kubectl cp + if !strings.Contains(err.Error(), "failed to remove") { + writeInfo(cmd, fmt.Sprintf("\tCollect manually with kubectl cp -c %s %s/%s:%s %s", + util.ContainerDatabase, namespace, pod.Name, logFile, filepath.Join(outputDir, filepath.Base(logFile)))) + } + // "failed to remove" errors should instruct the user to remove manually + // this happens often on Windows + if strings.Contains(err.Error(), "failed to remove") { + writeInfo(cmd, fmt.Sprintf("\tYou may need to remove %s manually", filepath.Join(outputDir, filepath.Base(logFile)))) + } + + continue + } } } @@ -1800,3 +1841,177 @@ func writeDebug(cmd *cobra.Command, s string) { t := time.Now() cmd.Printf("%s - DEBUG - %s", t.Format(logTimeFormat), s) } + +// streamFileFromPod streams the file from the Kubernetes pod to a local file. 
+func streamFileFromPod(clientset *kubernetes.Clientset, + config *rest.Config, tw *tar.Writer, + outputDir, clusterName, namespace, podName, containerName, remotePath string) error { + + // create localPath to write the streamed data from remotePath + localPath := filepath.Join(outputDir, filepath.Base(remotePath)) + if err := os.MkdirAll(filepath.Dir(localPath), 0770); err != nil { + return fmt.Errorf("failed to create path for file: %w", err) + } + outFile, err := os.Create(localPath) + if err != nil { + return fmt.Errorf("failed to create local file: %w", err) + } + defer outFile.Close() + + // request to cat remotePath + req := clientset.CoreV1().RESTClient(). + Get(). + Resource("pods"). + Name(podName). + Namespace(namespace). + SubResource("exec"). + Param("container", containerName). + Param("command", "cat"). + Param("command", remotePath). + Param("stderr", "true"). + Param("stdout", "true") + + exec, err := remotecommand.NewSPDYExecutor(config, "GET", req.URL()) + if err != nil { + return fmt.Errorf("failed to initialize SPDY executor: %w", err) + } + + // remoteFileSize is the file size within the container + remoteFileSize, err := getRemoteFileSize(clientset, config, namespace, podName, containerName, remotePath) + if err != nil { + return fmt.Errorf("could not get file size: %w", err) + } + + // stream remotePath to localPath + err = exec.Stream(remotecommand.StreamOptions{ + Stdout: outFile, + Stderr: os.Stderr, + Tty: false, + }) + if err != nil { + return fmt.Errorf("error during file streaming: %w", err) + } + + // compare file sizes + localFileInfo, err := os.Stat(localPath) + if err != nil { + return fmt.Errorf("failed to get file info: %w", err) + } + if remoteFileSize != localFileInfo.Size() { + return fmt.Errorf("filesize mismatch: remote size is %v and local size is %v", + remoteFileSize, localFileInfo.Size()) + } + + // add localPath to the support export tar + tarPath := fmt.Sprintf("%s/pods/%s/%s", clusterName, podName, remotePath) + err 
= addFileToTar(tw, localPath, tarPath) + if err != nil { + return fmt.Errorf("error writing to tar: %w", err) + } + + // remove localPath + err = os.Remove(localPath) + if err != nil { + return fmt.Errorf("failed to %w", err) + } + + return nil +} + +// addFileToTar copies a local file into a tar archive +func addFileToTar(tw *tar.Writer, localPath, tarPath string) error { + // Open the file to be added to the tar + file, err := os.Open(localPath) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + // Get file info to create tar header + fileInfo, err := file.Stat() + if err != nil { + return fmt.Errorf("failed to get file info: %w", err) + } + + // Create tar header + header := &tar.Header{ + Name: tarPath, // Name in the tar archive + Size: fileInfo.Size(), // File size + Mode: int64(fileInfo.Mode()), // File mode + ModTime: fileInfo.ModTime(), // Modification time + } + + // Write header to the tar + err = tw.WriteHeader(header) + if err != nil { + return fmt.Errorf("failed to write tar header: %w", err) + } + + // Stream the file content to the tar + _, err = io.Copy(tw, file) + if err != nil { + return fmt.Errorf("failed to copy file data to tar: %w", err) + } + + return nil +} + +// getRemoteFileSize returns the size of a file within a container so that we can stream its contents +func getRemoteFileSize(clientset *kubernetes.Clientset, config *rest.Config, + namespace string, podName string, containerName string, filePath string) (int64, error) { + + // Prepare the command to get the file size using "stat -c %s " + req := clientset. + CoreV1(). + RESTClient(). + Post(). + Namespace(namespace). + Resource("pods"). + Name(podName). + SubResource("exec"). + Param("container", containerName). + Param("command", "stat"). + Param("command", "-c"). + Param("command", "%s"). + Param("command", filePath). + Param("stderr", "true"). 
+ Param("stdout", "true") + + exec, err := remotecommand.NewSPDYExecutor(config, "GET", req.URL()) + if err != nil { + return 0, fmt.Errorf("could not initialize SPDY executor for stat: %v", err) + } + + var stdout, stderr strings.Builder + err = exec.Stream(remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + Tty: false, + }) + if err != nil { + return 0, fmt.Errorf("could not get file size: %v, stderr: %s", err, stderr.String()) + } + + // Parse the file size from stdout + size, err := strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse file size: %v", err) + } + + return size, nil +} + +// ConvertBytes converts a byte size (int64) into a human-readable format. +func ConvertBytes(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} From f9dbf485a2fd230fbef8984147dfd7fa0df2c32a Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 24 Sep 2024 18:01:21 +0000 Subject: [PATCH 02/10] fixes for lint errors --- internal/cmd/export.go | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index dab44b8..20db726 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -1849,14 +1849,21 @@ func streamFileFromPod(clientset *kubernetes.Clientset, // create localPath to write the streamed data from remotePath localPath := filepath.Join(outputDir, filepath.Base(remotePath)) - if err := os.MkdirAll(filepath.Dir(localPath), 0770); err != nil { + if err := os.MkdirAll(filepath.Dir(localPath), 0750); err != nil { return fmt.Errorf("failed to create path for file: %w", err) } - outFile, err := os.Create(localPath) + outFile, err := 
os.Create(filepath.Clean(localPath)) if err != nil { return fmt.Errorf("failed to create local file: %w", err) } - defer outFile.Close() + + defer func() { + // ignore any errors from Close functions, the writers will be + // closed when the program exits + if outFile != nil { + _ = outFile.Close() + } + }() // request to cat remotePath req := clientset.CoreV1().RESTClient(). @@ -1921,11 +1928,17 @@ func streamFileFromPod(clientset *kubernetes.Clientset, // addFileToTar copies a local file into a tar archive func addFileToTar(tw *tar.Writer, localPath, tarPath string) error { // Open the file to be added to the tar - file, err := os.Open(localPath) + file, err := os.Open(filepath.Clean(localPath)) if err != nil { return fmt.Errorf("failed to open file: %w", err) } - defer file.Close() + defer func() { + // ignore any errors from Close functions, the writers will be + // closed when the program exits + if file != nil { + _ = file.Close() + } + }() // Get file info to create tar header fileInfo, err := file.Stat() @@ -1979,7 +1992,7 @@ func getRemoteFileSize(clientset *kubernetes.Clientset, config *rest.Config, exec, err := remotecommand.NewSPDYExecutor(config, "GET", req.URL()) if err != nil { - return 0, fmt.Errorf("could not initialize SPDY executor for stat: %v", err) + return 0, fmt.Errorf("could not initialize SPDY executor for stat: %w", err) } var stdout, stderr strings.Builder @@ -1990,13 +2003,13 @@ func getRemoteFileSize(clientset *kubernetes.Clientset, config *rest.Config, Tty: false, }) if err != nil { - return 0, fmt.Errorf("could not get file size: %v, stderr: %s", err, stderr.String()) + return 0, fmt.Errorf("could not get file size: %w, stderr: %s", err, stderr.String()) } // Parse the file size from stdout size, err := strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64) if err != nil { - return 0, fmt.Errorf("failed to parse file size: %v", err) + return 0, fmt.Errorf("failed to parse file size: %w", err) } return size, nil From 
478c36e1042ee0e819c33d873a0befff84ff43cb Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Wed, 25 Sep 2024 15:19:51 +0000 Subject: [PATCH 03/10] Remove the cat logic for gathering PG logs. Only do streaming. Create a unique subdirectory in outputDir (matching the unique timestamp) so that local files are logically separate from other files. Adding messaging and error handling so a user understands. --- internal/cmd/export.go | 100 ++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 57 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index 20db726..7cdafd5 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -452,7 +452,8 @@ Collecting PGO CLI logs... // All Postgres Logs on the Postgres Instances (primary and replicas) if numLogs > 0 { if err == nil { - err = gatherPostgresLogsAndConfigs(ctx, clientset, restConfig, namespace, clusterName, outputDir, numLogs, tw, cmd) + err = gatherPostgresLogsAndConfigs(ctx, clientset, restConfig, + namespace, clusterName, outputDir, outputFile, numLogs, tw, cmd) } } @@ -959,6 +960,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, namespace string, clusterName string, outputDir string, + outputFile string, numLogs int, tw *tar.Writer, cmd *cobra.Command, @@ -1028,6 +1030,16 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, } logFiles := strings.Split(strings.TrimSpace(stdout), "\n") + + // localDirectory is created to save data on disk + // e.g.
outputDir/crunchy_k8s_support_export_2022-08-08-115726-0400/remotePath + localDirectory := filepath.Join(outputDir, strings.ReplaceAll(outputFile, ".tar.gz", "")) + + // flag to determine whether or not to remove localDirectory after the loop + // When an error happens, this flag will switch to false + // It's nice to have the extra data around when errors have happened + doCleanup := true + for _, logFile := range logFiles { // get the file size to stream fileSize, err := getRemoteFileSize(clientset, config, namespace, pod.Name, util.ContainerDatabase, logFile) @@ -1036,59 +1048,38 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, continue } - // longFileName is namespace/podname:path/to/file - longFileName := fmt.Sprintf("%s/%s:%s", namespace, pod.Name, logFile) - writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", longFileName, ConvertBytes(fileSize))) - - // flags to switch behavior between storing the file in memory versus disk - doCat := false - doStream := true - - if doCat { - path := fmt.Sprintf("%s/pods/%s/%s", clusterName, pod.Name, logFile) - var buf bytes.Buffer - - stdout, stderr, err := Executor(exec).catFile(logFile) - if err != nil { - if apierrors.IsForbidden(err) { - writeInfo(cmd, err.Error()) - // Continue and output errors for each log file - // Allow the user to see and address all issues at once - continue - } - return err - } + // fileSpecSrc is namespace/podname:path/to/file + // fileSpecDest is the local destination of the file + // These are used to help the user grab the file manually when necessary + fileSpecSrc := fmt.Sprintf("%s/%s:%s", namespace, pod.Name, logFile) + fileSpecDest := filepath.Join(localDirectory, logFile) + writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", fileSpecSrc, ConvertBytes(fileSize))) - buf.Write([]byte(stdout)) - if stderr != "" { - str := fmt.Sprintf("\nError returned: %s\n", stderr) - buf.Write([]byte(str)) - } + // Stream the file to disk and write the local file to the tar + err = 
streamFileFromPod(clientset, config, tw, + localDirectory, clusterName, namespace, pod.Name, util.ContainerDatabase, logFile) - if err := writeTar(tw, buf.Bytes(), path, cmd); err != nil { - return err - } + if err != nil { + doCleanup = false // prevent the deletion of localDirectory so a user can examine contents + writeInfo(cmd, fmt.Sprintf("\tError streaming file %s: %v", logFile, err)) + writeInfo(cmd, fmt.Sprintf("\tCollect manually with kubectl cp -c %s %s %s", + util.ContainerDatabase, fileSpecSrc, fileSpecDest)) + writeInfo(cmd, fmt.Sprintf("\tRemove %s manually after gathering necessary information", localDirectory)) + continue } - if doStream { - err = streamFileFromPod(clientset, config, tw, - outputDir, clusterName, namespace, pod.Name, util.ContainerDatabase, logFile) - - if err != nil { - writeInfo(cmd, fmt.Sprintf("\tError streaming file %s: %v", logFile, err)) - // "failed to remove" errors are not remedied with kubectl cp - if !strings.Contains(err.Error(), "failed to remove") { - writeInfo(cmd, fmt.Sprintf("\tCollect manually with kubectl cp -c %s %s/%s:%s %s", - util.ContainerDatabase, namespace, pod.Name, logFile, filepath.Join(outputDir, filepath.Base(logFile)))) - } - // "failed to remove" errors should instruct the user to remove manually - // this happens often on Windows - if strings.Contains(err.Error(), "failed to remove") { - writeInfo(cmd, fmt.Sprintf("\tYou may need to remove %s manually", filepath.Join(outputDir, filepath.Base(logFile)))) - } + } - continue - } + // doCleanup is true when there are no errors above. + if doCleanup { + // Remove the local directory created to hold the data + // Errors in removing localDirectory should instruct the user to remove manually. 
+ // This happens often on Windows + err = os.RemoveAll(localDirectory) + if err != nil { + writeInfo(cmd, fmt.Sprintf("\tError removing %s: %v", localDirectory, err)) + writeInfo(cmd, fmt.Sprintf("\tYou may need to remove %s manually", localDirectory)) + continue } } @@ -1845,10 +1836,11 @@ func writeDebug(cmd *cobra.Command, s string) { // streamFileFromPod streams the file from the Kubernetes pod to a local file. func streamFileFromPod(clientset *kubernetes.Clientset, config *rest.Config, tw *tar.Writer, - outputDir, clusterName, namespace, podName, containerName, remotePath string) error { + localDirectory, clusterName, namespace, podName, containerName, remotePath string) error { // create localPath to write the streamed data from remotePath - localPath := filepath.Join(outputDir, filepath.Base(remotePath)) + // use the uniqueness of outputFile to avoid overwriting other files + localPath := filepath.Join(localDirectory, remotePath) if err := os.MkdirAll(filepath.Dir(localPath), 0750); err != nil { return fmt.Errorf("failed to create path for file: %w", err) } @@ -1916,12 +1908,6 @@ func streamFileFromPod(clientset *kubernetes.Clientset, return fmt.Errorf("error writing to tar: %w", err) } - // remove localPath - err = os.Remove(localPath) - if err != nil { - return fmt.Errorf("failed to %w", err) - } - return nil } From bb86785bfa308d87216efdc6403679d5661a3791 Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 14:25:06 +0000 Subject: [PATCH 04/10] remove extra call to getRemoteFileSize --- internal/cmd/export.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index 7cdafd5..3d4ef48 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -1057,7 +1057,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, // Stream the file to disk and write the local file to the tar err = streamFileFromPod(clientset, config, tw, - localDirectory, clusterName, 
namespace, pod.Name, util.ContainerDatabase, logFile) + localDirectory, clusterName, namespace, pod.Name, util.ContainerDatabase, logFile, fileSize) if err != nil { doCleanup = false // prevent the deletion of localDirectory so a user can examine contents @@ -1836,7 +1836,8 @@ func writeDebug(cmd *cobra.Command, s string) { // streamFileFromPod streams the file from the Kubernetes pod to a local file. func streamFileFromPod(clientset *kubernetes.Clientset, config *rest.Config, tw *tar.Writer, - localDirectory, clusterName, namespace, podName, containerName, remotePath string) error { + localDirectory, clusterName, namespace, podName, containerName, remotePath string, + remoteFileSize int64) error { // create localPath to write the streamed data from remotePath // use the uniqueness of outputFile to avoid overwriting other files @@ -1875,12 +1876,6 @@ func streamFileFromPod(clientset *kubernetes.Clientset, return fmt.Errorf("failed to initialize SPDY executor: %w", err) } - // remoteFileSize is the file size within the container - remoteFileSize, err := getRemoteFileSize(clientset, config, namespace, podName, containerName, remotePath) - if err != nil { - return fmt.Errorf("could not get file size: %w", err) - } - // stream remotePath to localPath err = exec.Stream(remotecommand.StreamOptions{ Stdout: outFile, From bd07d214ab08f746b51c09d7272c8cd400467464 Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 15:20:36 +0000 Subject: [PATCH 05/10] refactored getRemoteFileSize to use bashCommand to get the filesize --- internal/cmd/export.go | 46 ++++++++++++++---------------------------- 1 file changed, 15 insertions(+), 31 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index 3d4ef48..f5bcfc5 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -49,7 +49,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - remotecommand "k8s.io/client-go/tools/remotecommand" + 
"k8s.io/client-go/tools/remotecommand" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator-client/internal" @@ -1042,7 +1042,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, for _, logFile := range logFiles { // get the file size to stream - fileSize, err := getRemoteFileSize(clientset, config, namespace, pod.Name, util.ContainerDatabase, logFile) + fileSize, err := getRemoteFileSize(config, namespace, pod.Name, util.ContainerDatabase, logFile) if err != nil { writeDebug(cmd, fmt.Sprintf("could not get file size for %s: %v\n", logFile, err)) continue @@ -1951,44 +1951,28 @@ func addFileToTar(tw *tar.Writer, localPath, tarPath string) error { } // getRemoteFileSize returns the size of a file within a container so that we can stream its contents -func getRemoteFileSize(clientset *kubernetes.Clientset, config *rest.Config, +func getRemoteFileSize(config *rest.Config, namespace string, podName string, containerName string, filePath string) (int64, error) { - // Prepare the command to get the file size using "stat -c %s " - req := clientset. - CoreV1(). - RESTClient(). - Post(). - Namespace(namespace). - Resource("pods"). - Name(podName). - SubResource("exec"). - Param("container", containerName). - Param("command", "stat"). - Param("command", "-c"). - Param("command", "%s"). - Param("command", filePath). - Param("stderr", "true"). - Param("stdout", "true") - - exec, err := remotecommand.NewSPDYExecutor(config, "GET", req.URL()) + podExec, err := util.NewPodExecutor(config) if err != nil { - return 0, fmt.Errorf("could not initialize SPDY executor for stat: %w", err) + return 0, fmt.Errorf("could not create executor: %w", err) + } + exec := func(stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return podExec(namespace, podName, containerName, + stdin, stdout, stderr, command...) 
} - var stdout, stderr strings.Builder - err = exec.Stream(remotecommand.StreamOptions{ - Stdin: nil, - Stdout: &stdout, - Stderr: &stderr, - Tty: false, - }) + // Prepare the command to get the file size using "stat -c %s " + command := fmt.Sprintf("stat -c %s %s", "%s", filePath) + stdout, stderr, err := Executor(exec).bashCommand(command) if err != nil { - return 0, fmt.Errorf("could not get file size: %w, stderr: %s", err, stderr.String()) + return 0, fmt.Errorf("could not get file size: %w, stderr: %s", err, stderr) } // Parse the file size from stdout - size, err := strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64) + size, err := strconv.ParseInt(strings.TrimSpace(stdout), 10, 64) if err != nil { return 0, fmt.Errorf("failed to parse file size: %w", err) } From ec94cfaf01e0917f63262c15dc616ca2a0ecdcd9 Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 17:49:33 +0000 Subject: [PATCH 06/10] added copyFile function to cat a file contents to a local file --- internal/cmd/exec.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/internal/cmd/exec.go b/internal/cmd/exec.go index 9688b47..9cb1d71 100644 --- a/internal/cmd/exec.go +++ b/internal/cmd/exec.go @@ -18,6 +18,7 @@ import ( "bytes" "fmt" "io" + "os" ) // Executor calls commands @@ -110,6 +111,13 @@ func (exec Executor) catFile(filePath string) (string, string, error) { return stdout.String(), stderr.String(), err } +func (exec Executor) copyFile(source string, destination *os.File) (string, error) { + var stderr bytes.Buffer + command := fmt.Sprintf("cat %s", source) + err := exec(nil, destination, &stderr, "bash", "-ceu", "--", command) + return stderr.String(), err +} + // patronictl takes a patronictl subcommand and returns the output of that command func (exec Executor) patronictl(cmd, output string) (string, string, error) { var stdout, stderr bytes.Buffer From 87d12960cea9cbdcf622d4f18a6f8e1550bd3b99 Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 
17:50:28 +0000 Subject: [PATCH 07/10] use the copyFile function rather than calling manually --- internal/cmd/export.go | 36 +++++++++++------------------------- 1 file changed, 11 insertions(+), 25 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index f5bcfc5..4b7992d 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -49,7 +49,6 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/remotecommand" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator-client/internal" @@ -1056,7 +1055,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", fileSpecSrc, ConvertBytes(fileSize))) // Stream the file to disk and write the local file to the tar - err = streamFileFromPod(clientset, config, tw, + err = streamFileFromPod(config, tw, localDirectory, clusterName, namespace, pod.Name, util.ContainerDatabase, logFile, fileSize) if err != nil { @@ -1834,8 +1833,7 @@ func writeDebug(cmd *cobra.Command, s string) { } // streamFileFromPod streams the file from the Kubernetes pod to a local file. -func streamFileFromPod(clientset *kubernetes.Clientset, - config *rest.Config, tw *tar.Writer, +func streamFileFromPod(config *rest.Config, tw *tar.Writer, localDirectory, clusterName, namespace, podName, containerName, remotePath string, remoteFileSize int64) error { @@ -1858,30 +1856,18 @@ func streamFileFromPod(clientset *kubernetes.Clientset, } }() - // request to cat remotePath - req := clientset.CoreV1().RESTClient(). - Get(). - Resource("pods"). - Name(podName). - Namespace(namespace). - SubResource("exec"). - Param("container", containerName). - Param("command", "cat"). - Param("command", remotePath). - Param("stderr", "true"). 
- Param("stdout", "true") - - exec, err := remotecommand.NewSPDYExecutor(config, "GET", req.URL()) + // Get Postgres Log Files + podExec, err := util.NewPodExecutor(config) if err != nil { - return fmt.Errorf("failed to initialize SPDY executor: %w", err) + return err + } + exec := func(stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return podExec(namespace, podName, containerName, + stdin, stdout, stderr, command...) } - // stream remotePath to localPath - err = exec.Stream(remotecommand.StreamOptions{ - Stdout: outFile, - Stderr: os.Stderr, - Tty: false, - }) + _, err = Executor(exec).copyFile(remotePath, outFile) if err != nil { return fmt.Errorf("error during file streaming: %w", err) } From cfe24ff0b7bd78f3ff931552d282f0546daa2417 Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 17:51:29 +0000 Subject: [PATCH 08/10] added example comment for fileSpecSrc --- internal/cmd/export.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index 4b7992d..73e3b59 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -1050,6 +1050,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, // fileSpecSrc is namespace/podname:path/to/file // fileSpecDest is the local destination of the file // These are used to help the user grab the file manually when necessary + // e.g. 
postgres-operator/hippo-instance1-vp9k-0:pgdata/pg16/log/postgresql-Tue.log fileSpecSrc := fmt.Sprintf("%s/%s:%s", namespace, pod.Name, logFile) fileSpecDest := filepath.Join(localDirectory, logFile) writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", fileSpecSrc, ConvertBytes(fileSize))) From a6539fb8e5c11a80755f042f699831a50f9c0718 Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 17:52:15 +0000 Subject: [PATCH 09/10] added comment to copyFile --- internal/cmd/exec.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/cmd/exec.go b/internal/cmd/exec.go index 9cb1d71..2359e69 100644 --- a/internal/cmd/exec.go +++ b/internal/cmd/exec.go @@ -111,6 +111,8 @@ func (exec Executor) catFile(filePath string) (string, string, error) { return stdout.String(), stderr.String(), err } +// copyFile takes the full path of a file and a local destination to save the +// file on disk func (exec Executor) copyFile(source string, destination *os.File) (string, error) { var stderr bytes.Buffer command := fmt.Sprintf("cat %s", source) err := exec(nil, destination, &stderr, "bash", "-ceu", "--", command) return stderr.String(), err } From 017b571b29691b10a9baf58dac598006b67e4a4f Mon Sep 17 00:00:00 2001 From: Philip Hurst Date: Tue, 1 Oct 2024 17:53:44 +0000 Subject: [PATCH 10/10] rename ConvertBytes to convertBytes --- internal/cmd/export.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/cmd/export.go b/internal/cmd/export.go index 73e3b59..0105352 100644 --- a/internal/cmd/export.go +++ b/internal/cmd/export.go @@ -1053,7 +1053,7 @@ func gatherPostgresLogsAndConfigs(ctx context.Context, // e.g.
postgres-operator/hippo-instance1-vp9k-0:pgdata/pg16/log/postgresql-Tue.log fileSpecSrc := fmt.Sprintf("%s/%s:%s", namespace, pod.Name, logFile) fileSpecDest := filepath.Join(localDirectory, logFile) - writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", fileSpecSrc, ConvertBytes(fileSize))) + writeInfo(cmd, fmt.Sprintf("\tSize of %-85s %v", fileSpecSrc, convertBytes(fileSize))) // Stream the file to disk and write the local file to the tar err = streamFileFromPod(config, tw, @@ -1967,8 +1967,8 @@ func getRemoteFileSize(config *rest.Config, return size, nil } -// ConvertBytes converts a byte size (int64) into a human-readable format. -func ConvertBytes(bytes int64) string { +// convertBytes converts a byte size (int64) into a human-readable format. +func convertBytes(bytes int64) string { const unit = 1024 if bytes < unit { return fmt.Sprintf("%d B", bytes)