diff --git a/cmd/checkpoint.go b/cmd/checkpoint.go index 9d17878..98ae5b1 100644 --- a/cmd/checkpoint.go +++ b/cmd/checkpoint.go @@ -1,17 +1,11 @@ package cmd import ( - "context" "fmt" "time" - truenas "github.com/deevus/truenas-go" "github.com/dustin/go-humanize" "github.com/spf13/cobra" - - "github.com/deevus/pixels/internal/cache" - "github.com/deevus/pixels/internal/ssh" - tnc "github.com/deevus/pixels/internal/truenas" ) func init() { @@ -52,17 +46,7 @@ func init() { rootCmd.AddCommand(cpCmd) } -// resolveDatasetPath returns the ZFS dataset path for a container. -// Priority: config override > auto-detect from virt.global.config. -func resolveDatasetPath(ctx context.Context, client *tnc.Client, name string) (string, error) { - if cfg.Checkpoint.DatasetPrefix != "" { - return cfg.Checkpoint.DatasetPrefix + "/" + containerName(name), nil - } - return client.ContainerDataset(ctx, containerName(name)) -} - func runCheckpointCreate(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name := args[0] label, _ := cmd.Flags().GetString("label") @@ -70,45 +54,30 @@ func runCheckpointCreate(cmd *cobra.Command, args []string) error { label = "px-" + time.Now().Format("20060102-150405") } - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() + defer sb.Close() - ds, err := resolveDatasetPath(ctx, client, name) - if err != nil { + if err := sb.CreateSnapshot(cmd.Context(), name, label); err != nil { return err } - _, err = client.Snapshot.Create(ctx, truenas.CreateSnapshotOpts{ - Dataset: ds, - Name: label, - }) - if err != nil { - return fmt.Errorf("creating checkpoint: %w", err) - } - fmt.Fprintf(cmd.OutOrStdout(), "Checkpoint %q created for %s\n", label, name) return nil } func runCheckpointList(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name := args[0] - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() + defer 
sb.Close() - ds, err := resolveDatasetPath(ctx, client, name) - if err != nil { - return err - } - - snapshots, err := client.ListSnapshots(ctx, ds) + snapshots, err := sb.ListSnapshots(cmd.Context(), name) if err != nil { return err } @@ -121,84 +90,50 @@ func runCheckpointList(cmd *cobra.Command, args []string) error { w := newTabWriter(cmd) fmt.Fprintln(w, "LABEL\tSIZE") for _, s := range snapshots { - fmt.Fprintf(w, "%s\t%s\n", s.SnapshotName, humanize.IBytes(uint64(s.Referenced))) + fmt.Fprintf(w, "%s\t%s\n", s.Label, humanize.IBytes(uint64(s.Size))) } return w.Flush() } func runCheckpointRestore(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name, label := args[0], args[1] - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() - - ds, err := resolveDatasetPath(ctx, client, name) - if err != nil { - return err - } - sid := ds + "@" + label + defer sb.Close() start := time.Now() - fmt.Fprintf(cmd.ErrOrStderr(), "Stopping %s...\n", name) - if err := client.Virt.StopInstance(ctx, containerName(name), truenas.StopVirtInstanceOpts{ - Timeout: 30, - }); err != nil { - return fmt.Errorf("stopping %s: %w", name, err) - } + fmt.Fprintf(cmd.ErrOrStderr(), "Restoring %s to %q...\n", name, label) - if err := client.SnapshotRollback(ctx, sid); err != nil { + if err := sb.RestoreSnapshot(cmd.Context(), name, label); err != nil { return err } - if err := client.Virt.StartInstance(ctx, containerName(name)); err != nil { - return fmt.Errorf("starting %s: %w", name, err) - } - - instance, err := client.Virt.GetInstance(ctx, containerName(name)) - if err != nil { - return fmt.Errorf("refreshing %s: %w", name, err) - } - - ip := resolveIP(instance) - cache.Put(name, &cache.Entry{IP: ip, Status: instance.Status}) - if ip != "" { - if err := ssh.WaitReady(ctx, ip, 30*time.Second, nil); err != nil { - fmt.Fprintf(cmd.ErrOrStderr(), "Warning: SSH not ready: %v\n", err) - } - } - elapsed := 
time.Since(start).Truncate(100 * time.Millisecond) fmt.Fprintf(cmd.OutOrStdout(), "Restored %s to %q in %s\n", name, label, elapsed) - if ip != "" { - fmt.Fprintf(cmd.OutOrStdout(), " SSH: ssh %s@%s\n", cfg.SSH.User, ip) + + inst, err := sb.Get(cmd.Context(), name) + if err == nil && len(inst.Addresses) > 0 { + fmt.Fprintf(cmd.OutOrStdout(), " SSH: ssh %s@%s\n", cfg.SSH.User, inst.Addresses[0]) } return nil } func runCheckpointDelete(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name, label := args[0], args[1] - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() + defer sb.Close() - ds, err := resolveDatasetPath(ctx, client, name) - if err != nil { + if err := sb.DeleteSnapshot(cmd.Context(), name, label); err != nil { return err } - sid := ds + "@" + label - - if err := client.Snapshot.Delete(ctx, sid); err != nil { - return fmt.Errorf("deleting checkpoint: %w", err) - } fmt.Fprintf(cmd.OutOrStdout(), "Deleted checkpoint %q from %s\n", label, name) return nil diff --git a/cmd/destroy.go b/cmd/destroy.go index 400dec4..cca006d 100644 --- a/cmd/destroy.go +++ b/cmd/destroy.go @@ -2,17 +2,11 @@ package cmd import ( "bufio" - "context" "fmt" "os" "strings" - "time" - truenas "github.com/deevus/truenas-go" "github.com/spf13/cobra" - - "github.com/deevus/pixels/internal/cache" - "github.com/deevus/pixels/internal/retry" ) func init() { @@ -27,7 +21,6 @@ func init() { } func runDestroy(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name := args[0] force, _ := cmd.Flags().GetBool("force") @@ -44,38 +37,16 @@ func runDestroy(cmd *cobra.Command, args []string) error { } } - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() - - instance, err := client.Virt.GetInstance(ctx, containerName(name)) - if err != nil { - return fmt.Errorf("looking up %s: %w", name, err) - } - if instance == nil { - return 
fmt.Errorf("pixel %q not found", name) - } + defer sb.Close() - if instance.Status == "RUNNING" { - fmt.Fprintf(cmd.ErrOrStderr(), "Stopping %s...\n", name) - if err := client.Virt.StopInstance(ctx, containerName(name), truenas.StopVirtInstanceOpts{ - Timeout: 30, - }); err != nil { - return fmt.Errorf("stopping %s: %w", name, err) - } - } - - // Retry delete — Incus sometimes needs a moment after stop for the - // storage volume to be fully released. - if err := retry.Do(ctx, 3, 2*time.Second, func(ctx context.Context) error { - return client.Virt.DeleteInstance(ctx, containerName(name)) - }); err != nil { - return fmt.Errorf("deleting %s: %w", name, err) + if err := sb.Delete(cmd.Context(), name); err != nil { + return err } - cache.Delete(name) fmt.Fprintf(cmd.OutOrStdout(), "Destroyed %s\n", name) return nil } diff --git a/cmd/exec.go b/cmd/exec.go index db7c2a5..76b844d 100644 --- a/cmd/exec.go +++ b/cmd/exec.go @@ -5,6 +5,7 @@ import ( "os" "time" + "al.essio.dev/pkg/shellescape" "github.com/spf13/cobra" "github.com/deevus/pixels/internal/ssh" @@ -38,7 +39,12 @@ func runExec(cmd *cobra.Command, args []string) error { return err } - exitCode, err := ssh.Exec(ctx, ssh.ConnConfig{Host: ip, User: cfg.SSH.User, KeyPath: cfg.SSH.Key, Env: cfg.EnvForward}, command) + // Wrap in a login shell so ~/.profile is sourced (adds ~/.local/bin to PATH). + // Activate mise if installed so tools it manages (claude, node, etc.) are on PATH. + // Pass as a single string so SSH's argument concatenation preserves quoting. 
+ inner := shellescape.QuoteCommand(command) + loginCmd := []string{"bash -lc " + shellescape.Quote("eval \"$(mise activate bash 2>/dev/null)\"; "+inner)} + exitCode, err := ssh.Exec(ctx, ssh.ConnConfig{Host: ip, User: cfg.SSH.User, KeyPath: cfg.SSH.Key, Env: cfg.EnvForward}, loginCmd) if err != nil { return err } diff --git a/cmd/list.go b/cmd/list.go index eba18cd..15f39db 100644 --- a/cmd/list.go +++ b/cmd/list.go @@ -16,15 +16,13 @@ func init() { } func runList(cmd *cobra.Command, _ []string) error { - ctx := cmd.Context() - - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() + defer sb.Close() - instances, err := client.ListInstances(ctx) + instances, err := sb.List(cmd.Context()) if err != nil { return err } @@ -38,13 +36,10 @@ func runList(cmd *cobra.Command, _ []string) error { fmt.Fprintln(w, "NAME\tSTATUS\tIP") for _, inst := range instances { ip := "—" - for _, a := range inst.Aliases { - if (a.Type == "INET" || a.Type == "ipv4") && a.Address != "" { - ip = a.Address - break - } + if len(inst.Addresses) > 0 { + ip = inst.Addresses[0] } - fmt.Fprintf(w, "%s\t%s\t%s\n", displayName(inst.Name), inst.Status, ip) + fmt.Fprintf(w, "%s\t%s\t%s\n", inst.Name, inst.Status, ip) } return w.Flush() } diff --git a/cmd/network.go b/cmd/network.go index 0ffe0d5..41e8453 100644 --- a/cmd/network.go +++ b/cmd/network.go @@ -2,14 +2,10 @@ package cmd import ( "fmt" - "strings" "github.com/spf13/cobra" - "github.com/deevus/pixels/internal/cache" - "github.com/deevus/pixels/internal/egress" - "github.com/deevus/pixels/internal/ssh" - tnc "github.com/deevus/pixels/internal/truenas" + "github.com/deevus/pixels/sandbox" ) func init() { @@ -49,74 +45,33 @@ func init() { rootCmd.AddCommand(networkCmd) } -// networkContext holds the resolved state needed by network subcommands. 
-type networkContext struct { - name string - ip string - client *tnc.Client -} - -// resolveNetworkContext connects to TrueNAS and resolves the container's IP. -func resolveNetworkContext(cmd *cobra.Command, name string) (*networkContext, error) { - ctx := cmd.Context() - - // Try cache for IP. - var ip string - if entry := cache.Get(name); entry != nil && entry.Status == "RUNNING" && entry.IP != "" { - ip = entry.IP - } +func runNetworkShow(cmd *cobra.Command, args []string) error { + name := args[0] - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { - return nil, err - } - - if ip == "" { - instance, err := client.Virt.GetInstance(ctx, containerName(name)) - if err != nil { - client.Close() - return nil, fmt.Errorf("looking up %s: %w", name, err) - } - if instance.Status != "RUNNING" { - client.Close() - return nil, fmt.Errorf("%s is not running (status: %s)", name, instance.Status) - } - ip = resolveIP(instance) - if ip == "" { - client.Close() - return nil, fmt.Errorf("%s has no IP address", name) - } + return err } + defer sb.Close() - return &networkContext{name: name, ip: ip, client: client}, nil -} - -// sshAsRoot runs a command on the container as root via SSH. -func sshAsRoot(cmd *cobra.Command, ip string, command []string) (int, error) { - return ssh.Exec(cmd.Context(), ssh.ConnConfig{Host: ip, User: "root", KeyPath: cfg.SSH.Key}, command) -} - -func runNetworkShow(cmd *cobra.Command, args []string) error { - nc, err := resolveNetworkContext(cmd, args[0]) + policy, err := sb.GetPolicy(cmd.Context(), name) if err != nil { return err } - defer nc.client.Close() - fmt.Fprintf(cmd.ErrOrStderr(), "Fetching egress rules for %s...\n", args[0]) + if policy.Mode == sandbox.EgressUnrestricted { + fmt.Fprintln(cmd.OutOrStdout(), "Mode: unrestricted") + return nil + } - // Show domains and rule count via a single shell command. 
- showCmd := `if [ -f /etc/pixels-egress-domains ]; then - echo "Mode: restricted" - echo "Domains:" - sed 's/^/ /' /etc/pixels-egress-domains - count=$(nft list set inet pixels_egress allowed_v4 2>/dev/null | grep -c 'elements' || echo 0) - echo "Resolved IPs: $count" -else - echo "Mode: unrestricted (no egress policy configured)" -fi` - _, err = sshAsRoot(cmd, nc.ip, []string{"bash", "-c", showCmd}) - return err + fmt.Fprintf(cmd.OutOrStdout(), "Mode: %s\n", policy.Mode) + if len(policy.Domains) > 0 { + fmt.Fprintln(cmd.OutOrStdout(), "Domains:") + for _, d := range policy.Domains { + fmt.Fprintf(cmd.OutOrStdout(), " %s\n", d) + } + } + return nil } func runNetworkSet(cmd *cobra.Command, args []string) error { @@ -126,108 +81,33 @@ func runNetworkSet(cmd *cobra.Command, args []string) error { return fmt.Errorf("invalid mode %q: must be unrestricted, agent, or allowlist", mode) } - nc, err := resolveNetworkContext(cmd, name) + sb, err := openSandbox() if err != nil { return err } - defer nc.client.Close() - ctx := cmd.Context() - cname := containerName(name) + defer sb.Close() - if mode == "unrestricted" { - // Remove egress rules and restore blanket sudo. - sshAsRoot(cmd, nc.ip, []string{"nft", "flush", "ruleset"}) - sshAsRoot(cmd, nc.ip, []string{"rm", "-f", "/etc/pixels-egress-domains", "/etc/nftables.conf", "/usr/local/bin/pixels-resolve-egress.sh"}) - restoreSudo := fmt.Sprintf("cat > /etc/sudoers.d/pixel << 'PIXELS_EOF'\n%sPIXELS_EOF\nchmod 0440 /etc/sudoers.d/pixel", egress.SudoersUnrestricted()) - sshAsRoot(cmd, nc.ip, []string{"bash", "-c", restoreSudo}) - fmt.Fprintf(cmd.OutOrStdout(), "Egress set to unrestricted for %s\n", name) - return nil - } - - // Always write nftables.conf and resolve script — ensures the latest - // rules are applied when switching modes or after binary updates. 
- if err := writeEgressInfra(cmd, nc.ip, nc.client, cname); err != nil { + if err := sb.SetEgressMode(cmd.Context(), name, sandbox.EgressMode(mode)); err != nil { return err } - domains := egress.ResolveDomains(mode, cfg.Network.Allow) - - // Write domains file via TrueNAS API. - if err := nc.client.WriteContainerFile(ctx, cname, "/etc/pixels-egress-domains", []byte(egress.DomainsFileContent(domains)), 0o644); err != nil { - return fmt.Errorf("writing domains file: %w", err) - } - - // Write CIDRs file if the preset has any. - cidrs := egress.PresetCIDRs(mode) - if len(cidrs) > 0 { - if err := nc.client.WriteContainerFile(ctx, cname, "/etc/pixels-egress-cidrs", []byte(egress.CIDRsFileContent(cidrs)), 0o644); err != nil { - return fmt.Errorf("writing cidrs file: %w", err) - } - } - - // Resolve domains and load nftables rules. - if code, err := sshAsRoot(cmd, nc.ip, []string{"/usr/local/bin/pixels-resolve-egress.sh"}); err != nil || code != 0 { - return fmt.Errorf("running resolve script: exit %d, err %v", code, err) - } - - // Write safe-apt wrapper and restrict sudoers. 
- if err := nc.client.WriteContainerFile(ctx, cname, "/usr/local/bin/safe-apt", []byte(egress.SafeAptScript()), 0o755); err != nil { - return fmt.Errorf("writing safe-apt wrapper: %w", err) - } - if err := nc.client.WriteContainerFile(ctx, cname, "/etc/sudoers.d/pixel", []byte(egress.SudoersRestricted()), 0o440); err != nil { - return fmt.Errorf("writing restricted sudoers: %w", err) - } - - fmt.Fprintf(cmd.OutOrStdout(), "Egress set to %s for %s (%d domains)\n", mode, name, len(domains)) + fmt.Fprintf(cmd.OutOrStdout(), "Egress set to %s for %s\n", mode, name) return nil } func runNetworkAllow(cmd *cobra.Command, args []string) error { name, domain := args[0], args[1] - nc, err := resolveNetworkContext(cmd, name) + sb, err := openSandbox() if err != nil { return err } - defer nc.client.Close() - ctx := cmd.Context() - cname := containerName(name) + defer sb.Close() - // Ensure egress infrastructure exists (idempotent). - if err := ensureEgressFiles(cmd, nc.ip, nc.client, cname); err != nil { + if err := sb.AllowDomain(cmd.Context(), name, domain); err != nil { return err } - // Read current domains via SSH. - out, err := ssh.Output(ctx, ssh.ConnConfig{Host: nc.ip, User: "root", KeyPath: cfg.SSH.Key}, []string{"cat", "/etc/pixels-egress-domains"}) - if err != nil { - return fmt.Errorf("reading domains file: %w", err) - } - - // Append domain if not already present. - current := strings.TrimSpace(string(out)) - lines := strings.Split(current, "\n") - for _, l := range lines { - if strings.TrimSpace(l) == domain { - fmt.Fprintf(cmd.OutOrStdout(), "%s already allowed for %s\n", domain, name) - return nil - } - } - if current != "" { - current += "\n" - } - current += domain + "\n" - - // Write back via TrueNAS API. - if err := nc.client.WriteContainerFile(ctx, cname, "/etc/pixels-egress-domains", []byte(current), 0o644); err != nil { - return fmt.Errorf("writing domains file: %w", err) - } - - // Re-resolve. 
- if code, err := sshAsRoot(cmd, nc.ip, []string{"/usr/local/bin/pixels-resolve-egress.sh"}); err != nil || code != 0 { - return fmt.Errorf("reloading rules: exit %d, err %v", code, err) - } - fmt.Fprintf(cmd.OutOrStdout(), "Allowed %s for %s\n", domain, name) return nil } @@ -235,85 +115,16 @@ func runNetworkAllow(cmd *cobra.Command, args []string) error { func runNetworkDeny(cmd *cobra.Command, args []string) error { name, domain := args[0], args[1] - nc, err := resolveNetworkContext(cmd, name) + sb, err := openSandbox() if err != nil { return err } - defer nc.client.Close() - ctx := cmd.Context() - cname := containerName(name) - - // Read current domains via SSH. - out, err := ssh.Output(ctx, ssh.ConnConfig{Host: nc.ip, User: "root", KeyPath: cfg.SSH.Key}, []string{"cat", "/etc/pixels-egress-domains"}) - if err != nil { - return fmt.Errorf("no egress policy configured on %s", name) - } - - // Remove domain. - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - var kept []string - found := false - for _, l := range lines { - if strings.TrimSpace(l) == domain { - found = true - continue - } - kept = append(kept, l) - } - if !found { - return fmt.Errorf("domain %s not found in egress allowlist for %s", domain, name) - } - - content := strings.Join(kept, "\n") + "\n" - - // Write back via TrueNAS API. - if err := nc.client.WriteContainerFile(ctx, cname, "/etc/pixels-egress-domains", []byte(content), 0o644); err != nil { - return fmt.Errorf("writing domains file: %w", err) - } + defer sb.Close() - // Re-resolve (full reload replaces all rules). 
- if code, err := sshAsRoot(cmd, nc.ip, []string{"/usr/local/bin/pixels-resolve-egress.sh"}); err != nil || code != 0 { - return fmt.Errorf("reloading rules: exit %d, err %v", code, err) + if err := sb.DenyDomain(cmd.Context(), name, domain); err != nil { + return err } fmt.Fprintf(cmd.OutOrStdout(), "Denied %s for %s\n", domain, name) return nil } - -// writeEgressInfra writes nftables.conf and the resolve script unconditionally. -// Called by `network set` to ensure the latest rules are always applied. -func writeEgressInfra(cmd *cobra.Command, ip string, client *tnc.Client, cname string) error { - ctx := cmd.Context() - - // Write nftables.conf via TrueNAS API. - if err := client.WriteContainerFile(ctx, cname, "/etc/nftables.conf", []byte(egress.NftablesConf()), 0o644); err != nil { - return fmt.Errorf("writing nftables.conf: %w", err) - } - - // Write resolve script via TrueNAS API. - if err := client.WriteContainerFile(ctx, cname, "/usr/local/bin/pixels-resolve-egress.sh", []byte(egress.ResolveScript()), 0o755); err != nil { - return fmt.Errorf("writing resolve script: %w", err) - } - - // Ensure nftables and dnsutils are installed. Use confold to keep our - // pre-written /etc/nftables.conf and avoid dpkg conffile prompts. - sshAsRoot(cmd, ip, []string{"bash", "-c", `DEBIAN_FRONTEND=noninteractive apt-get install -y -qq -o Dpkg::Options::="--force-confold" nftables dnsutils`}) - - return nil -} - -// ensureEgressFiles writes the nftables config and resolve script if not already -// present. This allows `network allow` to work on containers that were created -// without egress configured. 
-func ensureEgressFiles(cmd *cobra.Command, ip string, client *tnc.Client, cname string) error { - checkCode, _ := sshAsRoot(cmd, ip, []string{"test", "-f", "/usr/local/bin/pixels-resolve-egress.sh"}) - if checkCode == 0 { - return nil // already provisioned - } - if err := writeEgressInfra(cmd, ip, client, cname); err != nil { - return err - } - // Create empty domains file so allow can append to it. - sshAsRoot(cmd, ip, []string{"touch", "/etc/pixels-egress-domains"}) - return nil -} diff --git a/cmd/resolve.go b/cmd/resolve.go index 633ebae..4957545 100644 --- a/cmd/resolve.go +++ b/cmd/resolve.go @@ -83,6 +83,15 @@ func lookupRunningIP(ctx context.Context, virt truenas.VirtServiceAPI, name stri return ip, nil } +// resolveDatasetPath returns the ZFS dataset path for a container. +// Priority: config override > auto-detect from virt.global.config. +func resolveDatasetPath(ctx context.Context, client *tnc.Client, name string) (string, error) { + if cfg.Checkpoint.DatasetPrefix != "" { + return cfg.Checkpoint.DatasetPrefix + "/" + containerName(name), nil + } + return client.ContainerDataset(ctx, containerName(name)) +} + func newTabWriter(cmd *cobra.Command) *tabwriter.Writer { return tabwriter.NewWriter(cmd.OutOrStdout(), 0, 4, 2, ' ', 0) } diff --git a/cmd/root.go b/cmd/root.go index 63694fe..b211344 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -3,10 +3,16 @@ package cmd import ( "fmt" "os" + "strconv" + "strings" "github.com/spf13/cobra" "github.com/deevus/pixels/internal/config" + "github.com/deevus/pixels/sandbox" + + // Register the TrueNAS sandbox backend. + _ "github.com/deevus/pixels/sandbox/truenas" ) var ( @@ -51,6 +57,62 @@ func logv(cmd *cobra.Command, format string, a ...any) { } } +// openSandbox constructs a Sandbox from the loaded config. 
+func openSandbox() (sandbox.Sandbox, error) { + m := map[string]string{ + "host": cfg.TrueNAS.Host, + "api_key": cfg.TrueNAS.APIKey, + } + if cfg.TrueNAS.Port != 0 { + m["port"] = strconv.Itoa(cfg.TrueNAS.Port) + } + if cfg.TrueNAS.Username != "" { + m["username"] = cfg.TrueNAS.Username + } + if cfg.TrueNAS.InsecureSkipVerify != nil { + m["insecure"] = strconv.FormatBool(*cfg.TrueNAS.InsecureSkipVerify) + } + if cfg.Defaults.Image != "" { + m["image"] = cfg.Defaults.Image + } + if cfg.Defaults.CPU != "" { + m["cpu"] = cfg.Defaults.CPU + } + if cfg.Defaults.Memory != 0 { + m["memory"] = strconv.FormatInt(cfg.Defaults.Memory, 10) + } + if cfg.Defaults.Pool != "" { + m["pool"] = cfg.Defaults.Pool + } + if cfg.Defaults.NICType != "" { + m["nic_type"] = cfg.Defaults.NICType + } + if cfg.Defaults.Parent != "" { + m["parent"] = cfg.Defaults.Parent + } + if cfg.SSH.User != "" { + m["ssh_user"] = cfg.SSH.User + } + if cfg.SSH.Key != "" { + m["ssh_key"] = cfg.SSH.Key + } + if cfg.Checkpoint.DatasetPrefix != "" { + m["dataset_prefix"] = cfg.Checkpoint.DatasetPrefix + } + m["provision"] = strconv.FormatBool(cfg.Provision.IsEnabled()) + m["devtools"] = strconv.FormatBool(cfg.Provision.DevToolsEnabled()) + if cfg.Network.Egress != "" { + m["egress"] = cfg.Network.Egress + } + if len(cfg.Network.Allow) > 0 { + m["allow"] = strings.Join(cfg.Network.Allow, ",") + } + if len(cfg.Defaults.DNS) > 0 { + m["dns"] = strings.Join(cfg.Defaults.DNS, ",") + } + return sandbox.Open("truenas", m) +} + // Execute runs the root command. 
func Execute() { if err := rootCmd.Execute(); err != nil { diff --git a/cmd/start.go b/cmd/start.go index 7183254..ed9e7b4 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -2,12 +2,8 @@ package cmd import ( "fmt" - "time" "github.com/spf13/cobra" - - "github.com/deevus/pixels/internal/cache" - "github.com/deevus/pixels/internal/ssh" ) func init() { @@ -20,34 +16,29 @@ func init() { } func runStart(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name := args[0] - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() + defer sb.Close() - if err := client.Virt.StartInstance(ctx, containerName(name)); err != nil { - return fmt.Errorf("starting %s: %w", name, err) + if err := sb.Start(cmd.Context(), name); err != nil { + return err } - // Re-fetch to get the IP. - instance, err := client.Virt.GetInstance(ctx, containerName(name)) + inst, err := sb.Get(cmd.Context(), name) if err != nil { - return fmt.Errorf("refreshing %s: %w", name, err) + // Start succeeded but Get failed — still report success. 
+ fmt.Fprintf(cmd.OutOrStdout(), "Started %s\n", name) + return nil } - ip := resolveIP(instance) - cache.Put(name, &cache.Entry{IP: ip, Status: instance.Status}) - if ip != "" { - if err := ssh.WaitReady(ctx, ip, 30*time.Second, nil); err != nil { - fmt.Fprintf(cmd.ErrOrStderr(), "Warning: SSH not ready: %v\n", err) - } + if len(inst.Addresses) > 0 { fmt.Fprintf(cmd.OutOrStdout(), "Started %s\n", name) - fmt.Fprintf(cmd.OutOrStdout(), " IP: %s\n", ip) - fmt.Fprintf(cmd.OutOrStdout(), " SSH: ssh %s@%s\n", cfg.SSH.User, ip) + fmt.Fprintf(cmd.OutOrStdout(), " IP: %s\n", inst.Addresses[0]) + fmt.Fprintf(cmd.OutOrStdout(), " SSH: ssh %s@%s\n", cfg.SSH.User, inst.Addresses[0]) } else { fmt.Fprintf(cmd.OutOrStdout(), "Started %s (no IP assigned)\n", name) } diff --git a/cmd/stop.go b/cmd/stop.go index 4a23c02..74eb8e3 100644 --- a/cmd/stop.go +++ b/cmd/stop.go @@ -3,10 +3,7 @@ package cmd import ( "fmt" - truenas "github.com/deevus/truenas-go" "github.com/spf13/cobra" - - "github.com/deevus/pixels/internal/cache" ) func init() { @@ -19,22 +16,18 @@ func init() { } func runStop(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() name := args[0] - client, err := connectClient(ctx) + sb, err := openSandbox() if err != nil { return err } - defer client.Close() + defer sb.Close() - if err := client.Virt.StopInstance(ctx, containerName(name), truenas.StopVirtInstanceOpts{ - Timeout: 30, - }); err != nil { - return fmt.Errorf("stopping %s: %w", name, err) + if err := sb.Stop(cmd.Context(), name); err != nil { + return err } - cache.Delete(name) fmt.Fprintf(cmd.OutOrStdout(), "Stopped %s\n", name) return nil } diff --git a/go.mod b/go.mod index 3abd500..472957d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/deevus/pixels go 1.25.0 require ( + al.essio.dev/pkg/shellescape v1.6.0 github.com/BurntSushi/toml v1.6.0 + github.com/briandowns/spinner v1.23.2 github.com/caarlos0/env/v11 v11.4.0 github.com/deevus/truenas-go v0.4.0 
github.com/dustin/go-humanize v1.0.1 @@ -11,8 +13,6 @@ require ( ) require ( - al.essio.dev/pkg/shellescape v1.6.0 // indirect - github.com/briandowns/spinner v1.23.2 // indirect github.com/fatih/color v1.7.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect diff --git a/internal/ssh/ssh.go b/internal/ssh/ssh.go index efa89bd..8933f65 100644 --- a/internal/ssh/ssh.go +++ b/internal/ssh/ssh.go @@ -55,7 +55,7 @@ func WaitReady(ctx context.Context, host string, timeout time.Duration, log io.W // Exec runs a command on the remote host via SSH and returns its exit code. func Exec(ctx context.Context, cc ConnConfig, command []string) (int, error) { - args := append(sshArgs(cc), command...) + args := append(Args(cc), command...) cmd := exec.CommandContext(ctx, "ssh", args...) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout @@ -74,7 +74,7 @@ func Exec(ctx context.Context, cc ConnConfig, command []string) (int, error) { // ExecQuiet runs a non-interactive command on the remote host via SSH and // returns its exit code. Unlike Exec, it does not attach stdin/stdout/stderr. func ExecQuiet(ctx context.Context, cc ConnConfig, command []string) (int, error) { - args := append(sshArgs(cc), command...) + args := append(Args(cc), command...) cmd := exec.CommandContext(ctx, "ssh", args...) if err := cmd.Run(); err != nil { @@ -89,7 +89,7 @@ func ExecQuiet(ctx context.Context, cc ConnConfig, command []string) (int, error // Output runs a command on the remote host via SSH and returns its stdout. func Output(ctx context.Context, cc ConnConfig, command []string) ([]byte, error) { - args := append(sshArgs(cc), command...) + args := append(Args(cc), command...) cmd := exec.CommandContext(ctx, "ssh", args...) 
cmd.Stderr = os.Stderr return cmd.Output() @@ -98,7 +98,7 @@ func Output(ctx context.Context, cc ConnConfig, command []string) ([]byte, error // OutputQuiet runs a command on the remote host via SSH and returns its stdout, // discarding stderr. Use this when parsing command output programmatically. func OutputQuiet(ctx context.Context, cc ConnConfig, command []string) ([]byte, error) { - args := append(sshArgs(cc), command...) + args := append(Args(cc), command...) cmd := exec.CommandContext(ctx, "ssh", args...) return cmd.Output() } @@ -106,12 +106,15 @@ func OutputQuiet(ctx context.Context, cc ConnConfig, command []string) ([]byte, // TestAuth runs a quick SSH connection test (ssh ... true) to verify // key-based authentication works. Returns nil on success. func TestAuth(ctx context.Context, cc ConnConfig) error { - args := append(sshArgs(cc), "true") + args := append(Args(cc), "true") cmd := exec.CommandContext(ctx, "ssh", args...) return cmd.Run() } -func sshArgs(cc ConnConfig) []string { +// Args builds the SSH command-line arguments for the given connection config. +// It is exported for use by callers that need to construct custom exec.Cmd +// with non-standard Stdin/Stdout/Stderr (e.g. sandbox backends). +func Args(cc ConnConfig) []string { args := []string{ "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=" + os.DevNull, @@ -152,9 +155,9 @@ func sshArgs(cc ConnConfig) []string { // and the command is appended after user@host. func consoleArgs(cc ConnConfig, remoteCmd string) []string { if remoteCmd == "" { - return sshArgs(cc) + return Args(cc) } - args := sshArgs(cc) + args := Args(cc) // Insert -t before user@host (last element). 
userHost := args[len(args)-1] args = append(args[:len(args)-1], "-t", userHost, remoteCmd) diff --git a/internal/ssh/ssh_test.go b/internal/ssh/ssh_test.go index 219d659..e6e30f4 100644 --- a/internal/ssh/ssh_test.go +++ b/internal/ssh/ssh_test.go @@ -19,7 +19,7 @@ func TestConsole_SSHNotFound(t *testing.T) { func TestSSHArgs(t *testing.T) { t.Run("with key", func(t *testing.T) { - args := sshArgs(ConnConfig{Host: "10.0.0.1", User: "pixel", KeyPath: "/tmp/key"}) + args := Args(ConnConfig{Host: "10.0.0.1", User: "pixel", KeyPath: "/tmp/key"}) wantSuffix := []string{"-i", "/tmp/key", "pixel@10.0.0.1"} got := args[len(args)-3:] for i, w := range wantSuffix { @@ -30,7 +30,7 @@ func TestSSHArgs(t *testing.T) { }) t.Run("uses os.DevNull for UserKnownHostsFile", func(t *testing.T) { - args := sshArgs(ConnConfig{Host: "10.0.0.1", User: "pixel"}) + args := Args(ConnConfig{Host: "10.0.0.1", User: "pixel"}) want := "UserKnownHostsFile=" + os.DevNull found := false for _, a := range args { @@ -45,7 +45,7 @@ func TestSSHArgs(t *testing.T) { }) t.Run("without key", func(t *testing.T) { - args := sshArgs(ConnConfig{Host: "10.0.0.1", User: "pixel"}) + args := Args(ConnConfig{Host: "10.0.0.1", User: "pixel"}) last := args[len(args)-1] if last != "pixel@10.0.0.1" { t.Errorf("last arg = %q, want %q", last, "pixel@10.0.0.1") @@ -66,7 +66,7 @@ func TestSSHArgs(t *testing.T) { "API_KEY": "sk-secret", }, } - args := sshArgs(cc) + args := Args(cc) // All vars should be in a single SetEnv directive (space-separated, // sorted by key), preceded by -o. 
Multiple -o SetEnv flags don't @@ -95,7 +95,7 @@ func TestSSHArgs(t *testing.T) { }) t.Run("nil env produces no SetEnv", func(t *testing.T) { - args := sshArgs(ConnConfig{Host: "10.0.0.1", User: "pixel"}) + args := Args(ConnConfig{Host: "10.0.0.1", User: "pixel"}) for _, a := range args { if strings.HasPrefix(a, "SetEnv=") { t.Errorf("unexpected SetEnv arg %q with nil env", a) @@ -104,7 +104,7 @@ func TestSSHArgs(t *testing.T) { }) t.Run("empty env produces no SetEnv", func(t *testing.T) { - args := sshArgs(ConnConfig{Host: "10.0.0.1", User: "pixel", Env: map[string]string{}}) + args := Args(ConnConfig{Host: "10.0.0.1", User: "pixel", Env: map[string]string{}}) for _, a := range args { if strings.HasPrefix(a, "SetEnv=") { t.Errorf("unexpected SetEnv arg %q with empty env", a) @@ -117,7 +117,7 @@ func TestConsoleArgs(t *testing.T) { t.Run("no remote command", func(t *testing.T) { cc := ConnConfig{Host: "10.0.0.1", User: "pixel", KeyPath: "/tmp/key"} args := consoleArgs(cc, "") - sshA := sshArgs(cc) + sshA := Args(cc) if len(args) != len(sshA) { t.Fatalf("len(consoleArgs) = %d, want %d (same as sshArgs)", len(args), len(sshA)) } diff --git a/sandbox/registry.go b/sandbox/registry.go new file mode 100644 index 0000000..659baf3 --- /dev/null +++ b/sandbox/registry.go @@ -0,0 +1,26 @@ +package sandbox + +import "fmt" + +// Factory constructs a Sandbox from backend-specific configuration. +// The cfg map is interpreted by each backend (e.g. "host" and "api_key" +// for TrueNAS, "socket" for Incus). +type Factory func(cfg map[string]string) (Sandbox, error) + +var backends = map[string]Factory{} + +// Register makes a backend available by name. It is intended to be called +// from init() in backend implementation packages. +func Register(name string, f Factory) { + backends[name] = f +} + +// Open returns a Sandbox for the named backend, configured with cfg. +// If no backend is registered under that name, it returns an error. 
+func Open(name string, cfg map[string]string) (Sandbox, error) { + f, ok := backends[name] + if !ok { + return nil, fmt.Errorf("sandbox: unknown backend %q", name) + } + return f(cfg) +} diff --git a/sandbox/registry_test.go b/sandbox/registry_test.go new file mode 100644 index 0000000..41ea499 --- /dev/null +++ b/sandbox/registry_test.go @@ -0,0 +1,49 @@ +package sandbox + +import ( + "testing" +) + +func TestOpenUnknownBackend(t *testing.T) { + _, err := Open("nonexistent", nil) + if err == nil { + t.Fatal("expected error for unregistered backend, got nil") + } + want := `sandbox: unknown backend "nonexistent"` + if err.Error() != want { + t.Errorf("error = %q, want %q", err.Error(), want) + } +} + +func TestRegisterAndOpen(t *testing.T) { + // Clean up after test to avoid polluting global state. + defer delete(backends, "test") + + Register("test", func(cfg map[string]string) (Sandbox, error) { + return nil, nil + }) + + _, err := Open("test", map[string]string{"key": "value"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestRegisterOverwrites(t *testing.T) { + defer delete(backends, "dup") + + called := "" + Register("dup", func(cfg map[string]string) (Sandbox, error) { + called = "first" + return nil, nil + }) + Register("dup", func(cfg map[string]string) (Sandbox, error) { + called = "second" + return nil, nil + }) + + _, _ = Open("dup", nil) + if called != "second" { + t.Errorf("expected second factory to win, got %q", called) + } +} diff --git a/sandbox/sandbox.go b/sandbox/sandbox.go new file mode 100644 index 0000000..5fc94d5 --- /dev/null +++ b/sandbox/sandbox.go @@ -0,0 +1,110 @@ +// Package sandbox defines backend-agnostic interfaces for container lifecycle, +// execution, and network policy. Concrete implementations live in separate +// packages and register themselves via [Register]. +package sandbox + +import ( + "context" + "io" + "time" +) + +// Backend manages the lifecycle of sandbox instances and their snapshots. 
+type Backend interface { + Create(ctx context.Context, opts CreateOpts) (*Instance, error) + Get(ctx context.Context, name string) (*Instance, error) + List(ctx context.Context) ([]Instance, error) + Start(ctx context.Context, name string) error + Stop(ctx context.Context, name string) error + Delete(ctx context.Context, name string) error + + CreateSnapshot(ctx context.Context, name, label string) error + ListSnapshots(ctx context.Context, name string) ([]Snapshot, error) + DeleteSnapshot(ctx context.Context, name, label string) error + RestoreSnapshot(ctx context.Context, name, label string) error + CloneFrom(ctx context.Context, source, label, newName string) error + + Capabilities() Capabilities + Close() error +} + +// Exec runs commands and interactive sessions inside a sandbox instance. +type Exec interface { + Run(ctx context.Context, name string, opts ExecOpts) (exitCode int, err error) + Output(ctx context.Context, name string, cmd []string) ([]byte, error) + Console(ctx context.Context, name string, opts ConsoleOpts) error + Ready(ctx context.Context, name string, timeout time.Duration) error +} + +// NetworkPolicy controls egress filtering for a sandbox instance. +type NetworkPolicy interface { + SetEgressMode(ctx context.Context, name string, mode EgressMode) error + AllowDomain(ctx context.Context, name, domain string) error + DenyDomain(ctx context.Context, name, domain string) error + GetPolicy(ctx context.Context, name string) (*Policy, error) +} + +// Sandbox composes all sandbox capabilities into a single interface. +type Sandbox interface { + Backend + Exec + NetworkPolicy +} + +// Instance is the backend-agnostic representation of a container. +type Instance struct { + Name string + Status string + Addresses []string +} + +// Snapshot is a point-in-time capture of an instance's filesystem. +type Snapshot struct { + Label string + Size int64 +} + +// CreateOpts holds parameters for creating a new sandbox instance. 
// CreateOpts holds parameters for creating a new sandbox instance.
// Zero values let the backend substitute its configured defaults.
type CreateOpts struct {
	Name   string // bare instance name
	Image  string // image reference, e.g. "ubuntu/24.04"
	CPU    string // CPU specification, interpreted by the backend
	Memory int64  // memory limit in bytes (0 = backend default)
}

// ExecOpts holds parameters for running a command inside a sandbox.
type ExecOpts struct {
	Cmd    []string  // command and arguments to run
	Env    []string  // additional environment for the command
	Stdin  io.Reader // optional standard input
	Stdout io.Writer // optional standard output sink
	Stderr io.Writer // optional standard error sink
}

// ConsoleOpts holds parameters for attaching an interactive console.
type ConsoleOpts struct {
	Env       []string // additional environment for the session
	RemoteCmd []string // optional remote command to execute
}

// EgressMode controls what network traffic a sandbox may initiate.
type EgressMode string

// Supported egress modes.
const (
	EgressUnrestricted EgressMode = "unrestricted"
	EgressAgent        EgressMode = "agent"
	EgressAllowlist    EgressMode = "allowlist"
)

// Policy describes the current egress policy for a sandbox instance.
type Policy struct {
	Mode    EgressMode // active egress mode
	Domains []string   // domains relevant to the active mode
}

// Capabilities advertises optional features a backend supports.
type Capabilities struct {
	Snapshots     bool
	CloneFrom     bool
	EgressControl bool
}
+func (t *TrueNAS) Create(ctx context.Context, opts sandbox.CreateOpts) (*sandbox.Instance, error) { + name := opts.Name + full := prefixed(name) + + image := opts.Image + if image == "" { + image = t.cfg.image + } + cpu := opts.CPU + if cpu == "" { + cpu = t.cfg.cpu + } + memory := opts.Memory + if memory == 0 { + memory = t.cfg.memory * 1024 * 1024 // MiB → bytes + } + + createOpts := tnc.CreateInstanceOpts{ + Name: full, + Image: image, + CPU: cpu, + Memory: memory, + Autostart: true, + } + + // Resolve NIC: config override or auto-detect. + if t.cfg.nicType != "" { + createOpts.NIC = &tnc.NICOpts{ + NICType: strings.ToUpper(t.cfg.nicType), + Parent: t.cfg.parent, + } + } else { + nic, err := t.client.DefaultNIC(ctx) + if err == nil { + createOpts.NIC = nic + } + } + + instance, err := t.client.CreateInstance(ctx, createOpts) + if err != nil { + return nil, fmt.Errorf("creating instance: %w", err) + } + + // Provision if enabled. + if t.cfg.provision { + pubKey := readSSHPubKey(t.cfg.sshKey) + steps := provision.Steps(t.cfg.egress, t.cfg.devtools) + + provOpts := tnc.ProvisionOpts{ + SSHPubKey: pubKey, + DNS: t.cfg.dns, + Env: t.cfg.env, + DevTools: t.cfg.devtools, + Egress: t.cfg.egress, + EgressAllow: t.cfg.allow, + } + if len(steps) > 0 { + provOpts.ProvisionScript = provision.Script(steps) + } + + needsProvision := pubKey != "" || len(t.cfg.dns) > 0 || + len(t.cfg.env) > 0 || t.cfg.devtools + + if needsProvision { + if err := t.client.Provision(ctx, full, provOpts); err != nil { + // Non-fatal: continue without provisioning. + _ = err + } else if pubKey != "" { + // Restart so rc.local runs on boot. + _ = t.client.Virt.StopInstance(ctx, full, tnapi.StopVirtInstanceOpts{Timeout: 30}) + if err := t.client.Virt.StartInstance(ctx, full); err != nil { + return nil, fmt.Errorf("restarting after provision: %w", err) + } + instance = nil // force re-fetch below + } + } + } + + // Poll for IP assignment. 
+ if err := retry.Poll(ctx, time.Second, 15*time.Second, func(ctx context.Context) (bool, error) { + inst, err := t.client.Virt.GetInstance(ctx, full) + if err != nil { + return false, fmt.Errorf("refreshing instance: %w", err) + } + instance = inst + return ipFromAliases(inst.Aliases) != "", nil + }); err != nil && !errors.Is(err, retry.ErrTimeout) { + return nil, err + } + + ip := ipFromAliases(instance.Aliases) + + // Wait for SSH readiness. + if ip != "" { + cc := ssh.ConnConfig{Host: ip, User: t.cfg.sshUser, KeyPath: t.cfg.sshKey} + _ = t.ssh.WaitReady(ctx, cc.Host, 90*time.Second, nil) + } + + cache.Put(name, &cache.Entry{IP: ip, Status: instance.Status}) + + return &sandbox.Instance{ + Name: name, + Status: instance.Status, + Addresses: collectAddresses(instance.Aliases), + }, nil +} + +// Get returns a single instance by bare name. +func (t *TrueNAS) Get(ctx context.Context, name string) (*sandbox.Instance, error) { + inst, err := t.client.Virt.GetInstance(ctx, prefixed(name)) + if err != nil { + return nil, fmt.Errorf("getting %s: %w", name, err) + } + if inst == nil { + return nil, fmt.Errorf("instance %q not found", name) + } + return toInstance(inst), nil +} + +// List returns all px- prefixed instances with the prefix stripped. +func (t *TrueNAS) List(ctx context.Context) ([]sandbox.Instance, error) { + instances, err := t.client.ListInstances(ctx) + if err != nil { + return nil, err + } + result := make([]sandbox.Instance, len(instances)) + for i, inst := range instances { + result[i] = sandbox.Instance{ + Name: unprefixed(inst.Name), + Status: inst.Status, + Addresses: collectAddresses(inst.Aliases), + } + } + return result, nil +} + +// Start starts a stopped instance. +func (t *TrueNAS) Start(ctx context.Context, name string) error { + full := prefixed(name) + if err := t.client.Virt.StartInstance(ctx, full); err != nil { + return fmt.Errorf("starting %s: %w", name, err) + } + + // Get instance and wait for SSH. 
+ inst, err := t.client.Virt.GetInstance(ctx, full) + if err != nil { + return fmt.Errorf("refreshing %s: %w", name, err) + } + + ip := ipFromAliases(inst.Aliases) + if ip != "" { + _ = t.ssh.WaitReady(ctx, ip, 30*time.Second, nil) + cache.Put(name, &cache.Entry{IP: ip, Status: inst.Status}) + } + return nil +} + +// Stop stops a running instance. +func (t *TrueNAS) Stop(ctx context.Context, name string) error { + if err := t.client.Virt.StopInstance(ctx, prefixed(name), tnapi.StopVirtInstanceOpts{ + Timeout: 30, + }); err != nil { + return fmt.Errorf("stopping %s: %w", name, err) + } + cache.Delete(name) + return nil +} + +// Delete stops (if running) and deletes an instance with retry. +func (t *TrueNAS) Delete(ctx context.Context, name string) error { + full := prefixed(name) + + // Best-effort stop. + _ = t.client.Virt.StopInstance(ctx, full, tnapi.StopVirtInstanceOpts{Timeout: 30}) + + // Retry delete (Incus storage release timing). + if err := retry.Do(ctx, 3, 2*time.Second, func(ctx context.Context) error { + return t.client.Virt.DeleteInstance(ctx, full) + }); err != nil { + return fmt.Errorf("deleting %s: %w", name, err) + } + + cache.Delete(name) + return nil +} + +// CreateSnapshot creates a ZFS snapshot for the named instance. +func (t *TrueNAS) CreateSnapshot(ctx context.Context, name, label string) error { + ds, err := t.resolveDataset(ctx, name) + if err != nil { + return err + } + _, err = t.client.Snapshot.Create(ctx, tnapi.CreateSnapshotOpts{ + Dataset: ds, + Name: label, + }) + if err != nil { + return fmt.Errorf("creating snapshot: %w", err) + } + return nil +} + +// ListSnapshots returns all snapshots for the named instance. 
+func (t *TrueNAS) ListSnapshots(ctx context.Context, name string) ([]sandbox.Snapshot, error) { + ds, err := t.resolveDataset(ctx, name) + if err != nil { + return nil, err + } + snaps, err := t.client.ListSnapshots(ctx, ds) + if err != nil { + return nil, err + } + result := make([]sandbox.Snapshot, len(snaps)) + for i, s := range snaps { + result[i] = sandbox.Snapshot{ + Label: s.SnapshotName, + Size: s.Referenced, + } + } + return result, nil +} + +// DeleteSnapshot deletes a ZFS snapshot by label. +func (t *TrueNAS) DeleteSnapshot(ctx context.Context, name, label string) error { + ds, err := t.resolveDataset(ctx, name) + if err != nil { + return err + } + return t.client.Snapshot.Delete(ctx, ds+"@"+label) +} + +// RestoreSnapshot rolls back to the given snapshot: stop, rollback, start, +// poll IP, SSH wait. +func (t *TrueNAS) RestoreSnapshot(ctx context.Context, name, label string) error { + full := prefixed(name) + ds, err := t.resolveDataset(ctx, name) + if err != nil { + return err + } + + if err := t.client.Virt.StopInstance(ctx, full, tnapi.StopVirtInstanceOpts{Timeout: 30}); err != nil { + return fmt.Errorf("stopping %s: %w", name, err) + } + if err := t.client.SnapshotRollback(ctx, ds+"@"+label); err != nil { + return err + } + if err := t.client.Virt.StartInstance(ctx, full); err != nil { + return fmt.Errorf("starting %s: %w", name, err) + } + + inst, err := t.client.Virt.GetInstance(ctx, full) + if err != nil { + return fmt.Errorf("refreshing %s: %w", name, err) + } + + ip := ipFromAliases(inst.Aliases) + cache.Put(name, &cache.Entry{IP: ip, Status: inst.Status}) + + if ip != "" { + _ = t.ssh.WaitReady(ctx, ip, 30*time.Second, nil) + } + return nil +} + +// CloneFrom clones a source container's snapshot into a new container. 
+func (t *TrueNAS) CloneFrom(ctx context.Context, source, label, newName string) error { + ds, err := t.resolveDataset(ctx, source) + if err != nil { + return err + } + return t.client.ReplaceContainerRootfs(ctx, prefixed(newName), ds+"@"+label) +} + +// resolveDataset returns the ZFS dataset path for an instance. +func (t *TrueNAS) resolveDataset(ctx context.Context, name string) (string, error) { + if t.cfg.datasetPrefix != "" { + return t.cfg.datasetPrefix + "/" + prefixed(name), nil + } + return t.client.ContainerDataset(ctx, prefixed(name)) +} + +// toInstance converts a truenas-go VirtInstance to a sandbox.Instance. +func toInstance(inst *tnapi.VirtInstance) *sandbox.Instance { + return &sandbox.Instance{ + Name: unprefixed(inst.Name), + Status: inst.Status, + Addresses: collectAddresses(inst.Aliases), + } +} + +// collectAddresses extracts all IPv4 addresses from aliases. +func collectAddresses(aliases []tnapi.VirtAlias) []string { + var addrs []string + for _, a := range aliases { + if a.Type == "INET" || a.Type == "ipv4" { + addrs = append(addrs, a.Address) + } + } + return addrs +} + +// readSSHPubKey reads the .pub file corresponding to the given private key path. +func readSSHPubKey(keyPath string) string { + if keyPath == "" { + return "" + } + data, err := os.ReadFile(keyPath + ".pub") + if err != nil { + return "" + } + return strings.TrimSpace(string(data)) +} diff --git a/sandbox/truenas/backend_test.go b/sandbox/truenas/backend_test.go new file mode 100644 index 0000000..b276f38 --- /dev/null +++ b/sandbox/truenas/backend_test.go @@ -0,0 +1,436 @@ +package truenas + +import ( + "context" + "errors" + "strings" + "testing" + + tnapi "github.com/deevus/truenas-go" + + "github.com/deevus/pixels/internal/cache" + tnc "github.com/deevus/pixels/internal/truenas" + "github.com/deevus/pixels/sandbox" +) + +// testCfg returns a minimal valid config map for NewForTest. 
// testCfg returns a minimal valid config map for NewForTest.
// Provisioning is disabled so tests don't need SSH/provision mocks.
func testCfg() map[string]string {
	return map[string]string{
		"host":      "nas.test",
		"api_key":   "test-key",
		"provision": "false",
	}
}

// newTestBackend creates a TrueNAS backend with mock services.
func newTestBackend(t *testing.T, client *tnc.Client) *TrueNAS {
	t.Helper()
	tn, err := NewForTest(client, &mockSSH{}, testCfg())
	if err != nil {
		t.Fatalf("NewForTest: %v", err)
	}
	return tn
}

// TestGet covers the found / not-found / API-error paths of Get, and
// verifies the px- prefix is added on the wire and stripped in the result.
func TestGet(t *testing.T) {
	tests := []struct {
		name     string
		instance *tnapi.VirtInstance
		getErr   error
		wantName string
		wantErr  string
	}{
		{
			name: "found",
			instance: &tnapi.VirtInstance{
				Name:   "px-mybox",
				Status: "RUNNING",
				Aliases: []tnapi.VirtAlias{
					{Type: "INET", Address: "10.0.0.5"},
				},
			},
			wantName: "mybox",
		},
		{
			// GetInstance returns (nil, nil): Get must synthesize an error.
			name:    "not found",
			wantErr: "not found",
		},
		{
			name:    "API error",
			getErr:  errors.New("connection failed"),
			wantErr: "getting mybox",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tn := newTestBackend(t, &tnc.Client{
				Virt: &tnapi.MockVirtService{
					GetInstanceFunc: func(ctx context.Context, name string) (*tnapi.VirtInstance, error) {
						if name != "px-mybox" {
							t.Errorf("GetInstance called with %q, want px-mybox", name)
						}
						if tt.getErr != nil {
							return nil, tt.getErr
						}
						return tt.instance, nil
					},
				},
			})

			inst, err := tn.Get(context.Background(), "mybox")
			if tt.wantErr != "" {
				if err == nil {
					t.Fatal("expected error")
				}
				if !strings.Contains(err.Error(), tt.wantErr) {
					t.Errorf("error %q should contain %q", err.Error(), tt.wantErr)
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if inst.Name != tt.wantName {
				t.Errorf("name = %q, want %q", inst.Name, tt.wantName)
			}
			if inst.Status != "RUNNING" {
				t.Errorf("status = %q", inst.Status)
			}
			if len(inst.Addresses) != 1 || inst.Addresses[0] != "10.0.0.5" {
				t.Errorf("addresses = %v", inst.Addresses)
			}
		})
	}
}

// TestList verifies prefix stripping and status/address mapping for List.
func TestList(t *testing.T) {
	tn := newTestBackend(t, &tnc.Client{
		Virt: &tnapi.MockVirtService{
			ListInstancesFunc: func(ctx context.Context, filters [][]any) ([]tnapi.VirtInstance, error) {
				return []tnapi.VirtInstance{
					{Name: "px-alpha", Status: "RUNNING", Aliases: []tnapi.VirtAlias{{Type: "INET", Address: "10.0.0.1"}}},
					{Name: "px-beta", Status: "STOPPED"},
				}, nil
			},
		},
	})

	instances, err := tn.List(context.Background())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(instances) != 2 {
		t.Fatalf("got %d instances, want 2", len(instances))
	}
	if instances[0].Name != "alpha" {
		t.Errorf("instances[0].Name = %q, want alpha", instances[0].Name)
	}
	if instances[1].Name != "beta" {
		t.Errorf("instances[1].Name = %q, want beta", instances[1].Name)
	}
	if instances[0].Status != "RUNNING" {
		t.Errorf("instances[0].Status = %q", instances[0].Status)
	}
}

// TestStop verifies the px- prefix and the 30s stop timeout are passed through.
func TestStop(t *testing.T) {
	var stopCalled bool
	tn := newTestBackend(t, &tnc.Client{
		Virt: &tnapi.MockVirtService{
			StopInstanceFunc: func(ctx context.Context, name string, opts tnapi.StopVirtInstanceOpts) error {
				stopCalled = true
				if name != "px-test" {
					t.Errorf("stop called with %q", name)
				}
				if opts.Timeout != 30 {
					t.Errorf("timeout = %d, want 30", opts.Timeout)
				}
				return nil
			},
		},
	})

	if err := tn.Stop(context.Background(), "test"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !stopCalled {
		t.Error("stop not called")
	}
}

// TestDelete covers cache eviction on success and the retry loop around
// DeleteInstance failures.
func TestDelete(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		var deleteCalled bool
		tn := newTestBackend(t, &tnc.Client{
			Virt: &tnapi.MockVirtService{
				StopInstanceFunc: func(ctx context.Context, name string, opts tnapi.StopVirtInstanceOpts) error {
					return nil
				},
				DeleteInstanceFunc: func(ctx context.Context, name string) error {
					deleteCalled = true
					if name != "px-test" {
						t.Errorf("delete called with %q", name)
					}
					return nil
				},
			},
		})
		// Seed the cache so eviction is observable.
		cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"})
		defer cache.Delete("test")

		if err := tn.Delete(context.Background(), "test"); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if !deleteCalled {
			t.Error("delete not called")
		}
		if cached := cache.Get("test"); cached != nil {
			t.Error("cache should be evicted")
		}
	})

	t.Run("retry on error", func(t *testing.T) {
		attempts := 0
		tn := newTestBackend(t, &tnc.Client{
			Virt: &tnapi.MockVirtService{
				StopInstanceFunc: func(ctx context.Context, name string, opts tnapi.StopVirtInstanceOpts) error {
					return nil
				},
				// Fail twice, succeed on the third attempt — matches the
				// 3-attempt retry in Delete.
				DeleteInstanceFunc: func(ctx context.Context, name string) error {
					attempts++
					if attempts < 3 {
						return errors.New("storage busy")
					}
					return nil
				},
			},
		})

		if err := tn.Delete(context.Background(), "test"); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if attempts != 3 {
			t.Errorf("attempts = %d, want 3", attempts)
		}
	})
}

// TestCreateSnapshot verifies the dataset path is auto-detected from the
// virt global config and the label is passed through.
func TestCreateSnapshot(t *testing.T) {
	var created tnapi.CreateSnapshotOpts
	tn := newTestBackend(t, &tnc.Client{
		Virt: &tnapi.MockVirtService{
			GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) {
				return &tnapi.VirtGlobalConfig{Dataset: "tank/ix-virt"}, nil
			},
		},
		Snapshot: &tnapi.MockSnapshotService{
			CreateFunc: func(ctx context.Context, opts tnapi.CreateSnapshotOpts) (*tnapi.Snapshot, error) {
				created = opts
				return &tnapi.Snapshot{}, nil
			},
		},
	})

	if err := tn.CreateSnapshot(context.Background(), "test", "snap1"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if created.Dataset != "tank/ix-virt/containers/px-test" {
		t.Errorf("dataset = %q", created.Dataset)
	}
	if created.Name != "snap1" {
		t.Errorf("name = %q", created.Name)
	}
}

// TestListSnapshots verifies label/size mapping from the raw snapshot query.
func TestListSnapshots(t *testing.T) {
	tn := newTestBackend(t, &tnc.Client{
		Virt: &tnapi.MockVirtService{
			GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) {
				return &tnapi.VirtGlobalConfig{Dataset: "tank/ix-virt"}, nil
			},
		},
		Snapshot: &tnapi.MockSnapshotService{
			QueryFunc: func(ctx context.Context, filters [][]any) ([]tnapi.Snapshot, error) {
				return []tnapi.Snapshot{
					{SnapshotName: "snap1", Referenced: 1024},
					{SnapshotName: "snap2", Referenced: 2048},
				}, nil
			},
		},
	})

	snaps, err := tn.ListSnapshots(context.Background(), "test")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(snaps) != 2 {
		t.Fatalf("got %d snapshots, want 2", len(snaps))
	}
	if snaps[0].Label != "snap1" || snaps[0].Size != 1024 {
		t.Errorf("snap[0] = %+v", snaps[0])
	}
	if snaps[1].Label != "snap2" || snaps[1].Size != 2048 {
		t.Errorf("snap[1] = %+v", snaps[1])
	}
}

// TestDeleteSnapshot verifies the dataset@label id format passed to delete.
func TestDeleteSnapshot(t *testing.T) {
	var deletedID string
	tn := newTestBackend(t, &tnc.Client{
		Virt: &tnapi.MockVirtService{
			GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) {
				return &tnapi.VirtGlobalConfig{Dataset: "tank/ix-virt"}, nil
			},
		},
		Snapshot: &tnapi.MockSnapshotService{
			DeleteFunc: func(ctx context.Context, id string) error {
				deletedID = id
				return nil
			},
		},
	})

	if err := tn.DeleteSnapshot(context.Background(), "test", "snap1"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	want := "tank/ix-virt/containers/px-test@snap1"
	if deletedID != want {
		t.Errorf("deleted = %q, want %q", deletedID, want)
	}
}

// TestResolveDataset covers both branches: explicit dataset_prefix override
// and auto-detection via the virt global config.
func TestResolveDataset(t *testing.T) {
	t.Run("with prefix override", func(t *testing.T) {
		tn, _ := NewForTest(&tnc.Client{}, &mockSSH{}, map[string]string{
			"host":           "nas.test",
			"api_key":        "key",
			"dataset_prefix": "mypool/virt",
			"provision":      "false",
		})

		ds, err := tn.resolveDataset(context.Background(), "test")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if ds != "mypool/virt/px-test" {
			t.Errorf("dataset = %q", ds)
		}
	})

	t.Run("auto-detect from API", func(t *testing.T) {
		tn := newTestBackend(t, &tnc.Client{
			Virt: &tnapi.MockVirtService{
				GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) {
					return &tnapi.VirtGlobalConfig{Dataset: "tank/ix-virt"}, nil
				},
			},
		})

		ds, err := tn.resolveDataset(context.Background(), "test")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if ds != "tank/ix-virt/containers/px-test" {
			t.Errorf("dataset = %q", ds)
		}
	})
}

// TestCreateNoProvision exercises Create with provisioning disabled: the
// instance is created, the IP poll succeeds immediately, and the result
// is cached.
func TestCreateNoProvision(t *testing.T) {
	cache.Delete("test")
	defer cache.Delete("test")

	mssh := &mockSSH{}
	tn, _ := NewForTest(&tnc.Client{
		Virt: &tnapi.MockVirtService{
			CreateInstanceFunc: func(ctx context.Context, opts tnapi.CreateVirtInstanceOpts) (*tnapi.VirtInstance, error) {
				return &tnapi.VirtInstance{
					Name:   opts.Name,
					Status: "RUNNING",
					Aliases: []tnapi.VirtAlias{
						{Type: "INET", Address: "10.0.0.42"},
					},
				}, nil
			},
			GetInstanceFunc: func(ctx context.Context, name string) (*tnapi.VirtInstance, error) {
				return &tnapi.VirtInstance{
					Name:   name,
					Status: "RUNNING",
					Aliases: []tnapi.VirtAlias{
						{Type: "INET", Address: "10.0.0.42"},
					},
				}, nil
			},
		},
		Interface: &tnapi.MockInterfaceService{},
		Network:   &tnapi.MockNetworkService{},
	}, mssh, testCfg())

	inst, err := tn.Create(context.Background(), sandbox.CreateOpts{Name: "test"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if inst.Name != "test" {
		t.Errorf("name = %q", inst.Name)
	}
	if inst.Status != "RUNNING" {
		t.Errorf("status = %q", inst.Status)
	}
	if len(inst.Addresses) != 1 || inst.Addresses[0] != "10.0.0.42" {
		t.Errorf("addresses = %v", inst.Addresses)
	}

	// Verify cached.
	cached := cache.Get("test")
	if cached == nil || cached.IP != "10.0.0.42" {
		t.Errorf("cache = %+v", cached)
	}
}

// TestStart verifies the instance is started, refreshed, and its IP cached.
func TestStart(t *testing.T) {
	cache.Delete("test")
	defer cache.Delete("test")

	mssh := &mockSSH{}
	tn, _ := NewForTest(&tnc.Client{
		Virt: &tnapi.MockVirtService{
			StartInstanceFunc: func(ctx context.Context, name string) error {
				if name != "px-test" {
					t.Errorf("start called with %q", name)
				}
				return nil
			},
			GetInstanceFunc: func(ctx context.Context, name string) (*tnapi.VirtInstance, error) {
				return &tnapi.VirtInstance{
					Name:    name,
					Status:  "RUNNING",
					Aliases: []tnapi.VirtAlias{{Type: "INET", Address: "10.0.0.7"}},
				}, nil
			},
		},
	}, mssh, testCfg())

	if err := tn.Start(context.Background(), "test"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	cached := cache.Get("test")
	if cached == nil || cached.IP != "10.0.0.7" {
		t.Errorf("cache = %+v", cached)
	}
}

// TestCapabilities pins the advertised feature set of the TrueNAS backend.
func TestCapabilities(t *testing.T) {
	tn := &TrueNAS{}
	caps := tn.Capabilities()
	if !caps.Snapshots {
		t.Error("Snapshots should be true")
	}
	if !caps.CloneFrom {
		t.Error("CloneFrom should be true")
	}
	if !caps.EgressControl {
		t.Error("EgressControl should be true")
	}
}
+type tnConfig struct { + host string + port int + username string + apiKey string + insecure bool + + image string + cpu string + memory int64 // MiB + pool string + + nicType string + parent string + + sshUser string + sshKey string + + datasetPrefix string + + provision bool + devtools bool + egress string + allow []string + dns []string + + env map[string]string + envForward map[string]string +} + +// parseCfg extracts a tnConfig from a flat key-value map, applying the same +// defaults as internal/config. +func parseCfg(m map[string]string) (*tnConfig, error) { + c := &tnConfig{ + username: "root", + image: "ubuntu/24.04", + cpu: "2", + memory: 2048, + pool: "tank", + sshUser: "pixel", + sshKey: "~/.ssh/id_ed25519", + provision: true, + devtools: true, + egress: "unrestricted", + } + + if v := m["host"]; v != "" { + c.host = v + } + if v := m["port"]; v != "" { + p, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("invalid port %q: %w", v, err) + } + c.port = p + } + if v := m["username"]; v != "" { + c.username = v + } + if v := m["api_key"]; v != "" { + c.apiKey = v + } + if v := m["insecure"]; v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, fmt.Errorf("invalid insecure %q: %w", v, err) + } + c.insecure = b + } + + if v := m["image"]; v != "" { + c.image = v + } + if v := m["cpu"]; v != "" { + c.cpu = v + } + if v := m["memory"]; v != "" { + mem, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid memory %q: %w", v, err) + } + c.memory = mem + } + if v := m["pool"]; v != "" { + c.pool = v + } + + if v := m["nic_type"]; v != "" { + c.nicType = v + } + if v := m["parent"]; v != "" { + c.parent = v + } + + if v := m["ssh_user"]; v != "" { + c.sshUser = v + } + if v := m["ssh_key"]; v != "" { + c.sshKey = v + } + + if v := m["dataset_prefix"]; v != "" { + c.datasetPrefix = v + } + + if v := m["provision"]; v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, 
fmt.Errorf("invalid provision %q: %w", v, err) + } + c.provision = b + } + if v := m["devtools"]; v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, fmt.Errorf("invalid devtools %q: %w", v, err) + } + c.devtools = b + } + if v := m["egress"]; v != "" { + switch v { + case "unrestricted", "agent", "allowlist": + c.egress = v + default: + return nil, fmt.Errorf("invalid egress %q: must be unrestricted, agent, or allowlist", v) + } + } + if v := m["allow"]; v != "" { + c.allow = strings.Split(v, ",") + } + if v := m["dns"]; v != "" { + c.dns = strings.Split(v, ",") + } + + // Validate required fields. + if c.host == "" { + return nil, fmt.Errorf("host is required") + } + if c.apiKey == "" { + return nil, fmt.Errorf("api_key is required") + } + + c.sshKey = expandHome(c.sshKey) + + return c, nil +} + +// toConfig converts a tnConfig to the internal config.Config used by +// truenas.Connect. +func (c *tnConfig) toConfig() *config.Config { + cfg := &config.Config{ + TrueNAS: config.TrueNAS{ + Host: c.host, + Port: c.port, + Username: c.username, + APIKey: c.apiKey, + }, + Defaults: config.Defaults{ + Image: c.image, + CPU: c.cpu, + Memory: c.memory, + Pool: c.pool, + }, + SSH: config.SSH{ + User: c.sshUser, + Key: c.sshKey, + }, + } + if c.insecure { + cfg.TrueNAS.InsecureSkipVerify = boolPtr(true) + } + return cfg +} + +func expandHome(path string) string { + if strings.HasPrefix(path, "~/") { + if home, err := os.UserHomeDir(); err == nil { + return filepath.Join(home, path[2:]) + } + } + return path +} + +func boolPtr(v bool) *bool { return &v } diff --git a/sandbox/truenas/config_test.go b/sandbox/truenas/config_test.go new file mode 100644 index 0000000..b762887 --- /dev/null +++ b/sandbox/truenas/config_test.go @@ -0,0 +1,236 @@ +package truenas + +import ( + "strings" + "testing" +) + +func TestParseCfg(t *testing.T) { + tests := []struct { + name string + cfg map[string]string + check func(t *testing.T, c *tnConfig) + wantErr string + }{ + 
{ + name: "all defaults with required fields", + cfg: map[string]string{ + "host": "truenas.local", + "api_key": "1-abc", + }, + check: func(t *testing.T, c *tnConfig) { + if c.host != "truenas.local" { + t.Errorf("host = %q", c.host) + } + if c.apiKey != "1-abc" { + t.Errorf("api_key = %q", c.apiKey) + } + if c.username != "root" { + t.Errorf("username = %q, want root", c.username) + } + if c.image != "ubuntu/24.04" { + t.Errorf("image = %q, want ubuntu/24.04", c.image) + } + if c.cpu != "2" { + t.Errorf("cpu = %q, want 2", c.cpu) + } + if c.memory != 2048 { + t.Errorf("memory = %d, want 2048", c.memory) + } + if c.pool != "tank" { + t.Errorf("pool = %q, want tank", c.pool) + } + if c.sshUser != "pixel" { + t.Errorf("ssh_user = %q, want pixel", c.sshUser) + } + if c.provision != true { + t.Error("provision should default to true") + } + if c.devtools != true { + t.Error("devtools should default to true") + } + if c.egress != "unrestricted" { + t.Errorf("egress = %q, want unrestricted", c.egress) + } + }, + }, + { + name: "custom values", + cfg: map[string]string{ + "host": "nas.example.com", + "port": "8443", + "api_key": "2-xyz", + "username": "admin", + "insecure": "true", + "image": "debian/12", + "cpu": "4", + "memory": "4096", + "pool": "storage", + "nic_type": "bridged", + "parent": "br0", + "ssh_user": "testuser", + "ssh_key": "/tmp/key", + "egress": "agent", + "allow": "example.com,test.com", + "dns": "1.1.1.1,8.8.8.8", + "provision": "false", + "devtools": "false", + "dataset_prefix": "mypool/virt", + }, + check: func(t *testing.T, c *tnConfig) { + if c.port != 8443 { + t.Errorf("port = %d", c.port) + } + if c.username != "admin" { + t.Errorf("username = %q", c.username) + } + if c.insecure != true { + t.Error("insecure should be true") + } + if c.image != "debian/12" { + t.Errorf("image = %q", c.image) + } + if c.cpu != "4" { + t.Errorf("cpu = %q", c.cpu) + } + if c.memory != 4096 { + t.Errorf("memory = %d", c.memory) + } + if c.pool != "storage" { + 
t.Errorf("pool = %q", c.pool) + } + if c.nicType != "bridged" { + t.Errorf("nic_type = %q", c.nicType) + } + if c.parent != "br0" { + t.Errorf("parent = %q", c.parent) + } + if c.sshUser != "testuser" { + t.Errorf("ssh_user = %q", c.sshUser) + } + if c.sshKey != "/tmp/key" { + t.Errorf("ssh_key = %q", c.sshKey) + } + if c.provision != false { + t.Error("provision should be false") + } + if c.devtools != false { + t.Error("devtools should be false") + } + if c.egress != "agent" { + t.Errorf("egress = %q", c.egress) + } + if len(c.allow) != 2 || c.allow[0] != "example.com" || c.allow[1] != "test.com" { + t.Errorf("allow = %v", c.allow) + } + if len(c.dns) != 2 || c.dns[0] != "1.1.1.1" { + t.Errorf("dns = %v", c.dns) + } + if c.datasetPrefix != "mypool/virt" { + t.Errorf("dataset_prefix = %q", c.datasetPrefix) + } + }, + }, + { + name: "missing host", + cfg: map[string]string{"api_key": "abc"}, + wantErr: "host is required", + }, + { + name: "missing api_key", + cfg: map[string]string{"host": "nas.local"}, + wantErr: "api_key is required", + }, + { + name: "invalid port", + cfg: map[string]string{"host": "nas", "api_key": "k", "port": "abc"}, + wantErr: "invalid port", + }, + { + name: "invalid memory", + cfg: map[string]string{"host": "nas", "api_key": "k", "memory": "big"}, + wantErr: "invalid memory", + }, + { + name: "invalid insecure", + cfg: map[string]string{"host": "nas", "api_key": "k", "insecure": "maybe"}, + wantErr: "invalid insecure", + }, + { + name: "invalid provision", + cfg: map[string]string{"host": "nas", "api_key": "k", "provision": "sometimes"}, + wantErr: "invalid provision", + }, + { + name: "invalid devtools", + cfg: map[string]string{"host": "nas", "api_key": "k", "devtools": "yes"}, + wantErr: "invalid devtools", + }, + { + name: "invalid egress mode", + cfg: map[string]string{"host": "nas", "api_key": "k", "egress": "deny-all"}, + wantErr: "invalid egress", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c, 
err := parseCfg(tt.cfg) + if tt.wantErr != "" { + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("error %q should contain %q", err.Error(), tt.wantErr) + } + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.check != nil { + tt.check(t, c) + } + }) + } +} + +func TestToConfig(t *testing.T) { + c := &tnConfig{ + host: "nas.local", + port: 8443, + username: "admin", + apiKey: "key-123", + insecure: true, + image: "ubuntu/24.04", + cpu: "2", + memory: 2048, + pool: "tank", + sshUser: "pixel", + sshKey: "/tmp/key", + } + + cfg := c.toConfig() + + if cfg.TrueNAS.Host != "nas.local" { + t.Errorf("host = %q", cfg.TrueNAS.Host) + } + if cfg.TrueNAS.Port != 8443 { + t.Errorf("port = %d", cfg.TrueNAS.Port) + } + if cfg.TrueNAS.Username != "admin" { + t.Errorf("username = %q", cfg.TrueNAS.Username) + } + if cfg.TrueNAS.APIKey != "key-123" { + t.Errorf("api_key = %q", cfg.TrueNAS.APIKey) + } + if cfg.TrueNAS.InsecureSkipVerify == nil || !*cfg.TrueNAS.InsecureSkipVerify { + t.Error("insecure should be true") + } + if cfg.SSH.User != "pixel" { + t.Errorf("ssh user = %q", cfg.SSH.User) + } + if cfg.SSH.Key != "/tmp/key" { + t.Errorf("ssh key = %q", cfg.SSH.Key) + } +} diff --git a/sandbox/truenas/exec.go b/sandbox/truenas/exec.go new file mode 100644 index 0000000..054c343 --- /dev/null +++ b/sandbox/truenas/exec.go @@ -0,0 +1,100 @@ +package truenas + +import ( + "context" + "os/exec" + "strings" + "time" + + "github.com/deevus/pixels/internal/ssh" + "github.com/deevus/pixels/sandbox" +) + +// Run executes a command inside a sandbox instance. If ExecOpts provides +// custom Stdin/Stdout/Stderr, it builds a custom exec.Cmd using ssh.Args(). +// Otherwise it delegates to ssh.Exec. 
+func (t *TrueNAS) Run(ctx context.Context, name string, opts sandbox.ExecOpts) (int, error) { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return 1, err + } + + cc := ssh.ConnConfig{ + Host: ip, + User: t.cfg.sshUser, + KeyPath: t.cfg.sshKey, + Env: envToMap(opts.Env), + } + + hasCustomIO := opts.Stdin != nil || opts.Stdout != nil || opts.Stderr != nil + if hasCustomIO { + args := append(ssh.Args(cc), opts.Cmd...) + cmd := exec.CommandContext(ctx, "ssh", args...) + cmd.Stdin = opts.Stdin + cmd.Stdout = opts.Stdout + cmd.Stderr = opts.Stderr + + if err := cmd.Run(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return exitErr.ExitCode(), nil + } + return 1, err + } + return 0, nil + } + + return ssh.Exec(ctx, cc, opts.Cmd) +} + +// Output executes a command and returns its combined stdout. +func (t *TrueNAS) Output(ctx context.Context, name string, cmd []string) ([]byte, error) { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return nil, err + } + cc := ssh.ConnConfig{ + Host: ip, + User: t.cfg.sshUser, + KeyPath: t.cfg.sshKey, + } + return t.ssh.OutputQuiet(ctx, cc, cmd) +} + +// Console attaches an interactive console session. +func (t *TrueNAS) Console(ctx context.Context, name string, opts sandbox.ConsoleOpts) error { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return err + } + cc := ssh.ConnConfig{ + Host: ip, + User: t.cfg.sshUser, + KeyPath: t.cfg.sshKey, + Env: envToMap(opts.Env), + } + remoteCmd := strings.Join(opts.RemoteCmd, " ") + return ssh.Console(cc, remoteCmd) +} + +// Ready waits until the instance is reachable via SSH. +func (t *TrueNAS) Ready(ctx context.Context, name string, timeout time.Duration) error { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return err + } + return t.ssh.WaitReady(ctx, ip, timeout, nil) +} + +// envToMap converts a slice of "KEY=VALUE" pairs to a map. 
+func envToMap(env []string) map[string]string { + if len(env) == 0 { + return nil + } + m := make(map[string]string, len(env)) + for _, e := range env { + if k, v, ok := strings.Cut(e, "="); ok { + m[k] = v + } + } + return m +} diff --git a/sandbox/truenas/network.go b/sandbox/truenas/network.go new file mode 100644 index 0000000..d4ce059 --- /dev/null +++ b/sandbox/truenas/network.go @@ -0,0 +1,227 @@ +package truenas + +import ( + "context" + "fmt" + "strings" + + "github.com/deevus/pixels/internal/egress" + "github.com/deevus/pixels/internal/ssh" + "github.com/deevus/pixels/sandbox" +) + +// SetEgressMode sets the egress filtering mode for a container. +// +// For "unrestricted": flushes nftables, removes egress files, restores +// blanket sudoers. +// +// For "agent"/"allowlist": writes nftables config, domains/cidrs, resolve +// script, safe-apt wrapper, restricted sudoers via the TrueNAS API, then +// SSHes in to install nftables and resolve domains. +func (t *TrueNAS) SetEgressMode(ctx context.Context, name string, mode sandbox.EgressMode) error { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return err + } + cc := ssh.ConnConfig{Host: ip, User: "root", KeyPath: t.cfg.sshKey} + full := prefixed(name) + + switch mode { + case sandbox.EgressUnrestricted: + // Flush nftables. + t.ssh.ExecQuiet(ctx, cc, []string{"nft flush ruleset"}) + + // Remove egress files. + t.ssh.ExecQuiet(ctx, cc, []string{"rm -f /etc/pixels-egress-domains /etc/pixels-egress-cidrs /etc/nftables.conf /usr/local/bin/pixels-resolve-egress.sh /usr/local/bin/safe-apt"}) + + // Restore blanket sudoers. + if err := t.client.WriteContainerFile(ctx, full, "/etc/sudoers.d/pixel", []byte(egress.SudoersUnrestricted()), 0o440); err != nil { + return fmt.Errorf("writing unrestricted sudoers: %w", err) + } + // Remove restricted sudoers if present. 
+ t.ssh.ExecQuiet(ctx, cc, []string{"rm -f /etc/sudoers.d/pixel.restricted"}) + + return nil + + case sandbox.EgressAgent, sandbox.EgressAllowlist: + egressName := string(mode) + domains := egress.ResolveDomains(egressName, t.cfg.allow) + + // Write domain list. + if err := t.client.WriteContainerFile(ctx, full, "/etc/pixels-egress-domains", []byte(egress.DomainsFileContent(domains)), 0o644); err != nil { + return fmt.Errorf("writing egress domains: %w", err) + } + + // Write CIDRs if any. + cidrs := egress.PresetCIDRs(egressName) + if len(cidrs) > 0 { + if err := t.client.WriteContainerFile(ctx, full, "/etc/pixels-egress-cidrs", []byte(egress.CIDRsFileContent(cidrs)), 0o644); err != nil { + return fmt.Errorf("writing egress cidrs: %w", err) + } + } + + // Write nftables config. + if err := t.client.WriteContainerFile(ctx, full, "/etc/nftables.conf", []byte(egress.NftablesConf()), 0o644); err != nil { + return fmt.Errorf("writing nftables.conf: %w", err) + } + + // Write resolve script. + if err := t.client.WriteContainerFile(ctx, full, "/usr/local/bin/pixels-resolve-egress.sh", []byte(egress.ResolveScript()), 0o755); err != nil { + return fmt.Errorf("writing resolve script: %w", err) + } + + // Write safe-apt wrapper. + if err := t.client.WriteContainerFile(ctx, full, "/usr/local/bin/safe-apt", []byte(egress.SafeAptScript()), 0o755); err != nil { + return fmt.Errorf("writing safe-apt: %w", err) + } + + // Write restricted sudoers. + if err := t.client.WriteContainerFile(ctx, full, "/etc/sudoers.d/pixel", []byte(egress.SudoersRestricted()), 0o440); err != nil { + return fmt.Errorf("writing restricted sudoers: %w", err) + } + + // Install nftables and resolve domains via SSH. 
+ code, err := t.ssh.ExecQuiet(ctx, cc, []string{"DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::=--force-confold nftables >/dev/null 2>&1"}) + if err != nil { + return fmt.Errorf("installing nftables: %w", err) + } + if code != 0 { + return fmt.Errorf("installing nftables: exit code %d", code) + } + + code, err = t.ssh.ExecQuiet(ctx, cc, []string{"/usr/local/bin/pixels-resolve-egress.sh"}) + if err != nil { + return fmt.Errorf("resolving egress: %w", err) + } + if code != 0 { + return fmt.Errorf("resolving egress: exit code %d", code) + } + + return nil + + default: + return fmt.Errorf("unknown egress mode %q", mode) + } +} + +// AllowDomain adds a domain to the egress allowlist and re-resolves. +func (t *TrueNAS) AllowDomain(ctx context.Context, name, domain string) error { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return err + } + cc := ssh.ConnConfig{Host: ip, User: "root", KeyPath: t.cfg.sshKey} + full := prefixed(name) + + // Ensure egress infrastructure exists. + code, _ := t.ssh.ExecQuiet(ctx, cc, []string{"test -f /etc/pixels-egress-domains"}) + if code != 0 { + // No egress infra — set up allowlist mode first. + if err := t.SetEgressMode(ctx, name, sandbox.EgressAllowlist); err != nil { + return fmt.Errorf("setting up egress infra: %w", err) + } + } + + // Read current domains. + out, err := t.ssh.OutputQuiet(ctx, cc, []string{"cat /etc/pixels-egress-domains"}) + if err != nil { + return fmt.Errorf("reading domains: %w", err) + } + + domains := parseDomains(string(out)) + + // Check for duplicate. + for _, d := range domains { + if d == domain { + return nil // already allowed + } + } + + domains = append(domains, domain) + + // Write updated domains via API. + if err := t.client.WriteContainerFile(ctx, full, "/etc/pixels-egress-domains", []byte(egress.DomainsFileContent(domains)), 0o644); err != nil { + return fmt.Errorf("writing domains: %w", err) + } + + // Re-resolve. 
+ t.ssh.ExecQuiet(ctx, cc, []string{"/usr/local/bin/pixels-resolve-egress.sh"}) + + return nil +} + +// DenyDomain removes a domain from the egress allowlist and re-resolves. +func (t *TrueNAS) DenyDomain(ctx context.Context, name, domain string) error { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return err + } + cc := ssh.ConnConfig{Host: ip, User: "root", KeyPath: t.cfg.sshKey} + full := prefixed(name) + + out, err := t.ssh.OutputQuiet(ctx, cc, []string{"cat /etc/pixels-egress-domains"}) + if err != nil { + return fmt.Errorf("reading domains: %w", err) + } + + domains := parseDomains(string(out)) + var filtered []string + found := false + for _, d := range domains { + if d == domain { + found = true + continue + } + filtered = append(filtered, d) + } + if !found { + return fmt.Errorf("domain %q not in allowlist", domain) + } + + if err := t.client.WriteContainerFile(ctx, full, "/etc/pixels-egress-domains", []byte(egress.DomainsFileContent(filtered)), 0o644); err != nil { + return fmt.Errorf("writing domains: %w", err) + } + + // Re-resolve. + t.ssh.ExecQuiet(ctx, cc, []string{"/usr/local/bin/pixels-resolve-egress.sh"}) + + return nil +} + +// GetPolicy returns the current egress policy for an instance. 
+func (t *TrueNAS) GetPolicy(ctx context.Context, name string) (*sandbox.Policy, error) { + ip, err := t.resolveRunningIP(ctx, name) + if err != nil { + return nil, err + } + cc := ssh.ConnConfig{Host: ip, User: "root", KeyPath: t.cfg.sshKey} + + code, _ := t.ssh.ExecQuiet(ctx, cc, []string{"test -f /etc/pixels-egress-domains"}) + if code != 0 { + return &sandbox.Policy{Mode: sandbox.EgressUnrestricted}, nil + } + + out, err := t.ssh.OutputQuiet(ctx, cc, []string{"cat /etc/pixels-egress-domains"}) + if err != nil { + return nil, fmt.Errorf("reading domains: %w", err) + } + + domains := parseDomains(string(out)) + return &sandbox.Policy{ + Mode: sandbox.EgressAllowlist, + Domains: domains, + }, nil +} + +// parseDomains splits newline-delimited domain content into a slice, +// trimming whitespace and skipping empty lines. +func parseDomains(content string) []string { + var domains []string + for _, line := range strings.Split(content, "\n") { + d := strings.TrimSpace(line) + if d != "" { + domains = append(domains, d) + } + } + return domains +} diff --git a/sandbox/truenas/network_test.go b/sandbox/truenas/network_test.go new file mode 100644 index 0000000..4935b74 --- /dev/null +++ b/sandbox/truenas/network_test.go @@ -0,0 +1,418 @@ +package truenas + +import ( + "context" + "io" + "io/fs" + "strings" + "testing" + "time" + + tnapi "github.com/deevus/truenas-go" + + "github.com/deevus/pixels/internal/cache" + "github.com/deevus/pixels/internal/ssh" + tnc "github.com/deevus/pixels/internal/truenas" + "github.com/deevus/pixels/sandbox" +) + +// mockSSH records SSH calls for test verification. +type mockSSH struct { + execCalls []mockSSHCall + outputCalls []mockSSHCall + waitCalls []string + + // Configurable responses. 
+ execFn func(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) + outputFn func(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) +} + +type mockSSHCall struct { + Host string + User string + Cmd []string +} + +func (m *mockSSH) ExecQuiet(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) { + m.execCalls = append(m.execCalls, mockSSHCall{Host: cc.Host, User: cc.User, Cmd: cmd}) + if m.execFn != nil { + return m.execFn(ctx, cc, cmd) + } + return 0, nil +} + +func (m *mockSSH) OutputQuiet(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) { + m.outputCalls = append(m.outputCalls, mockSSHCall{Host: cc.Host, User: cc.User, Cmd: cmd}) + if m.outputFn != nil { + return m.outputFn(ctx, cc, cmd) + } + return nil, nil +} + +func (m *mockSSH) WaitReady(ctx context.Context, host string, timeout time.Duration, log io.Writer) error { + m.waitCalls = append(m.waitCalls, host) + return nil +} + +// writeCall records a WriteContainerFile call. 
+type writeCall struct { + name string + path string + content string + mode fs.FileMode +} + +func TestSetEgressModeUnrestricted(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + var writes []writeCall + mssh := &mockSSH{} + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) { + return &tnapi.VirtGlobalConfig{Pool: "tank"}, nil + }, + }, + Filesystem: &tnapi.MockFilesystemService{ + WriteFileFunc: func(ctx context.Context, path string, params tnapi.WriteFileParams) error { + writes = append(writes, writeCall{ + path: path, + content: string(params.Content), + mode: params.Mode, + }) + return nil + }, + }, + }, mssh, testCfg()) + + if err := tn.SetEgressMode(context.Background(), "test", sandbox.EgressUnrestricted); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Should have flushed nftables and removed files via SSH. + if len(mssh.execCalls) < 2 { + t.Fatalf("expected >= 2 SSH exec calls, got %d", len(mssh.execCalls)) + } + + // First call: flush nftables. + if !strings.Contains(strings.Join(mssh.execCalls[0].Cmd, " "), "nft flush") { + t.Errorf("first SSH call should flush nftables, got %v", mssh.execCalls[0].Cmd) + } + + // Second call: rm egress files. + if !strings.Contains(strings.Join(mssh.execCalls[1].Cmd, " "), "rm -f") { + t.Errorf("second SSH call should remove files, got %v", mssh.execCalls[1].Cmd) + } + + // Should write unrestricted sudoers via API. 
+ if len(writes) == 0 { + t.Fatal("expected write calls") + } + found := false + for _, w := range writes { + if strings.Contains(w.path, "sudoers") && strings.Contains(w.content, "NOPASSWD: ALL") { + found = true + } + } + if !found { + t.Error("should write unrestricted sudoers") + } +} + +func TestSetEgressModeAllowlist(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + var writes []writeCall + mssh := &mockSSH{} + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) { + return &tnapi.VirtGlobalConfig{Pool: "tank"}, nil + }, + }, + Filesystem: &tnapi.MockFilesystemService{ + WriteFileFunc: func(ctx context.Context, path string, params tnapi.WriteFileParams) error { + writes = append(writes, writeCall{ + path: path, + content: string(params.Content), + mode: params.Mode, + }) + return nil + }, + }, + }, mssh, testCfg()) + + if err := tn.SetEgressMode(context.Background(), "test", sandbox.EgressAllowlist); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Verify files written via API. + writePaths := make(map[string]bool) + for _, w := range writes { + // Extract relative path from full rootfs path. + writePaths[w.path] = true + } + + // Should write nftables, resolve script, safe-apt, sudoers. + wantPaths := []string{"nftables.conf", "pixels-resolve-egress.sh", "safe-apt", "sudoers"} + for _, want := range wantPaths { + found := false + for p := range writePaths { + if strings.Contains(p, want) { + found = true + break + } + } + if !found { + t.Errorf("missing write for %q", want) + } + } + + // Should install nftables and resolve via SSH. 
+ var hasInstall, hasResolve bool + for _, call := range mssh.execCalls { + cmd := strings.Join(call.Cmd, " ") + if strings.Contains(cmd, "apt-get install") && strings.Contains(cmd, "nftables") { + hasInstall = true + } + if strings.Contains(cmd, "pixels-resolve-egress.sh") { + hasResolve = true + } + } + if !hasInstall { + t.Error("should install nftables") + } + if !hasResolve { + t.Error("should run resolve script") + } +} + +func TestAllowDomain(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + var lastWritten string + mssh := &mockSSH{ + execFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) { + return 0, nil + }, + outputFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) { + return []byte("existing.com\n"), nil + }, + } + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) { + return &tnapi.VirtGlobalConfig{Pool: "tank"}, nil + }, + }, + Filesystem: &tnapi.MockFilesystemService{ + WriteFileFunc: func(ctx context.Context, path string, params tnapi.WriteFileParams) error { + lastWritten = string(params.Content) + return nil + }, + }, + }, mssh, testCfg()) + + if err := tn.AllowDomain(context.Background(), "test", "new.example.com"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(lastWritten, "existing.com") { + t.Error("should preserve existing domains") + } + if !strings.Contains(lastWritten, "new.example.com") { + t.Error("should append new domain") + } +} + +func TestAllowDomainDuplicate(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + mssh := &mockSSH{ + execFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) { + return 0, nil + }, + outputFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) 
{ + return []byte("example.com\n"), nil + }, + } + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) { + return &tnapi.VirtGlobalConfig{Pool: "tank"}, nil + }, + }, + Filesystem: &tnapi.MockFilesystemService{ + WriteFileFunc: func(ctx context.Context, path string, params tnapi.WriteFileParams) error { + t.Error("should not write when domain already exists") + return nil + }, + }, + }, mssh, testCfg()) + + if err := tn.AllowDomain(context.Background(), "test", "example.com"); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestDenyDomain(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + var lastWritten string + mssh := &mockSSH{ + outputFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) { + return []byte("keep.com\nremove.com\nalso-keep.com\n"), nil + }, + } + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) { + return &tnapi.VirtGlobalConfig{Pool: "tank"}, nil + }, + }, + Filesystem: &tnapi.MockFilesystemService{ + WriteFileFunc: func(ctx context.Context, path string, params tnapi.WriteFileParams) error { + lastWritten = string(params.Content) + return nil + }, + }, + }, mssh, testCfg()) + + if err := tn.DenyDomain(context.Background(), "test", "remove.com"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if strings.Contains(lastWritten, "remove.com") { + t.Error("should remove domain") + } + if !strings.Contains(lastWritten, "keep.com") { + t.Error("should keep other domains") + } + if !strings.Contains(lastWritten, "also-keep.com") { + t.Error("should keep other domains") + } +} + +func TestDenyDomainNotFound(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + mssh := 
&mockSSH{ + outputFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) { + return []byte("other.com\n"), nil + }, + } + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetGlobalConfigFunc: func(ctx context.Context) (*tnapi.VirtGlobalConfig, error) { + return &tnapi.VirtGlobalConfig{Pool: "tank"}, nil + }, + }, + Filesystem: &tnapi.MockFilesystemService{}, + }, mssh, testCfg()) + + err := tn.DenyDomain(context.Background(), "test", "missing.com") + if err == nil { + t.Fatal("expected error for missing domain") + } + if !strings.Contains(err.Error(), "not in allowlist") { + t.Errorf("error = %q", err.Error()) + } +} + +func TestGetPolicy(t *testing.T) { + t.Run("unrestricted", func(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + mssh := &mockSSH{ + execFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) { + // test -f returns 1 (file not found). + return 1, nil + }, + } + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{}, + }, mssh, testCfg()) + + policy, err := tn.GetPolicy(context.Background(), "test") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if policy.Mode != sandbox.EgressUnrestricted { + t.Errorf("mode = %q, want unrestricted", policy.Mode) + } + }) + + t.Run("restricted", func(t *testing.T) { + cache.Put("test", &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}) + defer cache.Delete("test") + + mssh := &mockSSH{ + execFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) { + return 0, nil // file exists + }, + outputFn: func(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) { + return []byte("api.example.com\ncdn.example.com\n"), nil + }, + } + + tn, _ := NewForTest(&tnc.Client{ + Virt: &tnapi.MockVirtService{}, + }, mssh, testCfg()) + + policy, err := tn.GetPolicy(context.Background(), "test") + if err != nil { + t.Fatalf("unexpected error: 
%v", err) + } + if policy.Mode != sandbox.EgressAllowlist { + t.Errorf("mode = %q, want allowlist", policy.Mode) + } + if len(policy.Domains) != 2 { + t.Fatalf("domains = %v", policy.Domains) + } + if policy.Domains[0] != "api.example.com" { + t.Errorf("domains[0] = %q", policy.Domains[0]) + } + }) +} + +func TestParseDomains(t *testing.T) { + tests := []struct { + name string + input string + want []string + }{ + {"normal", "a.com\nb.com\n", []string{"a.com", "b.com"}}, + {"trailing whitespace", " a.com \n b.com \n", []string{"a.com", "b.com"}}, + {"empty lines", "a.com\n\nb.com\n\n", []string{"a.com", "b.com"}}, + {"empty string", "", nil}, + {"single domain", "example.com\n", []string{"example.com"}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := parseDomains(tt.input) + if len(got) != len(tt.want) { + t.Fatalf("got %v, want %v", got, tt.want) + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("got[%d] = %q, want %q", i, got[i], tt.want[i]) + } + } + }) + } +} diff --git a/sandbox/truenas/resolve.go b/sandbox/truenas/resolve.go new file mode 100644 index 0000000..a38d354 --- /dev/null +++ b/sandbox/truenas/resolve.go @@ -0,0 +1,61 @@ +package truenas + +import ( + "context" + "fmt" + "strings" + + tnapi "github.com/deevus/truenas-go" + + "github.com/deevus/pixels/internal/cache" +) + +const containerPrefix = "px-" + +// prefixed prepends the container prefix to a bare name. +func prefixed(name string) string { + return containerPrefix + name +} + +// unprefixed strips the container prefix from a full name. +func unprefixed(name string) string { + return strings.TrimPrefix(name, containerPrefix) +} + +// resolveRunningIP returns the IP of a running container, checking the local +// cache first (keyed by bare name) then falling back to the API. 
+func (t *TrueNAS) resolveRunningIP(ctx context.Context, name string) (string, error) { + full := prefixed(name) + + if cached := cache.Get(name); cached != nil && cached.IP != "" && cached.Status == "RUNNING" { + return cached.IP, nil + } + + instance, err := t.client.Virt.GetInstance(ctx, full) + if err != nil { + return "", fmt.Errorf("looking up %s: %w", name, err) + } + if instance == nil { + return "", fmt.Errorf("instance %q not found", name) + } + if instance.Status != "RUNNING" { + return "", fmt.Errorf("instance %q is %s — start it first", name, instance.Status) + } + + ip := ipFromAliases(instance.Aliases) + if ip == "" { + return "", fmt.Errorf("no IP address for %s", name) + } + cache.Put(name, &cache.Entry{IP: ip, Status: instance.Status}) + return ip, nil +} + +// ipFromAliases extracts the first IPv4 address from a VirtInstance's aliases. +func ipFromAliases(aliases []tnapi.VirtAlias) string { + for _, a := range aliases { + if a.Type == "INET" || a.Type == "ipv4" { + return a.Address + } + } + return "" +} diff --git a/sandbox/truenas/resolve_test.go b/sandbox/truenas/resolve_test.go new file mode 100644 index 0000000..6e746bd --- /dev/null +++ b/sandbox/truenas/resolve_test.go @@ -0,0 +1,200 @@ +package truenas + +import ( + "context" + "strings" + "testing" + + tnapi "github.com/deevus/truenas-go" + + "github.com/deevus/pixels/internal/cache" + tnc "github.com/deevus/pixels/internal/truenas" +) + +func TestResolveRunningIP(t *testing.T) { + tests := []struct { + name string + cached *cache.Entry + instance *tnapi.VirtInstance + getErr error + wantIP string + wantErr string + }{ + { + name: "cache hit", + cached: &cache.Entry{IP: "10.0.0.5", Status: "RUNNING"}, + wantIP: "10.0.0.5", + }, + { + name: "cache miss, no IP cached", + cached: &cache.Entry{IP: "", Status: "RUNNING"}, + instance: &tnapi.VirtInstance{ + Name: "px-test", + Status: "RUNNING", + Aliases: []tnapi.VirtAlias{ + {Type: "INET", Address: "10.0.0.10"}, + }, + }, + wantIP: 
"10.0.0.10", + }, + { + name: "cache miss, status not running", + cached: &cache.Entry{IP: "10.0.0.5", Status: "STOPPED"}, + instance: &tnapi.VirtInstance{ + Name: "px-test", + Status: "RUNNING", + Aliases: []tnapi.VirtAlias{ + {Type: "INET", Address: "10.0.0.10"}, + }, + }, + wantIP: "10.0.0.10", + }, + { + name: "no cache, API lookup", + instance: &tnapi.VirtInstance{ + Name: "px-test", + Status: "RUNNING", + Aliases: []tnapi.VirtAlias{ + {Type: "INET", Address: "192.168.1.50"}, + }, + }, + wantIP: "192.168.1.50", + }, + { + name: "API error", + getErr: context.DeadlineExceeded, + wantErr: "looking up test", + }, + { + name: "instance not found", + wantErr: "not found", + }, + { + name: "instance not running", + instance: &tnapi.VirtInstance{ + Name: "px-test", + Status: "STOPPED", + }, + wantErr: "is STOPPED", + }, + { + name: "no IP address", + instance: &tnapi.VirtInstance{ + Name: "px-test", + Status: "RUNNING", + }, + wantErr: "no IP address", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Clean up cache. 
+ cache.Delete("test") + if tt.cached != nil { + cache.Put("test", tt.cached) + } + defer cache.Delete("test") + + tn := &TrueNAS{ + client: &tnc.Client{ + Virt: &tnapi.MockVirtService{ + GetInstanceFunc: func(ctx context.Context, name string) (*tnapi.VirtInstance, error) { + if name != "px-test" { + t.Errorf("GetInstance called with %q, want px-test", name) + } + if tt.getErr != nil { + return nil, tt.getErr + } + return tt.instance, nil + }, + }, + }, + } + + ip, err := tn.resolveRunningIP(context.Background(), "test") + if tt.wantErr != "" { + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("error %q should contain %q", err.Error(), tt.wantErr) + } + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if ip != tt.wantIP { + t.Errorf("ip = %q, want %q", ip, tt.wantIP) + } + }) + } +} + +func TestIPFromAliases(t *testing.T) { + tests := []struct { + name string + aliases []tnapi.VirtAlias + want string + }{ + { + name: "INET type", + aliases: []tnapi.VirtAlias{ + {Type: "INET", Address: "10.0.0.1"}, + }, + want: "10.0.0.1", + }, + { + name: "ipv4 type", + aliases: []tnapi.VirtAlias{ + {Type: "ipv4", Address: "192.168.1.1"}, + }, + want: "192.168.1.1", + }, + { + name: "skips INET6", + aliases: []tnapi.VirtAlias{ + {Type: "INET6", Address: "fe80::1"}, + {Type: "INET", Address: "10.0.0.1"}, + }, + want: "10.0.0.1", + }, + { + name: "no aliases", + aliases: nil, + want: "", + }, + { + name: "only IPv6", + aliases: []tnapi.VirtAlias{ + {Type: "INET6", Address: "fe80::1"}, + }, + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ipFromAliases(tt.aliases) + if got != tt.want { + t.Errorf("ipFromAliases = %q, want %q", got, tt.want) + } + }) + } +} + +func TestPrefixed(t *testing.T) { + if got := prefixed("mybox"); got != "px-mybox" { + t.Errorf("prefixed(mybox) = %q", got) + } +} + +func TestUnprefixed(t *testing.T) { + if got := 
unprefixed("px-mybox"); got != "mybox" { + t.Errorf("unprefixed(px-mybox) = %q", got) + } + if got := unprefixed("mybox"); got != "mybox" { + t.Errorf("unprefixed(mybox) = %q", got) + } +} diff --git a/sandbox/truenas/ssh.go b/sandbox/truenas/ssh.go new file mode 100644 index 0000000..df0ce06 --- /dev/null +++ b/sandbox/truenas/ssh.go @@ -0,0 +1,32 @@ +package truenas + +import ( + "context" + "io" + "time" + + "github.com/deevus/pixels/internal/ssh" +) + +// sshRunner abstracts SSH operations for testability. The default +// implementation (realSSH) delegates to the ssh package functions. +type sshRunner interface { + ExecQuiet(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) + OutputQuiet(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) + WaitReady(ctx context.Context, host string, timeout time.Duration, log io.Writer) error +} + +// realSSH is the production sshRunner that delegates to the ssh package. +type realSSH struct{} + +func (realSSH) ExecQuiet(ctx context.Context, cc ssh.ConnConfig, cmd []string) (int, error) { + return ssh.ExecQuiet(ctx, cc, cmd) +} + +func (realSSH) OutputQuiet(ctx context.Context, cc ssh.ConnConfig, cmd []string) ([]byte, error) { + return ssh.OutputQuiet(ctx, cc, cmd) +} + +func (realSSH) WaitReady(ctx context.Context, host string, timeout time.Duration, log io.Writer) error { + return ssh.WaitReady(ctx, host, timeout, log) +} diff --git a/sandbox/truenas/truenas.go b/sandbox/truenas/truenas.go new file mode 100644 index 0000000..2cd28d6 --- /dev/null +++ b/sandbox/truenas/truenas.go @@ -0,0 +1,74 @@ +// Package truenas implements the sandbox.Sandbox interface using TrueNAS +// Incus containers via the WebSocket API. +package truenas + +import ( + "context" + + "github.com/deevus/pixels/sandbox" + + tnc "github.com/deevus/pixels/internal/truenas" +) + +// Compile-time check that TrueNAS implements sandbox.Sandbox. 
+var _ sandbox.Sandbox = (*TrueNAS)(nil) + +func init() { + sandbox.Register("truenas", func(cfg map[string]string) (sandbox.Sandbox, error) { + return New(cfg) + }) +} + +// TrueNAS implements sandbox.Sandbox by wrapping internal/truenas.Client, +// internal/ssh, internal/cache, and internal/egress. +type TrueNAS struct { + client *tnc.Client + cfg *tnConfig + ssh sshRunner +} + +// New creates a TrueNAS sandbox backend from a flat config map. +func New(cfg map[string]string) (*TrueNAS, error) { + c, err := parseCfg(cfg) + if err != nil { + return nil, err + } + + client, err := tnc.Connect(context.Background(), c.toConfig()) + if err != nil { + return nil, err + } + + return &TrueNAS{ + client: client, + cfg: c, + ssh: realSSH{}, + }, nil +} + +// NewForTest creates a TrueNAS backend with injected dependencies for testing. +func NewForTest(client *tnc.Client, ssh sshRunner, cfg map[string]string) (*TrueNAS, error) { + c, err := parseCfg(cfg) + if err != nil { + return nil, err + } + return &TrueNAS{ + client: client, + cfg: c, + ssh: ssh, + }, nil +} + +// Capabilities advertises that TrueNAS supports all optional features. +func (t *TrueNAS) Capabilities() sandbox.Capabilities { + return sandbox.Capabilities{ + Snapshots: true, + CloneFrom: true, + EgressControl: true, + } +} + +// Close closes the underlying TrueNAS WebSocket connection. +func (t *TrueNAS) Close() error { + return t.client.Close() +}