Status: Draft
Author: @bdougie
Date: 2026-02-15
Add pluggable storage providers to Tapes, enabling session sync to cloud backends for team sharing. The SaaS option (tapes.dev) serves as an on-ramp to Paper Compute's broader agent infrastructure platform.
Tapes is local-first by design—sessions live in ~/.tapes/tapes.db. This is great for privacy and offline use, but creates friction for teams:
- No sync: Sessions are isolated per developer machine
- Manual sharing: Export/import is tedious for regular collaboration
- No team visibility: Managers can't see aggregate usage
A cloud storage provider solves this while preserving local-first as the default.
Business context:
- Tapes SaaS is not the core business—it's a funnel
- Users who adopt team sharing → discover value of agent observability
- Natural upgrade path → Paper Compute's broader agent infrastructure platform
- Free tier keeps friction low; paid tiers unlock team features
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
│ tapes │ │ proxy │ │ storage │
│ CLI │────▶│ worker │────▶│ driver │
└──────────────┘ │ pool │ │ (sqlite) │
└──────────────┘ └──────────────┘
│
~/.tapes/tapes.db
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
│ tapes │ │ proxy │ │ storage │
│ CLI │────▶│ worker │────▶│ driver │
└──────────────┘ │ pool │ │ (sqlite) │
└──────────────┘ └──────┬───────┘
│
┌───────────────────────────┼───────────────────────────┐
│ │ │
┌──────▼──────┐ ┌───────▼───────┐ ┌───────▼───────┐
│ local │ │ cloud │ │ hybrid │
│ (sqlite) │ │ (tapes.dev) │ │ (local+sync) │
└─────────────┘ └───────────────┘ └───────┬───────┘
│ │
┌──────▼──────┐ ┌──────▼──────┐
│ tapes.dev │ │ sqlite │
│ backend │ │ + │
└──────┬──────┘ │ tapes.dev │
│ └─────────────┘
┌──────▼──────┐
│ Paper │
│ Compute │
│ Platform │
└─────────────┘
Following the pattern from memory.Driver (PR #86):
// pkg/storage/provider.go
package storage
import (
	"context"
	"time"

	"github.com/papercomputeco/tapes/pkg/merkle"
)
// Provider defines the interface for pluggable storage backends.
// Implementations handle where and how session data is persisted.
// All methods accept a context for cancellation; Close must be called
// when the provider is no longer needed.
type Provider interface {
	// Put stores a node. Returns true if newly inserted, false if the
	// node was already present (storage is content-addressed by hash).
	Put(ctx context.Context, node *merkle.Node) (bool, error)
	// Get retrieves a node by hash.
	Get(ctx context.Context, hash string) (*merkle.Node, error)
	// Has checks if a node exists.
	Has(ctx context.Context, hash string) (bool, error)
	// Ancestry returns the path from a node to its root.
	Ancestry(ctx context.Context, hash string) ([]*merkle.Node, error)
	// Leaves returns all leaf nodes (session heads).
	Leaves(ctx context.Context) ([]*merkle.Node, error)
	// Close releases resources.
	Close() error
}
// SyncProvider extends Provider with sync capabilities for hybrid storage.
// Implementations keep a local store and a remote backend converged.
type SyncProvider interface {
	Provider
	// Sync pushes local changes to remote.
	Sync(ctx context.Context) error
	// Pull fetches remote changes to local.
	Pull(ctx context.Context) error
	// SyncStatus returns sync state.
	SyncStatus(ctx context.Context) (*SyncStatus, error)
}
// SyncStatus describes the synchronization state between the local
// store and the remote backend.
type SyncStatus struct {
	LastSyncAt    time.Time `json:"last_sync_at"`   // time of the last successful sync
	PendingPush   int       `json:"pending_push"`   // local nodes not yet pushed to remote
	PendingPull   int       `json:"pending_pull"`   // remote nodes not yet pulled locally
	RemoteVersion string    `json:"remote_version"` // opaque remote version marker — TODO confirm semantics
}
// TeamProvider extends Provider with team-aware operations
// (sharing sessions to and listing sessions from a team).
type TeamProvider interface {
	Provider
	// SharedSessions returns sessions shared with the current user.
	SharedSessions(ctx context.Context, teamID string) ([]*SharedSession, error)
	// ShareSession shares a session with a team.
	ShareSession(ctx context.Context, sessionID, teamID string, opts ShareOptions) error
	// UnshareSession removes a session from sharing.
	UnshareSession(ctx context.Context, sessionID, teamID string) error
}

// pkg/storage/local/local.go
// LocalProvider is the default storage backend: a SQLite database on
// the local filesystem (local-first, works offline).
type LocalProvider struct {
	client *ent.Client // ent ORM client over the SQLite database
	dbPath string      // filesystem path to the database, e.g. ~/.tapes/tapes.db
}
// NewLocalProvider opens (or creates) the SQLite database at dbPath
// and returns a Provider backed by it.
func NewLocalProvider(dbPath string) (*LocalProvider, error) {
	// Existing SQLite implementation
	// ~/.tapes/tapes.db
}

Config:
[storage]
provider = "local" # default
path = "~/.tapes/tapes.db"

// pkg/storage/cloud/cloud.go
// CloudProvider stores all session data remotely on tapes.dev;
// nothing is persisted locally.
type CloudProvider struct {
	apiURL string       // base URL of the tapes.dev API, e.g. https://api.tapes.dev
	apiKey string       // API key used to authenticate requests
	teamID string       // team scope for team-aware operations
	client *http.Client // HTTP client used for all API calls
}
// NewCloudProvider builds a CloudProvider from the given configuration.
func NewCloudProvider(config CloudConfig) (*CloudProvider, error) {
	// Pure cloud storage - no local persistence
	// All operations go to tapes.dev API
}
// Put stores a node by POSTing it to the tapes.dev API.
func (p *CloudProvider) Put(ctx context.Context, node *merkle.Node) (bool, error) {
	// POST /api/v1/nodes
	return p.postNode(ctx, node)
}
// SharedSessions returns the sessions shared with the given team.
func (p *CloudProvider) SharedSessions(ctx context.Context, teamID string) ([]*SharedSession, error) {
	// GET /api/v1/teams/:teamID/sessions
	return p.fetchSharedSessions(ctx, teamID)
}

Config:
[storage]
provider = "cloud"
api_url = "https://api.tapes.dev"
api_key = "tapes_..."
[storage.cloud]
team_id = "team_abc123"
auto_share = true # automatically share to team

// pkg/storage/hybrid/hybrid.go
// HybridProvider is local-first storage with background sync to the
// cloud: writes land in SQLite immediately and are pushed to
// tapes.dev asynchronously.
type HybridProvider struct {
	local  *LocalProvider // authoritative local store (fast path)
	remote *CloudProvider // remote mirror on tapes.dev
	syncMu sync.Mutex     // serializes Sync runs
}
// NewHybridProvider wires a local SQLite store at localPath to a
// cloud backend configured by cloudConfig.
func NewHybridProvider(localPath string, cloudConfig CloudConfig) (*HybridProvider, error) {
	// Local-first with background sync to cloud
}
// Put writes the node to the local store first and, if it was newly
// inserted, queues its hash for asynchronous sync to the remote.
// Nodes that already exist locally are not re-queued.
func (p *HybridProvider) Put(ctx context.Context, node *merkle.Node) (bool, error) {
	// Write to local first (fast, always works)
	inserted, err := p.local.Put(ctx, node)
	if err != nil {
		return false, err
	}
	// Queue for async sync to remote
	if inserted {
		p.queueSync(node.Hash)
	}
	return inserted, nil
}
// Sync pushes all pending local changes to the remote backend.
// Only one Sync may run at a time (guarded by syncMu). A remote Put
// failure aborts the run; remaining hashes stay pending for retry.
func (p *HybridProvider) Sync(ctx context.Context) error {
	p.syncMu.Lock()
	defer p.syncMu.Unlock()
	// Push pending local changes
	pending, err := p.getPendingSync(ctx)
	if err != nil {
		return err
	}
	for _, hash := range pending {
		node, err := p.local.Get(ctx, hash)
		if err != nil {
			// NOTE(review): a local read failure silently skips this node;
			// it stays pending and is retried on the next Sync — confirm
			// this is intended rather than surfacing the error.
			continue
		}
		if _, err := p.remote.Put(ctx, node); err != nil {
			return fmt.Errorf("sync failed for %s: %w", hash, err)
		}
		// NOTE(review): markSynced's error (if any) is not checked here.
		p.markSynced(hash)
	}
	return nil
}

Config:
[storage]
provider = "hybrid"
path = "~/.tapes/tapes.db"
[storage.cloud]
api_url = "https://api.tapes.dev"
api_key = "tapes_..."
team_id = "team_abc123"
sync_interval = "5m" # background sync every 5 minutes
sync_on_close = true # sync when CLI exits

# Login to tapes.dev
tapes auth login
# Opens browser for OAuth flow
# Check storage status
tapes storage status
# Provider: hybrid
# Local: ~/.tapes/tapes.db (245 sessions)
# Remote: tapes.dev (230 synced, 15 pending)
# Last sync: 2 minutes ago
# Force sync
tapes storage sync
# Switch provider
tapes storage set-provider hybrid
# Configure cloud
tapes storage configure --api-key tapes_abc123 --team team_xyz

POST /auth/github
POST /auth/token/refresh
POST /api/v1/nodes Create/sync node
GET /api/v1/nodes/:hash Get node
GET /api/v1/sessions List sessions (with pagination)
GET /api/v1/sessions/:id Get session detail
POST /api/v1/teams Create team
GET /api/v1/teams List user's teams
GET /api/v1/teams/:id Get team
POST /api/v1/teams/:id/members Add member
DELETE /api/v1/teams/:id/members/:uid Remove member
POST /api/v1/teams/:id/invite Create invite
POST /api/v1/sessions/:id/share Share session
DELETE /api/v1/sessions/:id/share Unshare session
GET /api/v1/shared Get sessions shared with me
POST /api/v1/sync/push Push nodes to remote
GET /api/v1/sync/pull Pull nodes from remote
GET /api/v1/sync/status Get sync status
-- Users (from OAuth)
CREATE TABLE users (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    github_id BIGINT UNIQUE,  -- GitHub account ID from the OAuth flow
    email TEXT UNIQUE NOT NULL,
    name TEXT,
    avatar_url TEXT,
    created_at TIMESTAMPTZ DEFAULT now()
);
-- Teams
CREATE TABLE teams (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name TEXT NOT NULL,
    slug TEXT UNIQUE NOT NULL,  -- URL-safe identifier
    owner_id UUID NOT NULL REFERENCES users(id),
    tier TEXT DEFAULT 'free', -- 'free', 'pro', 'enterprise'
    created_at TIMESTAMPTZ DEFAULT now()
);
-- Team members (join table; membership is removed when either side is deleted)
CREATE TABLE team_members (
    team_id UUID REFERENCES teams(id) ON DELETE CASCADE,
    user_id UUID REFERENCES users(id) ON DELETE CASCADE,
    role TEXT DEFAULT 'member',  -- NOTE(review): allowed roles not enumerated here — confirm set
    joined_at TIMESTAMPTZ DEFAULT now(),
    PRIMARY KEY (team_id, user_id)
);
-- Nodes (content-addressed, deduplicated): one row per Merkle DAG node.
CREATE TABLE nodes (
    hash TEXT PRIMARY KEY,                       -- content hash; primary identity
    parent_hash TEXT REFERENCES nodes(hash),     -- self-reference forming the DAG; NULL for roots
    user_id UUID NOT NULL REFERENCES users(id),  -- owner who synced the node
    bucket JSONB NOT NULL,                       -- raw node payload — presumably the serialized bucket; confirm schema
    -- Denormalized fields for querying
    model TEXT,
    provider TEXT,
    agent_name TEXT,
    project TEXT,
    role TEXT,
    -- Token usage
    prompt_tokens INT,
    completion_tokens INT,
    total_tokens INT,
    -- Timestamps
    created_at TIMESTAMPTZ DEFAULT now(),
    synced_at TIMESTAMPTZ DEFAULT now()
);
-- Shared sessions: a session (identified by its leaf node hash) shared with a team.
CREATE TABLE shared_sessions (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    session_hash TEXT NOT NULL, -- leaf node hash; NOTE(review): no FK to nodes(hash) — intentional if nodes may sync later? confirm
    team_id UUID NOT NULL REFERENCES teams(id) ON DELETE CASCADE,
    shared_by UUID NOT NULL REFERENCES users(id),
    title TEXT,
    note TEXT,
    visibility TEXT DEFAULT 'team',
    shared_at TIMESTAMPTZ DEFAULT now(),
    expires_at TIMESTAMPTZ, -- NULL means the share never expires
    UNIQUE(session_hash, team_id) -- a session can be shared to a team at most once
);
-- Indexes supporting the common query paths: per-user listing,
-- DAG ancestry walks, project filtering, and time-ordered views.
CREATE INDEX idx_nodes_user ON nodes(user_id);
CREATE INDEX idx_nodes_parent ON nodes(parent_hash);
CREATE INDEX idx_nodes_project ON nodes(project);
CREATE INDEX idx_nodes_created ON nodes(created_at);
CREATE INDEX idx_shared_team ON shared_sessions(team_id);

| Tier | Price | Sessions | Teams | Members | Features |
|---|---|---|---|---|---|
| Free | $0 | 100/mo | 1 | 3 | Basic sync, 30-day retention |
| Pro | $10/mo | Unlimited | 3 | 10 | Full history, team dashboard |
| Enterprise | Custom | Unlimited | Unlimited | Unlimited | SSO, audit logs, SLA |
Upgrade path:
- Free users who hit limits → upgrade to Pro
- Pro teams needing orchestration → discover Paper Compute platform
- Enterprise → full Paper Compute platform
Tapes SaaS is designed to integrate with Paper Compute's broader agent infrastructure platform (details TBA).
# Replay a session in an isolated sandbox
tapes replay abc123 --sandbox
# Use cases:
# - A/B test models: replay same session with different models
# - Debug failures: replay with verbose logging
# - Compliance audit: prove deterministic behavior

The team dashboard will provide hooks into orchestration features:
- Replay sessions with different configurations
- Spin up agent sandboxes from session context
- Run comparative benchmarks
- Export session patterns as reusable templates
Generated skills can be packaged as reusable agent configurations:
# Skill exported as agent template
name: debug-react-hooks
system_prompt: |
When debugging React hook issues, follow this systematic approach:
1. Identify the problematic hook
2. Check dependency arrays...
tools:
- read_file
- grep_search
- edit_file
tapes:
  enabled: true

For regulated industries:
- All agent actions logged with cryptographic verification
- Merkle DAG proofs for immutable audit trails
- Session replay for incident investigation
- Air-gapped mode: local Tapes only, no cloud sync
Free Tier (Tapes)
│ 100 sessions/month
│ Basic sharing
▼
Pro Tier ($10/mo)
│ Unlimited sessions
│ Team dashboard
│ Skills generation
▼
Team Tier ($100/mo)
│ Advanced orchestration
│ Custom templates
│ Full telemetry + SSO
▼
Enterprise (Custom pricing)
│ Air-gapped deployment
│ Compliance features
│ Dedicated support
// pkg/storage/cloud/orchestration.go

// OrchestrationClient provides hooks for future platform integration
// (replay, skill templates, team orchestration status). The referenced
// option/result types are not yet defined in this document.
type OrchestrationClient interface {
	// ReplaySession replays a session in an isolated environment
	ReplaySession(ctx context.Context, sessionID string, opts ReplayOptions) (*ReplayResult, error)
	// CreateTemplateFromSkill packages a skill as a reusable template
	CreateTemplateFromSkill(ctx context.Context, skill *Skill) (*Template, error)
	// GetTeamStatus returns orchestration status for team
	GetTeamStatus(ctx context.Context, teamID string) (*TeamStatus, error)
}

- Define
`Provider`, `SyncProvider`, `TeamProvider` interfaces
- Refactor existing SQLite driver to implement `Provider`
- Provider factory with config-based selection
- `tapes storage status` command
- tapes.dev API (auth, nodes, sync endpoints)
- Cloud provider implementation
- `tapes auth login` OAuth flow
- API key management
- Local + remote sync logic
- Background sync worker
- Conflict resolution (content-addressed = no conflicts)
- `tapes storage sync` command
- Team CRUD API
- Sharing endpoints
- Team dashboard API
- Deck integration for shared sessions
- Platform integration points
- Agent sandbox spawn from session
- Cross-model replay
- Sync granularity: Full session DAG or individual nodes?
- Conflict handling: Should we support multi-writer to same session?
- Offline mode: How long to queue without sync?
- Data residency: EU/US region selection for compliance?
- Encryption: E2E encryption for session content?
- API keys scoped per-team
- OAuth tokens refreshed automatically
- Session content encrypted at rest
- Option to exclude message content (metadata-only sync)
- Audit log for shared session access