Skip to content

Instantly share code, notes, and snippets.

@pjcdawkins
Created November 13, 2025 23:38
Show Gist options
  • Select an option

  • Save pjcdawkins/6f63fad7eea19c3d698b2740aaf21959 to your computer and use it in GitHub Desktop.

Select an option

Save pjcdawkins/6f63fad7eea19c3d698b2740aaf21959 to your computer and use it in GitHub Desktop.
package main
import (
"context"
"fmt"
"os"
"runtime"
"time"
"github.com/maximhq/bifrost/core"
"github.com/maximhq/bifrost/core/schemas"
)
// testAccount is a minimal account implementation used only for this
// leak-reproduction script. It serves exactly one provider (OpenAI) with a
// single key read from the environment.
// NOTE(review): presumably satisfies bifrost's account interface (it is
// passed as schemas.BifrostConfig.Account below) — verify against
// the schemas package.
type testAccount struct{}
// GetConfiguredProviders reports the providers this account is configured
// for. Only OpenAI is enabled in this test setup.
func (a *testAccount) GetConfiguredProviders() ([]schemas.ModelProvider, error) {
	providers := []schemas.ModelProvider{schemas.OpenAI}
	return providers, nil
}
// GetKeysForProvider returns the API key(s) usable for the given provider.
// Only OpenAI is supported; its key comes from the OPENAI_API_KEY
// environment variable, and an error is returned if the variable is unset.
//
// NOTE(review): passing *context.Context is unusual — contexts are normally
// passed by value. Presumably this matches the bifrost account interface
// signature; confirm before changing.
func (a *testAccount) GetKeysForProvider(
	ctx *context.Context, providerKey schemas.ModelProvider,
) ([]schemas.Key, error) {
	if providerKey != schemas.OpenAI {
		return nil, fmt.Errorf("only OpenAI supported")
	}
	secret := os.Getenv("OPENAI_API_KEY")
	if secret == "" {
		return nil, fmt.Errorf("OPENAI_API_KEY not set")
	}
	key := schemas.Key{
		ID:     "test-key",
		Value:  secret,
		Weight: 1.0,
	}
	return []schemas.Key{key}, nil
}
// GetConfigForProvider returns the provider configuration, ignoring the
// provider argument and always using bifrost's default network and
// concurrency settings.
func (a *testAccount) GetConfigForProvider(_ schemas.ModelProvider) (*schemas.ProviderConfig, error) {
	cfg := &schemas.ProviderConfig{
		NetworkConfig:            schemas.DefaultNetworkConfig,
		ConcurrencyAndBufferSize: schemas.DefaultConcurrencyAndBufferSize,
	}
	cfg.CheckAndSetDefaults()
	return cfg, nil
}
// main reproduces a potential goroutine leak in bifrost: it initializes a
// client under a 5-second timeout context, fires a streaming chat request
// that the deadline will cut off, then compares goroutine counts before and
// after to see whether worker goroutines were torn down. Exits with status 1
// if more than a small tolerance of goroutines remain.
func main() {
	baseline := runtime.NumGoroutine()
	fmt.Printf("Goroutines before: %d\n", baseline)

	// The client shares this timing-out context, so its workers should stop
	// once the deadline fires.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	client, err := bifrost.Init(ctx, schemas.BifrostConfig{
		Account: &testAccount{},
	})
	if err != nil {
		panic(err)
	}

	postInit := runtime.NumGoroutine()
	fmt.Printf("Goroutines after Init: %d (created: %d)\n", postInit, postInit-baseline)

	// Kick off a streaming request that the 5s deadline will interrupt.
	fmt.Println("Making streaming request (will timeout in 5s)...")
	req := &schemas.BifrostChatRequest{
		Provider: schemas.OpenAI,
		Model:    "gpt-4.1-mini",
		Input: []schemas.ChatMessage{
			{
				Role: schemas.ChatMessageRoleUser,
				Content: &schemas.ChatMessageContent{
					ContentStr: schemas.Ptr("Hello, tell me a long story"),
				},
			},
		},
	}
	stream, bifrostErr := client.ChatCompletionStreamRequest(ctx, req)
	switch {
	case bifrostErr != nil:
		fmt.Printf("Error (expected): %v\n", bifrostErr.Error.Message)
	default:
		// Drain every chunk until the stream channel closes (or the
		// context deadline kills it).
		fmt.Println("Consuming stream...")
		for range stream {
		}
	}

	// Block until the deadline has actually fired, then allow a grace
	// period for workers to clean up before re-counting.
	<-ctx.Done()
	fmt.Println("Context cancelled/timed out")
	fmt.Println("Waiting 2 seconds for cleanup...")
	time.Sleep(2 * time.Second)

	final := runtime.NumGoroutine()
	leaked := final - baseline
	fmt.Printf("\nResults:\n")
	fmt.Printf(" Goroutines before: %d\n", baseline)
	fmt.Printf(" Goroutines after: %d\n", final)
	fmt.Printf(" Leaked: %d\n", leaked)

	// A couple of stragglers are tolerated (runtime/background goroutines).
	if leaked <= 2 {
		fmt.Printf("\n✓ No significant leak detected\n")
		return
	}
	fmt.Printf("\n❌ LEAK DETECTED: %d goroutines leaked\n", leaked)
	os.Exit(1)
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment