|
package main |
|
|
|
import (
	"context"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/spf13/cobra"
	flag "github.com/spf13/pflag"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)
|
|
|
// RegenerateOptions holds the command-line configuration for the
// "regenerate" subcommand.
type RegenerateOptions struct {
	// HostedClusterName is the name of the HostedCluster to act on (required).
	HostedClusterName string
	// HostedClusterNamespace is the namespace containing the HostedCluster.
	HostedClusterNamespace string
	// Kubeconfig is the path to the management-cluster kubeconfig file.
	Kubeconfig string
	// WaitForNodes controls whether Run blocks until all nodes report Ready.
	WaitForNodes bool
	// Timeout bounds how long the operation may run.
	Timeout time.Duration
	// Local writes the OIDC documents to disk instead of uploading to S3.
	Local bool
}
|
|
|
func newRegenerateCommand() *cobra.Command { |
|
opts := &RegenerateOptions{ |
|
HostedClusterNamespace: "clusters", |
|
WaitForNodes: true, |
|
Timeout: 10 * time.Minute, |
|
} |
|
|
|
cmd := &cobra.Command{ |
|
Use: "regenerate", |
|
Short: "Regenerates OIDC documents for a HostedCluster", |
|
Long: "Regenerates OIDC documents for a HostedCluster by creating the necessary documents in S3 and verifying node readiness", |
|
SilenceUsage: true, |
|
RunE: func(cmd *cobra.Command, args []string) error { |
|
return opts.Run(cmd.Context()) |
|
}, |
|
} |
|
|
|
opts.BindFlags(cmd.Flags()) |
|
_ = cmd.MarkFlagRequired("name") |
|
|
|
return cmd |
|
} |
|
|
|
func (o *RegenerateOptions) BindFlags(flags *flag.FlagSet) { |
|
flags.StringVar(&o.HostedClusterName, "name", o.HostedClusterName, "HostedCluster name (required)") |
|
flags.StringVar(&o.HostedClusterNamespace, "namespace", o.HostedClusterNamespace, "HostedCluster namespace") |
|
flags.StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to kubeconfig file") |
|
flags.BoolVar(&o.WaitForNodes, "wait-for-nodes", o.WaitForNodes, "Wait for nodes to become ready after OIDC regeneration") |
|
flags.DurationVar(&o.Timeout, "timeout", o.Timeout, "Timeout for the operation") |
|
flags.BoolVar(&o.Local, "local", o.Local, "Generate OIDC documents locally without uploading to S3") |
|
} |
|
|
|
func (o *RegenerateOptions) Run(ctx context.Context) error { |
|
fmt.Printf("Starting OIDC document regeneration for cluster: %s in namespace: %s\n", o.HostedClusterName, o.HostedClusterNamespace) |
|
|
|
// Load kubeconfig |
|
config, err := clientcmd.BuildConfigFromFlags("", o.Kubeconfig) |
|
if err != nil { |
|
return fmt.Errorf("failed to load kubeconfig: %w", err) |
|
} |
|
|
|
// Create Kubernetes client |
|
kubeClient, err := kubernetes.NewForConfig(config) |
|
if err != nil { |
|
return fmt.Errorf("failed to create kubernetes client: %w", err) |
|
} |
|
|
|
// Get HostedCluster |
|
hostedCluster, err := o.getHostedCluster(ctx, config) |
|
if err != nil { |
|
return fmt.Errorf("failed to get hosted cluster: %w", err) |
|
} |
|
|
|
fmt.Printf("Found HostedCluster: name=%s, infraID=%s, issuerURL=%s\n", |
|
hostedCluster.Name, hostedCluster.Spec.InfraID, hostedCluster.Spec.IssuerURL) |
|
|
|
if o.Local { |
|
// Local generation mode |
|
fmt.Println("Generating OIDC documents locally...") |
|
|
|
// Get service account signing key |
|
signingKey, err := o.getServiceAccountSigningKey(ctx, kubeClient, hostedCluster) |
|
if err != nil { |
|
return fmt.Errorf("failed to get service account signing key: %w", err) |
|
} |
|
|
|
// Generate OIDC documents locally |
|
if err := o.generateLocalOIDCDocuments(hostedCluster, signingKey); err != nil { |
|
return fmt.Errorf("failed to generate local OIDC documents: %w", err) |
|
} |
|
|
|
fmt.Println("Successfully generated OIDC documents locally") |
|
return nil |
|
} |
|
|
|
// Check if OIDC documents exist in S3 |
|
exists, err := o.checkOIDCDocumentsExist(hostedCluster) |
|
if err != nil { |
|
return fmt.Errorf("failed to check OIDC documents: %w", err) |
|
} |
|
|
|
if exists { |
|
fmt.Printf("OIDC documents already exist in S3 for infraID: %s\n", hostedCluster.Spec.InfraID) |
|
} else { |
|
fmt.Printf("OIDC documents not found in S3 for infraID: %s, regenerating...\n", hostedCluster.Spec.InfraID) |
|
|
|
// Get service account signing key |
|
signingKey, err := o.getServiceAccountSigningKey(ctx, kubeClient, hostedCluster) |
|
if err != nil { |
|
return fmt.Errorf("failed to get service account signing key: %w", err) |
|
} |
|
|
|
// Generate and upload OIDC documents |
|
if err := o.generateAndUploadOIDCDocuments(hostedCluster, signingKey); err != nil { |
|
return fmt.Errorf("failed to generate and upload OIDC documents: %w", err) |
|
} |
|
|
|
fmt.Println("Successfully regenerated OIDC documents") |
|
} |
|
|
|
// Wait for nodes to become ready if requested |
|
if o.WaitForNodes { |
|
fmt.Println("Waiting for nodes to become ready...") |
|
if err := o.waitForNodesReady(ctx, kubeClient, hostedCluster); err != nil { |
|
return fmt.Errorf("failed to wait for nodes ready: %w", err) |
|
} |
|
fmt.Println("All nodes are ready") |
|
} |
|
|
|
fmt.Println("OIDC regeneration completed successfully") |
|
return nil |
|
} |
|
|
|
func (o *RegenerateOptions) getHostedCluster(ctx context.Context, config *rest.Config) (*HostedCluster, error) { |
|
// Create a dynamic client to access the hosted cluster |
|
dynamicClient, err := dynamic.NewForConfig(config) |
|
if err != nil { |
|
return nil, fmt.Errorf("failed to create dynamic client: %w", err) |
|
} |
|
|
|
// Define the GVR for HostedCluster |
|
gvr := schema.GroupVersionResource{ |
|
Group: "hypershift.openshift.io", |
|
Version: "v1beta1", |
|
Resource: "hostedclusters", |
|
} |
|
|
|
// Get the hosted cluster |
|
obj, err := dynamicClient.Resource(gvr). |
|
Namespace(o.HostedClusterNamespace). |
|
Get(ctx, o.HostedClusterName, metav1.GetOptions{}) |
|
if err != nil { |
|
return nil, fmt.Errorf("failed to get hosted cluster %s/%s: %w", o.HostedClusterNamespace, o.HostedClusterName, err) |
|
} |
|
|
|
// Convert to HostedCluster |
|
hostedCluster := &HostedCluster{} |
|
err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), hostedCluster) |
|
if err != nil { |
|
return nil, fmt.Errorf("failed to convert to HostedCluster: %w", err) |
|
} |
|
|
|
return hostedCluster, nil |
|
} |
|
|
|
func (o *RegenerateOptions) checkOIDCDocumentsExist(hostedCluster *HostedCluster) (bool, error) { |
|
// Parse issuer URL to get bucket and key |
|
issuerURL := hostedCluster.Spec.IssuerURL |
|
if !strings.HasPrefix(issuerURL, "https://") { |
|
return false, fmt.Errorf("invalid issuer URL format: %s", issuerURL) |
|
} |
|
|
|
// Extract bucket and region from issuer URL |
|
// Format: https://bucket.s3.region.amazonaws.com/infraID |
|
parts := strings.Split(strings.TrimPrefix(issuerURL, "https://"), "/") |
|
if len(parts) < 2 { |
|
return false, fmt.Errorf("invalid issuer URL format: %s", issuerURL) |
|
} |
|
|
|
bucketParts := strings.Split(parts[0], ".") |
|
if len(bucketParts) < 4 { |
|
return false, fmt.Errorf("invalid S3 URL format: %s", issuerURL) |
|
} |
|
|
|
bucketName := bucketParts[0] |
|
region := bucketParts[2] |
|
infraID := parts[1] |
|
|
|
// Create S3 client |
|
sess := session.Must(session.NewSession(&aws.Config{ |
|
Region: aws.String(region), |
|
})) |
|
s3Client := s3.New(sess) |
|
|
|
// Check if OIDC configuration document exists |
|
configKey := infraID + "/.well-known/openid-configuration" |
|
_, err := s3Client.HeadObject(&s3.HeadObjectInput{ |
|
Bucket: aws.String(bucketName), |
|
Key: aws.String(configKey), |
|
}) |
|
|
|
if err != nil { |
|
if aerr, ok := err.(awserr.Error); ok && (aerr.Code() == s3.ErrCodeNoSuchKey || aerr.Code() == "NotFound") { |
|
return false, nil |
|
} |
|
return false, fmt.Errorf("failed to check OIDC configuration: %w", err) |
|
} |
|
|
|
// Check if JWKS document exists |
|
jwksKey := infraID + "/openid/v1/jwks" |
|
_, err = s3Client.HeadObject(&s3.HeadObjectInput{ |
|
Bucket: aws.String(bucketName), |
|
Key: aws.String(jwksKey), |
|
}) |
|
|
|
if err != nil { |
|
if aerr, ok := err.(awserr.Error); ok && (aerr.Code() == s3.ErrCodeNoSuchKey || aerr.Code() == "NotFound") { |
|
return false, nil |
|
} |
|
return false, fmt.Errorf("failed to check OIDC JWKS: %w", err) |
|
} |
|
|
|
return true, nil |
|
} |
|
|
|
func (o *RegenerateOptions) getServiceAccountSigningKey(ctx context.Context, kubeClient *kubernetes.Clientset, hostedCluster *HostedCluster) ([]byte, error) { |
|
// Get the service account signing key secret |
|
secretName := "sa-signing-key" |
|
namespace := fmt.Sprintf("%s-%s", o.HostedClusterNamespace, hostedCluster.Name) |
|
|
|
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) |
|
if err != nil { |
|
return nil, fmt.Errorf("failed to get service account signing key secret: %w", err) |
|
} |
|
|
|
publicKeyBytes, exists := secret.Data["service-account.pub"] |
|
if !exists { |
|
return nil, fmt.Errorf("service account public key not found in secret") |
|
} |
|
|
|
return publicKeyBytes, nil |
|
} |
|
|
|
func (o *RegenerateOptions) generateAndUploadOIDCDocuments(hostedCluster *HostedCluster, publicKeyBytes []byte) error { |
|
// Parse issuer URL to get bucket and region |
|
issuerURL := hostedCluster.Spec.IssuerURL |
|
parts := strings.Split(strings.TrimPrefix(issuerURL, "https://"), "/") |
|
bucketParts := strings.Split(parts[0], ".") |
|
bucketName := bucketParts[0] |
|
region := bucketParts[2] |
|
infraID := parts[1] |
|
|
|
// Create S3 client |
|
sess := session.Must(session.NewSession(&aws.Config{ |
|
Region: aws.String(region), |
|
})) |
|
s3Client := s3.New(sess) |
|
|
|
// Generate OIDC documents |
|
configDoc, jwksDoc, err := o.generateOIDCDocuments(issuerURL, publicKeyBytes) |
|
if err != nil { |
|
return fmt.Errorf("failed to generate OIDC documents: %w", err) |
|
} |
|
|
|
// Upload configuration document |
|
configKey := infraID + "/.well-known/openid-configuration" |
|
_, err = s3Client.PutObject(&s3.PutObjectInput{ |
|
Bucket: aws.String(bucketName), |
|
Key: aws.String(configKey), |
|
Body: strings.NewReader(configDoc), |
|
ContentType: aws.String("application/json"), |
|
}) |
|
if err != nil { |
|
return fmt.Errorf("failed to upload OIDC configuration: %w", err) |
|
} |
|
|
|
// Upload JWKS document |
|
jwksKey := infraID + "/openid/v1/jwks" |
|
_, err = s3Client.PutObject(&s3.PutObjectInput{ |
|
Bucket: aws.String(bucketName), |
|
Key: aws.String(jwksKey), |
|
Body: strings.NewReader(jwksDoc), |
|
ContentType: aws.String("application/json"), |
|
}) |
|
if err != nil { |
|
return fmt.Errorf("failed to upload OIDC JWKS: %w", err) |
|
} |
|
|
|
fmt.Printf("Successfully uploaded OIDC documents to bucket: %s, infraID: %s\n", bucketName, infraID) |
|
return nil |
|
} |
|
|
|
func (o *RegenerateOptions) generateLocalOIDCDocuments(hostedCluster *HostedCluster, publicKeyBytes []byte) error { |
|
// Generate OIDC documents |
|
configDoc, jwksDoc, err := o.generateOIDCDocuments(hostedCluster.Spec.IssuerURL, publicKeyBytes) |
|
if err != nil { |
|
return fmt.Errorf("failed to generate OIDC documents: %w", err) |
|
} |
|
|
|
// Create workspace directory |
|
workspaceDir := "workspace" |
|
if err := os.MkdirAll(workspaceDir, 0755); err != nil { |
|
return fmt.Errorf("failed to create workspace directory: %w", err) |
|
} |
|
|
|
// Create cluster-specific directory inside workspace |
|
clusterDir := fmt.Sprintf("%s/%s", workspaceDir, hostedCluster.Spec.InfraID) |
|
if err := os.MkdirAll(clusterDir, 0755); err != nil { |
|
return fmt.Errorf("failed to create cluster directory: %w", err) |
|
} |
|
|
|
// Write configuration document |
|
configPath := fmt.Sprintf("%s/.well-known/openid-configuration", clusterDir) |
|
if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil { |
|
return fmt.Errorf("failed to create config directory: %w", err) |
|
} |
|
if err := os.WriteFile(configPath, []byte(configDoc), 0644); err != nil { |
|
return fmt.Errorf("failed to write configuration document: %w", err) |
|
} |
|
|
|
// Write JWKS document |
|
jwksPath := fmt.Sprintf("%s/openid/v1/jwks", clusterDir) |
|
if err := os.MkdirAll(filepath.Dir(jwksPath), 0755); err != nil { |
|
return fmt.Errorf("failed to create jwks directory: %w", err) |
|
} |
|
if err := os.WriteFile(jwksPath, []byte(jwksDoc), 0644); err != nil { |
|
return fmt.Errorf("failed to write JWKS document: %w", err) |
|
} |
|
|
|
fmt.Printf("Successfully generated OIDC documents locally in workspace: %s\n", clusterDir) |
|
fmt.Printf("Configuration document: %s\n", configPath) |
|
fmt.Printf("JWKS document: %s\n", jwksPath) |
|
return nil |
|
} |
|
|
|
func (o *RegenerateOptions) generateOIDCDocuments(issuerURL string, publicKeyBytes []byte) (string, string, error) { |
|
// Parse the public key |
|
block, _ := pem.Decode(publicKeyBytes) |
|
if block == nil { |
|
return "", "", fmt.Errorf("failed to decode PEM block") |
|
} |
|
|
|
pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) |
|
if err != nil { |
|
return "", "", fmt.Errorf("failed to parse public key: %w", err) |
|
} |
|
|
|
rsaPubKey, ok := pubKey.(*rsa.PublicKey) |
|
if !ok { |
|
return "", "", fmt.Errorf("public key is not RSA") |
|
} |
|
|
|
// Generate key ID (kid) |
|
kid := o.generateKeyID(publicKeyBytes) |
|
|
|
// Generate JWKS |
|
jwks := map[string]interface{}{ |
|
"keys": []map[string]interface{}{ |
|
{ |
|
"use": "sig", |
|
"kty": "RSA", |
|
"kid": kid, |
|
"alg": "RS256", |
|
"n": base64.RawURLEncoding.EncodeToString(rsaPubKey.N.Bytes()), |
|
"e": base64.RawURLEncoding.EncodeToString([]byte{byte(rsaPubKey.E)}), |
|
}, |
|
}, |
|
} |
|
|
|
jwksBytes, err := json.MarshalIndent(jwks, "", " ") |
|
if err != nil { |
|
return "", "", fmt.Errorf("failed to marshal JWKS: %w", err) |
|
} |
|
|
|
// Generate configuration document |
|
config := map[string]interface{}{ |
|
"issuer": issuerURL, |
|
"jwks_uri": issuerURL + "/openid/v1/jwks", |
|
"response_types_supported": []string{"id_token"}, |
|
"subject_types_supported": []string{"public"}, |
|
"id_token_signing_alg_values_supported": []string{"RS256"}, |
|
} |
|
|
|
configBytes, err := json.MarshalIndent(config, "", " ") |
|
if err != nil { |
|
return "", "", fmt.Errorf("failed to marshal configuration: %w", err) |
|
} |
|
|
|
return string(configBytes), string(jwksBytes), nil |
|
} |
|
|
|
func (o *RegenerateOptions) generateKeyID(publicKeyBytes []byte) string { |
|
hash := sha256.Sum256(publicKeyBytes) |
|
return base64.StdEncoding.EncodeToString(hash[:]) |
|
} |
|
|
|
func (o *RegenerateOptions) waitForNodesReady(ctx context.Context, kubeClient *kubernetes.Clientset, hostedCluster *HostedCluster) error { |
|
timeout := time.After(o.Timeout) |
|
ticker := time.NewTicker(30 * time.Second) |
|
defer ticker.Stop() |
|
|
|
for { |
|
select { |
|
case <-timeout: |
|
return fmt.Errorf("timeout waiting for nodes to become ready") |
|
case <-ticker.C: |
|
// Check node status |
|
nodes, err := kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) |
|
if err != nil { |
|
fmt.Printf("Failed to list nodes: %v\n", err) |
|
continue |
|
} |
|
|
|
readyNodes := 0 |
|
totalNodes := len(nodes.Items) |
|
|
|
for _, node := range nodes.Items { |
|
for _, condition := range node.Status.Conditions { |
|
if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue { |
|
readyNodes++ |
|
break |
|
} |
|
} |
|
} |
|
|
|
fmt.Printf("Node status: %d/%d ready\n", readyNodes, totalNodes) |
|
|
|
if readyNodes == totalNodes && totalNodes > 0 { |
|
return nil |
|
} |
|
} |
|
} |
|
} |
|
|
|
// HostedCluster represents a simplified version of the HostedCluster resource
// (hypershift.openshift.io/v1beta1); only the fields this command reads are
// declared, and the rest of the CR payload is dropped during conversion.
type HostedCluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec carries the subset of the HostedCluster spec used by this command.
	Spec HostedClusterSpec `json:"spec,omitempty"`
}
|
|
|
// HostedClusterSpec holds the HostedCluster spec fields needed for OIDC
// document generation.
type HostedClusterSpec struct {
	// InfraID is the infrastructure identifier; used as the S3 key prefix
	// and the local workspace directory name.
	InfraID string `json:"infraID,omitempty"`
	// IssuerURL is the OIDC issuer URL, expected in the form
	// https://bucket.s3.region.amazonaws.com/infraID.
	IssuerURL string `json:"issuerURL,omitempty"`
}