Skip to content

Instantly share code, notes, and snippets.

@twobob
Created March 9, 2026 00:36
Show Gist options
  • Select an option

  • Save twobob/08b97c9600ec27a5c38d827d1e84432e to your computer and use it in GitHub Desktop.

Select an option

Save twobob/08b97c9600ec27a5c38d827d1e84432e to your computer and use it in GitHub Desktop.
Place an `input.mid` file in the same directory as the script before running it.
import numpy as np
import mido
from scipy.io import wavfile
import os
class RissetExpertSynth:
    """Additive synthesizer modelled on Jean-Claude Risset's harmonic-bank
    textures: a sparse set of partials whose amplitudes are swept by a
    log-frequency Gaussian window, each partial voiced by a detuned
    three-oscillator bank for a slow beating shimmer.
    """

    def __init__(self, sample_rate=44100):
        # Output sample rate in Hz.
        self.fs = sample_rate
        # Specific partial weights to mimic the 'hollow' Csound timbre.
        # Partials 2, 3 and 4 are deliberately omitted to create the
        # authentic spectral gaps.
        self.partial_weights = {1: 1.0, 5: 0.8, 6: 0.7, 7: 0.6, 8: 0.7, 9: 0.8, 10: 1.0}
        # The 'beating' frequency (Hz offset) used for the shimmer sidebands.
        self.detune_hz = 0.05

    def generate_risset_expert(self, freq, duration, scan_speed=-1.0):
        """Render one note as a Risset cascade with detuned oscillator banks.

        Args:
            freq: Fundamental frequency in Hz.
            duration: Note length in seconds; non-positive yields an empty array.
            scan_speed: Only its sign is used — negative sweeps the spectral
                window from the highest partial down to the fundamental,
                non-negative sweeps upward. (NOTE(review): the magnitude is
                ignored; confirm whether a rate control was intended.)

        Returns:
            A 1-D float64 numpy array of ``int(fs * duration)`` samples.
        """
        samples = int(self.fs * duration)
        if samples <= 0:
            return np.array([])
        t = np.linspace(0, duration, samples, endpoint=False)
        output = np.zeros_like(t)

        # Logarithmic spectral trajectory: the window centre `mu` moves
        # linearly in log-partial space over the note duration.
        max_p = max(self.partial_weights.keys())
        start_mu = np.log(max_p) if scan_speed < 0 else 0
        end_mu = 0 if scan_speed < 0 else np.log(max_p)
        mus = np.linspace(start_mu, end_mu, len(t))
        window_width = 0.5

        # Attack/release ramp to prevent clicking on note transitions.
        # Loop-invariant (depends only on t and duration), so computed once
        # rather than per partial.
        fade = np.clip(t / 0.02, 0, 1) * np.clip((duration - t) / 0.05, 0, 1)

        for n, weight in self.partial_weights.items():
            # 1. Time-varying amplitude: Gaussian window in log-partial space.
            #    A tight width gives a more pronounced 'arpeggio' effect.
            amp = weight * np.exp(-((np.log(n) - mus)**2) / (2 * window_width**2))

            # 2. The detuned bank: three oscillators create the phasing
            #    shimmer — a centre tone plus two slightly offset sidebands
            #    whose interference produces slow beating.
            f_n = freq * n
            osc_bank = (
                np.sin(2 * np.pi * f_n * t) +
                0.5 * np.sin(2 * np.pi * (f_n + self.detune_hz) * t) +
                0.5 * np.sin(2 * np.pi * (f_n - self.detune_hz) * t)
            )

            # 3. Mix this partial into the note, shaped by envelope and fade.
            output += (amp * fade) * osc_bank
        return output
def process_midi_to_risset(midi_path, output_path):
    """Render a MIDI file to a WAV file using the Risset harmonic-bank synth.

    Iterates the MIDI messages in wall-clock order (mido yields `msg.time`
    as seconds when iterating a MidiFile), pairs note-ons with their
    note-offs, renders each note and mixes it into a single buffer, then
    soft-clips and writes 16-bit PCM.

    Args:
        midi_path: Path to the input .mid file; a message is printed and the
            function returns early if it does not exist.
        output_path: Path for the 16-bit PCM WAV output.
    """
    if not os.path.exists(midi_path):
        print(f"Error: {midi_path} not found.")
        return
    synth = RissetExpertSynth()
    mid = mido.MidiFile(midi_path)

    # Pre-allocate the mix buffer from the MIDI length plus 1 s of tail.
    # Use the synth's sample rate everywhere (was hard-coded 44100) so a
    # non-default rate stays consistent across buffer, indexing and export.
    full_buffer = np.zeros(int(synth.fs * (mid.length + 1.0)))
    active_notes = {}
    current_time_sec = 0
    note_count = 0
    print(f"Synthesising '{midi_path}' using Risset's harmonic bank model...")

    for msg in mid:
        current_time_sec += msg.time
        if msg.type == 'note_on' and msg.velocity > 0:
            active_notes[msg.note] = (current_time_sec, msg.velocity)
        elif (msg.type == 'note_off') or (msg.type == 'note_on' and msg.velocity == 0):
            # note_on with velocity 0 is the conventional running-status note-off.
            if msg.note in active_notes:
                start_t, velocity = active_notes.pop(msg.note)
                duration = current_time_sec - start_t
                if duration > 0.05:  # skip notes too short to envelope cleanly
                    # Equal temperament: A4 (MIDI 69) = 440 Hz.
                    freq = 440.0 * (2.0**((msg.note - 69) / 12.0))
                    # Render the cascade and scale by velocity.
                    wave = synth.generate_risset_expert(freq, duration) * (velocity / 127.0)
                    start_idx = int(start_t * synth.fs)
                    end_idx = start_idx + len(wave)
                    if end_idx <= len(full_buffer):
                        full_buffer[start_idx:end_idx] += wave
                        note_count += 1
    # NOTE(review): notes still held at end-of-file are silently dropped —
    # confirm whether dangling note-ons should be flushed at mid.length.

    if note_count == 0:
        print("Process complete: No notes were rendered.")
        return

    # Normalise with soft-clipping to maintain the vintage digital characteristic.
    if np.max(np.abs(full_buffer)) > 0:
        full_buffer = np.tanh(full_buffer / np.max(np.abs(full_buffer)))

    # Final export to 16-bit PCM WAV.
    wavfile.write(output_path, synth.fs, (full_buffer * 32767).astype(np.int16))
    print(f"Success: {note_count} notes rendered to '{output_path}'.")
if __name__ == "__main__":
    # Render 'input.mid' from the working directory to a 16-bit WAV file.
    process_midi_to_risset('input.mid', 'risset_expert_cascade.wav')
@twobob
Copy link
Author

twobob commented Mar 12, 2026

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment