Since the beginning of the semester, I knew I wanted to work with audio-reactivity. Initially using the Processing library called Minim, I found resources to be difficult to understand and implement considering the time I was able to allocate to working on this. As such, I turned to the Processing website and decided to switch to using Processing Sound, which is Processing’s native sound library. This came with the added bonus of reference and example code directly in Processing itself. I began by trying to understand the code’s logic by making small changes and observing what responds.
The project itself contains an image rasterizer that changes the background image’s pixel count as you move your mouse around the screen. In the meantime, music can be heard playing and several layers of audio-reactive “streams” and shapes sit on top of the image, actively borrowing colour from the image as they are moved around. The bezier is mapped to the mouse position and will move as the mouse is moved. Using FFT (Fast Fourier Transform) data, the amplitude and shape of the bezier pulse to the music. On top of the bezier are two waveform visualizations that show the soundwave at 256 samples.
These visualizations are not tied into any particular audio. In fact, the audio itself can be manipulated using the mouse and keyboard. When the mouse is left-clicked, the sample speeds up, and when right-clicked it slows down. The visuals will reflect these audio changes. Additionally, there is a bandpass and lowpass filter mapped to the screen, so while holding down a key, moving the mouse around the screen also changes the frequencies that are playing.
import processing.sound.*;
// --- Sketch state -----------------------------------------------------
PImage img;        // background image, drawn as a mosaic in render()
SoundFile sample;  // looping music track (rate changed in mousePressed())
BandPass bandPass; // mouse-controlled band-pass filter (see filter())
LowPass lowPass;   // mouse-controlled low-pass filter (see filter())
FFT fft;           // spectrum analysis feeding bezel()
Waveform waveform; // raw sample data feeding waveTube()
// Number of waveform samples analyzed/drawn per frame
int samples = 256;
int bands = 512; // FFT bands
int doubleCup = bands / 4; // how many low FFT bands bezel() draws (128)
float smoothingFactor = 0.4; // FFT smoothing factor (1: highest, 0.1: lowest) — NOTE(review): declared but never used in this file
float playbackRate = 1.0; // current playback-rate multiplier, adjusted by mouse clicks
// One-time initialization: window, background image, audio playback,
// and the analysis/filter chain that drives every visual layer.
void setup() {
  size(1000, 800);

  // Background image, scaled to exactly fill the sketch window.
  img = loadImage("SLO.jpg");
  img.resize(1000, 800);

  // Load the track and start looping it immediately.
  sample = new SoundFile(this, "please remix.mp3");
  sample.loop();

  // Raw waveform data feeds the line visualizations in waveTube().
  waveform = new Waveform(this, samples);
  waveform.input(sample);

  // Spectrum analysis feeds the pulsing beziers in bezel().
  fft = new FFT(this, bands);
  fft.input(sample);

  // Mouse-controlled filters applied to the playing sample (see filter()).
  lowPass = new LowPass(this);
  lowPass.process(sample);
  bandPass = new BandPass(this);
  bandPass.process(sample);
}
// Per-frame loop: clear, update audio filters from the mouse, draw the
// image mosaic, then layer the audio-reactive visuals on top.
// NOTE(review): the helper name filter() shadows PApplet's built-in
// filter(...) overloads; consider renaming to avoid confusion.
void draw() {
// Dark gray clear each frame (alpha on background() is ignored for
// the main canvas, so this is a full wipe, not a fade).
background(11, 88);
noFill();
filter();
render();
// DIFFERENCE blending must be set before the overlays so they
// invert/borrow colour from the image tiles beneath them.
blendMode(DIFFERENCE);
bezel();
waveTube();
}
void filter() {
if (keyPressed) {
float ratio = float (height)/float (width);
float freq = map(mouseX, 0, width, 250, 10000); // Frequency range: 250 Hz to 10 kHz
float bw = map(mouseY, 0, height, 10, 5000); // Bandwidth range: 10 Hz to 5 kHz
float res = map(ratio, 0, ratio, 0.1, 0.4);
bandPass.freq(freq);
bandPass.bw(bw);
bandPass.res(res);
float lowpassFreq = map(mouseX, 0, height / 2, 250, 10000);
lowPass.freq(lowpassFreq);
lowPass.res(res);
}
}
void render() {
float tilesX = map(mouseX, 0, width, 10, 100); // Number of horizontal tiles
float tileSize = height / tilesX;
for (int y = 0; y < img.height; y += int(tileSize)) {
for (int x = 0; x < img.width; x += int(tileSize)) {
color c = img.get(x, y);
float b = map(brightness(c), 50, 200, 1, 0.5);
pushMatrix();
translate(x, y);
fill(c);
rect(0, 0, b * tileSize, b * tileSize);
popMatrix();
}
}
}
// Draw a fan of FFT-reactive bezier curves: the start anchor tracks the
// mouse, and the control points bulge with the energy in each low band.
void bezel() {
  fft.analyze();
  for (int band = 0; band < doubleCup; band++) {
    // Scale the normalized spectrum value up to a screen-sized offset.
    float lift = fft.spectrum[band] * (height * 290);
    // Alternate two transparency levels for a layered look.
    stroke(255, 255, 255, (band % 2 == 0) ? 68 : 55);
    float startX = mouseX - band * 10;
    float startY = height / 2;
    float ctrl1X = width / 2 - 100;
    float ctrl1Y = height / 2 - lift;
    float ctrl2X = width / 2 + 100 + 55;
    float ctrl2Y = mouseY + lift + 55;
    float endX = width / 4 + band * 10;
    float endY = height;
    bezier(startX, startY, ctrl1X, ctrl1Y, ctrl2X, ctrl2Y, endX, endY);
  }
}
// Draw two layered waveform visualizations over the scene.
void waveTube() {
  // One analysis per frame is enough; both passes read the same data
  // array. (The original called analyze() twice back to back.)
  waveform.analyze();

  // Pass 1: pale connected waveform across the upper band.
  // Bug fix: the far endpoint previously reused sample j, so x3==x4 and
  // y3==y4 — every segment was zero-length and the commented "offset
  // waveform" never rendered as lines. Use the next sample (j + 1),
  // which the loop bound (samples - 1) already keeps in range.
  for (int j = 0; j < samples - 1; j++) {
    float x3 = map(j, 0, samples, 0, width);
    float y3 = map(waveform.data[j], -1, 1, height / 2 + 450, height / 2 - 400);
    float x4 = map(j + 1, 0, samples, 0, width);
    float y4 = map(waveform.data[j + 1], -1, 1, height / 2 + 450, height / 2 - 400);
    // Louder samples are drawn more opaque.
    float amplitude = abs(waveform.data[j]);
    int alphaValue = (int) map(amplitude, 0, 0.5, 85, 200); // Adjust transparency
    strokeWeight(4);
    stroke(lerpColor(color(255, 235, 250), color(255, 235, 250), amplitude), alphaValue); // Ghost trail effect
    line(x3, y3, x4, y4);
  }

  // Pass 2: purple scattered strands; the compressed x2 (i * 0.2) and
  // the random() in each y mapping create the ghost-trail jitter.
  for (int i = 0; i < samples - 1; i++) {
    float x1 = map(i, 0, samples, 0, width);
    float y1 = map(waveform.data[i], -1, 1, random(height / 2 - 400, height / 4 - 400), height / 2 + 400);
    float x2 = map(i * 0.2, 0, samples, 0, width);
    float y2 = map(waveform.data[i], -1, 1, random(height / 2 - 400, height / 4 - 400), height / 2 + 400);
    float amplitude = abs(waveform.data[i]);
    int alphaValue = (int) map(amplitude, 0, 0.5, 75, 220); // Adjust transparency
    strokeWeight(5);
    stroke(lerpColor(color(120, 45, 170), color(130, 15, 185), amplitude), alphaValue); // Ghost trail effect
    line(x1, y1, x2, y2);
  }
}
// Left click speeds playback up by 0.1x per click; right click slows it
// down, clamped so the rate never drops below 0.1x.
void mousePressed() {
  if (mouseButton == RIGHT) {
    // Keep the sample audibly playing — never fall below 0.1x.
    playbackRate = max(playbackRate - 0.1, 0.1);
  } else if (mouseButton == LEFT) {
    playbackRate += 0.1;
  }
  sample.rate(playbackRate);
}