~signal_processor/tfft

0667cab5cf1e0a0286dc92837f14c9da05050b69 — signal_Processor a month ago f9c60fc master
Update and fix dependencies
5 files changed, 78 insertions(+), 67 deletions(-)

M Cargo.toml
M fft/Cargo.toml
M fft/src/lib.rs
M tui/Cargo.toml
M tui/src/main.rs
M Cargo.toml => Cargo.toml +1 -1
@@ 1,5 1,5 @@
[workspace]
members = ["fft", "tui"]
members = ["fft", "tui", "gui"]
resolver = "2"

[profile.release]

M fft/Cargo.toml => fft/Cargo.toml +8 -2
@@ 7,7 7,13 @@ edition = "2021"

[dependencies]
realfft = "3.3.0"
audio_backend = {git="https://git.sr.ht/~signal_processor/audio_backend"}
audio_backend = {git="https://git.sr.ht/~signal_processor/audio_backend", rev="e18bd11"}
num = "0.4.1"

blockfree-chan = {git = "https://git.sr.ht/~signal_processor/blockfree_chan"}
blockfree-chan = {git = "https://git.sr.ht/~signal_processor/blockfree_chan", rev="d6ccf48b7b38b7d6b29cb911cb9ab929426dfb19"}

[features]
# default = ["jack"]
# test_signal = ["dep:ds_p"]
# jack = ["audio_backend/jack"]
# pipewire = ["audio_backend/pipewire"]

M fft/src/lib.rs => fft/src/lib.rs +22 -15
@@ 1,11 1,11 @@
use std::sync::{RwLock, Arc};

use audio_backend::node_interface::{Metadata, Processor};
pub use audio_backend::node_interface::{audio_buffer::{Audio, AudioBufferIterator, Buffers, Buffer, Layout, ProcessSpec}, params::{EmptyParamMetadatas, EmptyParamVals}, IoMetadata, Processor};
use blockfree_chan::{BlockFreeTx, blockfree_channel};
use num::{Float, Signed, FromPrimitive};

pub use audio_backend::Backend;
pub use blockfree_chan::BlockFreeRx;
pub use audio_backend;
pub use blockfree_chan;
///Helper functions and their data for running and configuring several overlapping ffts

use realfft::num_complex::Complex;


@@ 64,7 64,7 @@ pub struct FftState<T: Float> {
    pub spec: FftSpec,
    //determines if the fft data has been read before.
    pub stale: bool,
    bins_tx: BlockFreeTx<Vec<T>>,
    pub bins_tx: BlockFreeTx<Vec<T>>,
    vec: Vec<T>,
}



@@ 95,10 95,13 @@ impl<T: Float+std::fmt::Debug+Send+Sync+Signed+FromPrimitive+'static> FftState<T
}

impl<T: Float+Send+Sync> Processor for FftState<T> {
    type ParamTag = ();
    type ParamVals = EmptyParamVals;
    type ParamMetadatas = EmptyParamMetadatas;
    type SampleType = T;

    fn process(&mut self, audio: &mut audio_backend::node_interface::Audio<Self::SampleType>) {
        for frame in audio.input_frames() {
    fn process(&mut self, audio: &mut audio_backend::node_interface::audio_buffer::Audio<Self::SampleType>) {
        for frame in audio.input().frames() {
            for samp in frame.take(1) {
                let buf_len = self.running_buf.len() as u32;
                self.running_buf[self.idx as usize] = *samp; 


@@ 140,27 143,31 @@ impl<T: Float+Send+Sync> Processor for FftState<T> {
        
    }

    fn params(&mut self) -> &mut dyn audio_backend::node_interface::Param {
        todo!()    
    fn param_vals(&self) -> std::sync::Arc<Self::ParamVals> {
        Arc::new(EmptyParamVals)
    }

    fn prepare(&mut self, spec: &audio_backend::node_interface::ProcessSpec) {
    fn param_metadata() -> &'static Self::ParamMetadatas {
        &EmptyParamMetadatas
    }

    fn prepare(&mut self, spec: &audio_backend::node_interface::audio_buffer::ProcessSpec) {
       self.reset(); 
    }

    fn metadata(&self) -> Metadata {
        Metadata{inputs: vec!["Input".into()], outputs: vec![], name: "FFT".into()}
    fn io_metadata(&self) -> IoMetadata {
        IoMetadata{inputs: vec!["Input".into()], outputs: vec![], name: "FFT".into()}
    }
}

pub fn run_fft(name: &str, backend: audio_backend::Backend, window: Windows, spec: FftSpec) -> BlockFreeRx<Vec<f32>> {
pub fn run_fft(name: &str, backend: audio_backend::Backend, window: Windows, spec: FftSpec) -> blockfree_chan::BlockFreeRx<Vec<f32>> {
    let (bins_tx, bins_rx) = blockfree_channel(&vec![0.0; spec.num_bins as usize/2]);

    let fft_node: FftState<f32> = FftState::new(spec, window, bins_tx);
    let mut meta = fft_node.metadata();
    meta.name = name.into();
    // let mut meta = fft_node.metadata();
    // meta.name = name.into();

    audio_backend::run(backend, &meta, Arc::new(RwLock::new(fft_node)));
    audio_backend::run(backend, Arc::new(RwLock::new(fft_node)));
    bins_rx
}


M tui/Cargo.toml => tui/Cargo.toml +6 -7
@@ 7,13 7,12 @@ edition = "2021"

[dependencies]
atomic_float = "0.1.0"
blockfree-chan = {git = "https://git.sr.ht/~signal_processor/blockfree_chan"}
# blockfree-chan = {git = "https://git.sr.ht/~signal_processor/blockfree_chan", rev="d6ccf48b7b38b7d6b29cb911cb9ab929426dfb19"}
bytemuck = "1.13.0"
fft = {path = "../fft"}
jack = {version = "0.10.0", optional = true }
simple-pipewire-filter = {git = "https://git.sr.ht/~signal_processor/simple-pipewire-filter", optional = true }
ds_p = {git="https://git.sr.ht/~signal_processor/ds_p", optional = true}
realfft = "3.0.0"
# jack = {version = "0.11.0", optional = true }
# simple-pipewire-filter = {git = "https://git.sr.ht/~signal_processor/simple-pipewire-filter", optional = true }
ds_p = {git="https://git.sr.ht/~signal_processor/ds_p", rev = "a0625e9d6d014b82ff25d566072cf48361c6cd8b", optional = true}
signal-hook = "0.3.14"
termion = "1.5.6"
numtoa = "0.2.4"


@@ 21,5 20,5 @@ numtoa = "0.2.4"
[features]
default = ["jack"]
test_signal = ["dep:ds_p"]
jack = ["dep:jack"]
pipewire = ["dep:simple-pipewire-filter"]
jack = []
pipewire = []

M tui/src/main.rs => tui/src/main.rs +41 -42
@@ 1,13 1,17 @@
use std::{sync::{Arc, atomic::{AtomicBool, Ordering::{Acquire, SeqCst}}}, thread::{spawn, sleep}, time::Duration};
use atomic_float::AtomicF32;

use realfft::num_complex::Complex;
use fft::{blockfree_chan::blockfree_channel, Audio, AudioBufferIterator, Buffer, Buffers, Layout, ProcessSpec, Processor};

use termion::{cursor::{Goto, HideCursor, Left}, color::AnsiValue, event::Key, raw::IntoRawMode, screen::AlternateScreen, input::TermRead, clear};

use blockfree_chan::blockfree_channel;
use fft::{FftSpec, FftState};

#[cfg(feature="jack")]
use fft::audio_backend::jack;

#[cfg(feature="pipewire")]
use fft::audio_backend::simple_pipewire_filter;

///Enum to store which test signal is to be used.
#[derive(Copy, Clone, Debug)]
enum SignalMode {


@@ 106,13 110,12 @@ struct AudioContext {
    processing: Arc<AtomicBool>,
    active: Arc<AtomicBool>,
    vec: Vec<f32>,
    fft: FftState,
    fft: FftState<f32>,
    sig: Option<TestSignalContext>,
    bins_tx: blockfree_chan::BlockFreeTx<Vec<f32>>,
}

///Given an input buffer, update the ffts, and prepare data for rendering.
fn process_callback(buf: &[f32], ctx: &mut AudioContext) {
fn process_callback(buf: &mut Audio<f32>, ctx: &mut AudioContext) {
//raise the processing flag. If we quit in jack, this lets us not quit, which would segfault if the
//audio thread is running.
    ctx.processing.store(true, SeqCst);


@@ 137,39 140,19 @@ fn process_callback(buf: &[f32], ctx: &mut AudioContext) {
    }

//Pipewire is ok with throwing bad data at us. Cool!
    if buf.iter().any(|samp| samp.is_nan() || samp.is_infinite()) {return;}
    for samp in buf {
        //If we have a signal generator, generate samples, otherwise use the input sample.
        //Pass it to the fft
        //Decay the magnitude level 
        {
            let s = sig.as_mut().map(|s| s.gen_sample(samp_r, *samp)).unwrap_or(*samp);
            fft.process_sample(s);
            let m1 = (m*0.95).max(s.abs());
            m = m1*0.01+(*state*0.99);
            *state = m;
    if buf.input().frames().flatten().any(|samp| samp.is_nan() || samp.is_infinite()) {return;}
    for frame in buf.output().frames_mut() {
        let out = sig.as_mut().map(|s| s.gen_sample(samp_r, *frame.clone().nth(0).unwrap())).unwrap_or(0.0);
        let m1 = (m*0.95).max(out.abs());
        m = m1*0.01+(*state*0.99);
        *state = m;
        for samp in frame {
            *samp = out;
        }

        // If the fft has updated, we want to take our bins, get their magnitudes, and copy
        // them to a buffer that the UI can read
        if !fft.stale {
            let l = vec.len();
            //normalize the levels of each bin.
            fft.spectrum.iter().zip(vec.iter_mut()).for_each(|(x, y): (&Complex<f32>, &mut f32)| {
                let x1 = x.norm_sqr();
                *y = x1/l as f32;
            });
            //send the bins to the ui thread. We use this method so that we can avoid allocating a
            //new vec, and thus breaking our realtime requirements.
            ctx.bins_tx.write_by(vec, &mut |old: &mut Vec<f32>, new: &Vec<f32>| {
                old.copy_from_slice(new);
            });
            fft.stale = true;
        }
        //We also want to update the magnitude that the UI can see- it uses this to determine what
        //color the bins should be.
        ctx.mag.store(m, SeqCst);
    }
    fft.process(buf);
    ctx.mag.store(m, SeqCst);

    //lower the processing flag. If we quit in jack, this lets us not quit, which will segfault if the
    //audio thread is running.
    ctx.processing.store(false, SeqCst);


@@ 341,14 324,14 @@ fn main() {
    let active = Arc::new(AtomicBool::new(false));
    let mut ctx = {
        let overlap = ((FPS as u32*num_bins) as f32/SR as f32).ceil() as u32;
        let fft = FftState::new(FftSpec{num_bins, overlap}, fft::Windows::Hann);
        let fft = FftState::new(FftSpec{num_bins, overlap}, fft::Windows::Hann, bins_tx);
        let reset = Arc::new(AtomicBool::new(false));

        let sig = signal.map(|s| TestSignalContext::new(s));
        let state = 0.0;

        AudioContext{
            bins_tx, fft, mag: mag.clone(), reset: reset.clone(), sig, sr: sr.clone(), state, vec, processing: processing.clone(), active: active.clone(),
            fft, mag: mag.clone(), reset: reset.clone(), sig, sr: sr.clone(), state, vec, processing: processing.clone(), active: active.clone(),
        }
    };



@@ 375,12 358,23 @@ fn main() {
                                        .register_port("input", jack::AudioIn::default())
                                        .expect("Could not create input from JACK");
                        //Set up the audio callback for JACK.
                        let sr = s.load(SeqCst);
                        let len = (sr/FPS as f32).ceil() as usize;
                        let buf = vec![0.0;len];
                        let spec = ProcessSpec { samp_rate: sr as f64, max_frames: len as u32, max_input_channels: 1, max_output_channels: 0 };
                        let mut buf = Buffer::from_spec(&spec, Layout::Stacked, true);
                        let process_handler = {
                            jack::ClosureProcessHandler::new(
                                move |_client: &jack::Client, scope: &jack::ProcessScope| {
                                    if !quit.load(SeqCst) {
                                        let is = input.as_slice(scope);
                                        process_callback(is, &mut ctx);
                                        for (samp, input) in buf.chans_mut().flatten().zip(is.into_iter()) {
                                            *samp = *input;
                                        }
                                        let buf = Buffers::Single(&mut buf as _);
                                        let mut buf = Audio::new(buf, None);
                                        //copy is into buf, create buf
                                        process_callback(&mut buf, &mut ctx);
                                    }
                                    jack::Control::Continue
                            })


@@ 441,11 435,16 @@ fn main() {
            //If there is no backend, generate a frame's worth of samples and call the process
            //function once every frame;
            None => {
                let buf = vec![0.0;(s.load(SeqCst)/FPS as f32).ceil() as usize];
                let sr = s.load(SeqCst);
                let len = (sr/FPS as f32).ceil() as usize;
                let spec = ProcessSpec { samp_rate: sr as f64, max_frames: len as u32, max_input_channels: 1, max_output_channels: 0 };
                let mut buf = Buffer::from_spec(&spec, Layout::Stacked, true);
                a.store(true, SeqCst);
                while !quit.load(SeqCst) {
                    let start = std::time::Instant::now();
                    process_callback(&buf, &mut ctx);
                    let buf = Buffers::Single(&mut buf as _);
                    let mut buf = Audio::new(buf, None);
                    process_callback(&mut buf, &mut ctx);
                    std::thread::sleep(std::time::Duration::from_secs_f32(1.0/FPS as f32)-start.elapsed());
                }
            }