Notes
![]() ![]() Notes - notes.io |
import time

import altair as alt
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import sounddevice as sd
import soundfile as sf
import streamlit as st
import streamlit_vertical_slider as svs
from IPython.display import Audio
from numpy.fft import irfft, rfft, rfftfreq
from scipy.fft import fft, fftfreq
def to_librosa(file_uploaded):
    """Decode an uploaded audio file with librosa.

    Returns a ``(samples, sample_rate)`` tuple, or ``None`` when no file
    was uploaded (mirrors Streamlit's empty-uploader state).
    """
    if file_uploaded is None:
        return None
    samples, sample_rate = librosa.load(file_uploaded)
    return samples, sample_rate
def generate_slider(arr_names, arr_values, n=1):
    """Render one vertical slider per band, side by side.

    Args:
        arr_names: label/key for each slider (one Streamlit column each).
        arr_values: per-slider ``[min, max, default]`` triples.
        n: slider step size.

    Returns:
        List of current slider values in the same order as ``arr_names``
        (an entry may be ``None`` until the user first touches a slider).
    """
    slider_values = []
    cols = st.columns(len(arr_names))
    # zip replaces the index loop; unpacking kills the `tuple` builtin shadowing
    # present in the original implementation.
    for col, name, (min_v, max_v, default_v) in zip(cols, arr_names, arr_values):
        with col:
            slider = svs.vertical_slider(
                key=name, min_value=min_v, max_value=max_v,
                default_value=default_v, step=n)
            slider_values.append(slider)
            st.write(name)
    return slider_values
def fourier_transform(audio_file, sample_rate):
    """Real FFT of a time-domain signal.

    Returns:
        (magnitude, frequency): complex rFFT coefficients and the
        matching frequency bins in Hz.
    """
    sample_spacing = 1 / sample_rate
    spectrum = rfft(audio_file)
    bins = rfftfreq(len(audio_file), sample_spacing)
    return spectrum, bins
def modifiy_general_signal(name, magnitude_freq, frequency_freq_domain, sliders_value, ranges):
    """Scale rFFT magnitudes band by band according to slider gains.

    Args:
        name: band labels, used as keys into ``ranges``.
        magnitude_freq: complex rFFT coefficients; modified in place and returned.
        frequency_freq_domain: frequency (Hz) of each coefficient.
        sliders_value: gain per band; ``None`` entries mean "untouched slider"
            and are treated as unity gain.
        ranges: maps band label -> [low_hz, high_hz] (exclusive bounds).

    Returns:
        The (in-place modified) ``magnitude_freq`` array.
    """
    # Normalize into a local list instead of mutating the caller's slider list,
    # and use `is None` rather than `== None`.
    gains = [1 if v is None else v for v in sliders_value]
    for band_idx, gain in enumerate(gains):
        low, high = ranges[name[band_idx]][0], ranges[name[band_idx]][1]
        # enumerate replaces the fragile manual `counter` bookkeeping.
        for freq_idx, freq in enumerate(frequency_freq_domain):
            if low < freq < high:  # strict bounds, as in the original
                magnitude_freq[freq_idx] = magnitude_freq[freq_idx] * np.abs(gain)
    return magnitude_freq
def modifiy_medical_signal(Ecg_file, sliders_value):
    """Filter an ECG trace in the frequency domain and plot the result.

    Args:
        Ecg_file: DataFrame whose column 0 is time (s) and column 1 is
            amplitude (mV); assumes uniform sampling — TODO confirm.
        sliders_value: three gains for the bands >130 Hz, 80-130 Hz and
            <80 Hz; ``None`` entries default to unity gain.

    Returns:
        Real time-domain amplitude after inverse rFFT.
    """
    fig1 = go.Figure()
    fig1.update_xaxes(
        title_text="frequency",
        title_font={"size": 20},
        title_standoff=25)
    fig1.update_yaxes(
        title_text="Amplitude(mv)",
        title_font={"size": 20},
        title_standoff=25)
    # `is None` instead of `== None`; keep a local list instead of mutating input.
    gains = [1 if v is None else v for v in sliders_value]
    # Renamed from `time` so it no longer shadows the imported `time` module.
    time_col = Ecg_file.iloc[:, 0]
    magnitude = Ecg_file.iloc[:, 1]
    sample_period = time_col[1] - time_col[0]
    fourier = rfft(magnitude)
    frequencies = rfftfreq(len(time_col), sample_period)
    for idx, freq in enumerate(frequencies):
        # NOTE(review): bins at exactly 80 or 130 Hz pass through unscaled,
        # matching the original's strict comparisons.
        if freq > 130:
            fourier[idx] *= gains[0]
        if 80 < freq < 130:
            fourier[idx] *= gains[1]
        if freq < 80:
            fourier[idx] *= gains[2]
    time_domain_amplitude = np.real(irfft(fourier))
    fig1.add_scatter(x=time_col, y=time_domain_amplitude)
    st.plotly_chart(fig1, use_container_width=True)
    return time_domain_amplitude
def modifiy_Pitch_signal(sample, sample_rate, sliders_value):
    """Pitch-shift audio by ``sliders_value`` semitones via librosa.

    A ``None`` slider (untouched widget) falls back to a 5-semitone shift,
    as in the original. Uses `is None` instead of `== None` and avoids
    rebinding the parameter.
    """
    n_steps = 5 if sliders_value is None else sliders_value
    return librosa.effects.pitch_shift(y=sample, sr=sample_rate, n_steps=n_steps)
def processing(mode, names, values_slider, magnitude_at_time, sample_rate, show_spec, ranges, pitch_step):
    """Dispatch the selected processing mode and render audio/plots/spectrograms.

    Fix: the original created col1..col4 only inside the Frequency/Vowels/
    Music Instrument branch, so 'Pitch Shift' mode crashed with NameError
    when `with col1:` ran below. Columns are now created before branching.
    """
    col1, col2 = st.columns(2)
    col3, col4 = st.columns(2)
    if mode in ('Frequency', 'Vowels', 'Music Instrument'):
        slider = generate_slider(names, values_slider)
        magnitude_freq_domain, frequency_freq_domain = fourier_transform(
            magnitude_at_time, sample_rate)
        magnitude_after_modifiy = modifiy_general_signal(
            names, magnitude_freq_domain, frequency_freq_domain, slider, ranges)
        magnitude_time_after_inverse = Inverse_fourier_transform(
            magnitude_after_modifiy)
    elif mode == 'Pitch Shift':
        magnitude_time_after_inverse = modifiy_Pitch_signal(
            magnitude_at_time, sample_rate, pitch_step)
    audio_after_show(magnitude_time_after_inverse, sample_rate)
    with col1:
        show_plot(magnitude_at_time, magnitude_time_after_inverse, sample_rate)
    if show_spec == 1:
        with col3:
            st.pyplot(spectogram(magnitude_at_time, "Before"))
        with col4:
            st.pyplot(spectogram(magnitude_time_after_inverse, "After"))
def audio_after_show(magnitude_time_after_inverse, sample_rate):
    """Persist the processed signal to output.wav and embed a sidebar player.

    Note: always writes to the fixed relative path "output.wav", so
    concurrent sessions would overwrite each other's file.
    """
    st.sidebar.write("## Audio after")
    sf.write("output.wav", magnitude_time_after_inverse, sample_rate)
    st.sidebar.audio("output.wav")
def Inverse_fourier_transform(magnitude_freq_domain):
    """Inverse real FFT; returns the real part as the time-domain signal."""
    return np.real(np.fft.irfft(magnitude_freq_domain))
def plot_animation(df):
    """Build a two-panel linked Altair line chart (before | after).

    Both panels share one time axis and one interval-brush selection so
    zoom/selection stays synchronized.
    """
    brush = alt.selection_interval()
    base = (
        alt.Chart(df)
        .mark_line()
        .encode(x=alt.X('time', axis=alt.Axis(title='Time')))
        .properties(width=400, height=200)
        .add_selection(brush)
        .interactive()
    )
    before = base.encode(
        y=alt.Y('amplitude', axis=alt.Axis(title='Amplitude')))
    after = base.encode(
        y=alt.Y('amplitude after processing',
                axis=alt.Axis(title='Amplitude after'))).add_selection(brush)
    return before | after
def currentState(df, size, num_of_element):
    """Pick the initial animation window from session state and draw it.

    Reads ``st.session_state.size1``, ``.flag`` and ``.i`` — presumably
    initialized by the surrounding Streamlit app before this is called;
    verify against the caller. If both conditions below are false,
    ``step_df`` is unbound and this raises — TODO confirm that the state
    machine guarantees at least one branch fires.
    """
    if st.session_state.size1 == 0:
        # Fresh run: show the full series.
        step_df = df.iloc[0:num_of_element]
    if st.session_state.flag == 0:
        # Mid-animation rerun: resume from the last window position.
        step_df = df.iloc[st.session_state.i: st.session_state.size1 - 1]
    lines = plot_animation(step_df)
    line_plot = st.altair_chart(lines)
    # NOTE(review): chart is rendered twice — once via st, once on the
    # returned DeltaGenerator; the second call looks redundant. Confirm
    # before removing.
    line_plot = line_plot.altair_chart(lines)
    return line_plot
def plotRep(df, size, start, num_of_element, line_plot):
    """Animate the signal by sliding a fixed-size window through ``df``.

    Updates session-state bookkeeping every step so a Streamlit rerun can
    resume; once the window reaches the end, flags completion and renders
    the full series.
    """
    for pos in range(start, num_of_element - size):
        st.session_state.start = pos
        st.session_state.startSize = pos - 1
        window = df.iloc[pos:pos + size]
        st.session_state.size1 = pos + size
        line_plot.altair_chart(plot_animation(window))
        time.sleep(.1)
        if st.session_state.size1 == num_of_element - 1:
            # Window hit the end of the data: mark done, show everything.
            st.session_state.flag = 1
            line_plot.altair_chart(plot_animation(df.iloc[0:num_of_element]))
def show_plot(samples, samples_after_modification, sampling_rate):
    """Downsample both signals 500:1 and hand them to the window animation."""
    # One time axis serves both frames: the original built two identical
    # arrays from the same length and rate.
    times = np.arange(len(samples)) / sampling_rate
    df_before = pd.DataFrame(
        {'time': times[::500], 'amplitude': samples[::500]},
        columns=['time', 'amplitude'])
    df_after = pd.DataFrame(
        {'time_after': times[::500],
         'amplitude after processing': samples_after_modification[::500]},
        columns=['time_after', 'amplitude after processing'])
    common_df = df_before.merge(df_after, left_on='time', right_on='time_after')
    common_df.pop("time_after")
    num_of_element = common_df.shape[0]
    size = 10  # burst: points shown per animation frame
    line_plot = currentState(common_df, size, num_of_element)
    plotRep(common_df, size, st.session_state.start, num_of_element, line_plot)
def spectogram(y, title_of_graph):
    """Render a dB-scaled STFT spectrogram of ``y`` on a fresh figure.

    Args:
        y: 1-D audio samples.
        title_of_graph: axes title.

    Returns:
        The matplotlib Figure holding the spectrogram.
    """
    D = librosa.stft(y)
    S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
    fig, ax = plt.subplots()
    img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)
    ax.set(title=title_of_graph)
    fig.colorbar(img, ax=ax, format="%+2.f dB")
    # Return the figure we built instead of plt.gcf(), which could pick up
    # a different "current" figure if other plotting happened meanwhile.
    return fig
# Define Equalizer Bands and Their Initial Settings
# Default equalizer configuration: band name -> [low_hz, high_hz, gain_db].
# A gain of 0 dB leaves the band unchanged (see apply_equalizer).
equalizer_bands = {
    'Bass': [20, 200, 0],  # Frequency range: 20Hz - 200Hz, initial gain: 0 dB
    'Midrange': [200, 2000, 0],  # Frequency range: 200Hz - 2kHz, initial gain: 0 dB
    'Treble': [2000, 20000, 0]  # Frequency range: 2kHz - 20kHz, initial gain: 0 dB
}
# Modify Audio Based on Equalizer Settings
def apply_equalizer(audio, sample_rate, equalizer_settings):
    """Apply per-band dB gains to ``audio`` in the frequency domain.

    Args:
        audio: 1-D time-domain samples.
        sample_rate: sampling rate in Hz.
        equalizer_settings: maps band name -> [low_hz, high_hz, gain_db]
            (inclusive frequency bounds).

    Returns:
        Real time-domain signal after inverse rFFT.
    """
    magnitude_freq_domain, frequency_freq_domain = fourier_transform(audio, sample_rate)
    for low_freq, high_freq, gain_db in equalizer_settings.values():
        # Hoist the dB->linear conversion out of the per-bin work, and use a
        # vectorized mask instead of the original Python loop over every bin.
        factor = 10 ** (gain_db / 20.0)
        band = (frequency_freq_domain >= low_freq) & (frequency_freq_domain <= high_freq)
        magnitude_freq_domain[band] *= factor
    return Inverse_fourier_transform(magnitude_freq_domain)
# Modify the main_processing function
def main_processing(file_uploaded, mode, equalizer_settings, show_spec, names, values_slider, ranges):
    """Load the audio, run the selected mode, and show results.

    Fixes over the original:
    - 'Equalizer' and 'Pitch Shift' no longer fall through into the band
      pipeline (the original re-emitted audio for Equalizer and raised
      KeyError for Pitch Shift because ``ranges`` was empty).
    - The band pipeline now passes per-band default gains, not whole
      ``[min, max, default]`` triples, to modifiy_general_signal.
    """
    samples, sample_rate = to_librosa(file_uploaded)

    if mode == 'Equalizer':
        modified_audio = apply_equalizer(samples, sample_rate, equalizer_settings)
        audio_after_show(modified_audio, sample_rate)
        if show_spec == 1:
            spectogram(samples, "Before")
            spectogram(modified_audio, "After")
        return

    if mode == 'Pitch Shift':
        pitch_step = st.slider(label="Pitch Shift", max_value=12, min_value=-12, step=1, value=5)
        shifted = modifiy_Pitch_signal(samples, sample_rate, pitch_step)
        audio_after_show(shifted, sample_rate)
        if show_spec == 1:
            spectogram(samples, "Before")
            spectogram(shifted, "After")
        return

    # Band-based modes: band name -> [low_hz, high_hz, unused].
    mode_bands = {
        'Frequency': {"0:1000": [0, 1000, 0],
                      "1000:2000": [1000, 2000, 0],
                      "2000:3000": [2000, 3000, 0],
                      "3000:4000": [3000, 4000, 0],
                      "4000:5000": [4000, 5000, 0]},
        'Vowels': {"h": [1900, 5000, 0],
                   "R": [1500, 3000, 0],
                   "O": [500, 2000, 0],
                   "Y": [490, 2800, 0]},
        'Music Instrument': {"Drum ": [0, 500, 0],
                             "Flute": [500, 1000, 0],
                             "Key": [1000, 2000, 0],
                             "Piano": [2000, 5000, 0]},
    }
    if mode in mode_bands:
        dictionary_values = mode_bands[mode]
        values_slider = [[0, 10, 1]] * len(dictionary_values)
        names = list(dictionary_values.keys())
        ranges = dictionary_values

    magnitude_freq_domain, frequency_freq_domain = fourier_transform(samples, sample_rate)
    # Use each slider triple's default (index 2) as the gain.
    gains = [triple[2] for triple in values_slider]
    magnitude_after_modifiy = modifiy_general_signal(
        names, magnitude_freq_domain, frequency_freq_domain, gains, ranges)
    magnitude_time_after_inverse = Inverse_fourier_transform(magnitude_after_modifiy)
    audio_after_show(magnitude_time_after_inverse, sample_rate)
    if show_spec == 1:
        spectogram(samples, "Before")
        spectogram(magnitude_time_after_inverse, "After")
# Save Original Audio Function
def save_original_audio(samples, sample_rate, output_filename):
    """Write the unmodified samples to ``output_filename`` as audio."""
    sf.write(output_filename, samples, sample_rate)
# Modify and Save Modified Audio Function
def save_modified_audio(magnitude_time_after_inverse, sample_rate, output_filename):
    """Write the processed time-domain signal to ``output_filename`` as audio."""
    sf.write(output_filename, magnitude_time_after_inverse, sample_rate)
if __name__ == "__main__":
    # Demo driver: runs the Equalizer pipeline against a hard-coded file.
    file_uploaded = "/home/admin1/Downloads/File 12_mixdown.wav"
    mode = "Equalizer"
    show_spec = 1
    equalizer_settings = equalizer_bands

    # Default slider configuration for the non-Equalizer modes.
    # Fix: the original referenced an undefined `dictionary_values` in the
    # Frequency/Vowels/Music Instrument branches (latent NameError); the band
    # tables are now defined here, table-driven.
    names = []
    values_slider = []
    ranges = {}
    mode_bands = {
        'Frequency': {"0:1000": [0, 1000, 0],
                      "1000:2000": [1000, 2000, 0],
                      "2000:3000": [2000, 3000, 0],
                      "3000:4000": [3000, 4000, 0],
                      "4000:5000": [4000, 5000, 0]},
        'Vowels': {"h": [1900, 5000, 0],
                   "R": [1500, 3000, 0],
                   "O": [500, 2000, 0],
                   "Y": [490, 2800, 0]},
        'Music Instrument': {"Drum ": [0, 500, 0],
                             "Flute": [500, 1000, 0],
                             "Key": [1000, 2000, 0],
                             "Piano": [2000, 5000, 0]},
    }
    if mode in mode_bands:
        dictionary_values = mode_bands[mode]
        names = list(dictionary_values.keys())
        ranges = dictionary_values
        values_slider = [[0, 10, 1]] * len(dictionary_values)
    elif mode == 'Pitch Shift':
        names = ["Pitch Step"]
        values_slider = [[-12, 12, 1]]

    samples, sample_rate = to_librosa(file_uploaded)
    # Save the original audio before any processing.
    save_original_audio(samples, sample_rate, "/home/admin1/Downloads/original_audio.wav")
    if mode == 'Equalizer':
        modified_audio = apply_equalizer(samples, sample_rate, equalizer_settings)
        # Save the modified audio alongside the original.
        save_modified_audio(modified_audio, sample_rate, "/home/admin1/Downloads/modified_audio.wav")
    main_processing(file_uploaded, mode, equalizer_settings, show_spec, names, values_slider, ranges)
![]() |
Notes is a web-based application for taking notes online. You can take notes and share them with other people. If you like taking long notes, notes.io is designed for you. To date, over 8,000,000,000 notes have been created, and counting...
With notes.io;
- * You can take a note from anywhere and any device with internet connection.
- * You can share the notes in social platforms (YouTube, Facebook, Twitter, instagram etc.).
- * You can quickly share your contents without website, blog and e-mail.
- * You don't need to create any Account to share a note. As you wish you can use quick, easy and best shortened notes with sms, websites, e-mail, or messaging services (WhatsApp, iMessage, Telegram, Signal).
- * Notes.io has fabulous infrastructure design for a short link and allows you to share the note as an easy and understandable link.
Fast: Notes.io is built for speed and performance. You can take notes quickly and browse your archive.
Easy: Notes.io doesn’t require installation. Just write and share note!
Short: Notes.io’s URL is just 8 characters. You’ll get a shortened link to your note when you want to share it. (Ex: notes.io/q )
Free: Notes.io has been running for 14 years and has been free since the day it started.
You immediately create your first note and start sharing with the ones you wish. If you want to contact us, you can use the following communication channels;
Email: [email protected]
Twitter: http://twitter.com/notesio
Instagram: http://instagram.com/notes.io
Facebook: http://facebook.com/notesio
Regards;
Notes.io Team