from pydub import AudioSegment
from pydub.generators import Sine, WhiteNoise

def knowledge_sine(base_freq: float, duration: int,
                   knowledge_level: float = 1, variance: float = 5) -> AudioSegment:
    """Generates a sine wave with subtle variations based on knowledge level.

    Args:
        base_freq (float): The base frequency of the sine wave in Hz.
        duration (int): The duration of the sine wave in milliseconds.
        knowledge_level (float, optional): A multiplier for the base frequency,
            representing the knowledge level. Defaults to 1.
        variance (float, optional): The amount of random variance in frequency in Hz.
            Defaults to 5.

    Returns:
        AudioSegment: The generated sine wave with variations.
    """
    # ... (Implementation remains the same)
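The implementation is elided in this dump. A minimal sketch consistent with the docstring, assuming the variance is applied as a single uniform random offset to the knowledge-scaled frequency (and reusing the pydub imports above), might look like:

import random

def knowledge_sine(base_freq: float, duration: int,
                   knowledge_level: float = 1, variance: float = 5) -> AudioSegment:
    # Scale the base frequency by the knowledge level, then add random jitter.
    freq = base_freq * knowledge_level + random.uniform(-variance, variance)
    return Sine(freq).to_audio_segment(duration=duration)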
def automated_amplifier(sound: AudioSegment, threshold: float = -20) -> AudioSegment:
    """Amplifies quiet sounds to ensure audibility.

    Args:
        sound (AudioSegment): The sound to be amplified.
        threshold (float, optional): The dBFS threshold below which sounds will be amplified.
            Defaults to -20.

    Returns:
        AudioSegment: The amplified sound.
    """
    # ... (Implementation remains the same)
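The body is elided here as well; one plausible reading of the docstring is a conditional gain boost up to the threshold:

def automated_amplifier(sound: AudioSegment, threshold: float = -20) -> AudioSegment:
    # Boost quiet audio up to the threshold; leave louder audio untouched.
    if sound.dBFS < threshold:
        return sound.apply_gain(threshold - sound.dBFS)
    return sound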
from .audio_utils import knowledge_sine
import random

def visual_system(duration: int, base_freq: float = None, complexity: float = 1.0) -> AudioSegment:
    """Simulates visual input with varying frequencies and complexity.

    Args:
        duration (int): The duration of the audio segment in milliseconds.
        base_freq (float, optional): The base frequency in Hz. If None, a random frequency
            between 800 and 1500 Hz is chosen. Defaults to None.
        complexity (float, optional): A multiplier that influences the number of sine waves
            generated. Defaults to 1.0.

    Returns:
        AudioSegment: The generated audio segment simulating visual input.
    """
    # ... (Implementation remains the same)
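A sketch of what the elided body could do, assuming complexity simply scales the number of overlaid, slightly detuned layers (the layer count and detune step are illustrative):

import random
from pydub import AudioSegment
from .audio_utils import knowledge_sine

def visual_system(duration: int, base_freq: float = None, complexity: float = 1.0) -> AudioSegment:
    if base_freq is None:
        base_freq = random.uniform(800, 1500)
    layered = AudioSegment.silent(duration=duration)
    # More complexity -> more overlaid, slightly detuned sine layers.
    for i in range(max(1, round(3 * complexity))):
        layered = layered.overlay(knowledge_sine(base_freq * (1 + 0.05 * i), duration))
    return layered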
from .dragonfly_systems import *
from .gemini_systems import *
import librosa
import numpy as np

def generate_world(duration: int = 10000, prev_world: AudioSegment = None,
                   sensor_data: dict = None) -> AudioSegment:
    """Combines all systems to create a dynamic soundscape.

    Args:
        duration (int, optional): The duration of the soundscape in milliseconds. Defaults to 10000.
        prev_world (AudioSegment, optional): The previous soundscape, used for analysis and
            transitioning. Defaults to None.
        sensor_data (dict, optional): A dictionary containing sensor readings (e.g., temperature,
            humidity). Defaults to None.

    Returns:
        AudioSegment: The generated soundscape.
    """
    # ... (Implementation with audio analysis and system generation)
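The full implementation is elided; a hedged sketch of the combination logic, assuming a temperature reading maps to complexity (the same mapping used later in this document) and a short crossfade handles the transition from prev_world:

from pydub import AudioSegment

def generate_world(duration: int = 10000, prev_world: AudioSegment = None,
                   sensor_data: dict = None) -> AudioSegment:
    # Map a sensor reading to synthesis complexity (temperature is illustrative).
    complexity = (sensor_data or {}).get("temperature", 1.0)
    world = AudioSegment.silent(duration=duration).overlay(
        visual_system(duration, complexity=complexity))
    if prev_world is not None:
        # Crossfade out of the previous soundscape, then keep the new tail.
        world = prev_world.append(world, crossfade=min(1000, len(prev_world)))[-duration:]
    return world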
def check_permission(permission_name: str) -> bool:
    """Checks if a specific permission is enabled.

    Args:
        permission_name (str): The name of the permission to check
            (e.g., "android.permission.BLUETOOTH").

    Returns:
        bool: True if the permission is granted, False otherwise.
    """
    # ... (Implementation remains the same)
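On Android under Kivy, this check can go through pyjnius; the following sketch assumes that runtime (org.kivy.android.PythonActivity, API 23+) and is one possible implementation, not the original one:

def check_permission(permission_name: str) -> bool:
    try:
        from jnius import autoclass  # pyjnius, available in Android builds
        PythonActivity = autoclass("org.kivy.android.PythonActivity")
        PackageManager = autoclass("android.content.pm.PackageManager")
        result = PythonActivity.mActivity.checkSelfPermission(permission_name)
        return result == PackageManager.PERMISSION_GRANTED
    except Exception:
        return False  # Not on Android, or the check itself failed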
import tensorflow as tf
import numpy as np

def process_audio(audio_data: np.ndarray) -> np.ndarray:
    """Processes audio data using a TensorFlow Lite model.

    Args:
        audio_data (np.ndarray): The audio data as a NumPy array.

    Returns:
        np.ndarray: The processed audio data as a NumPy array, or None if an error occurs.
    """
    try:
        pass  # ... (TensorFlow Lite implementation)
    except Exception as e:
        print(f"Error processing audio: {e}")
        return None
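The elided try-block is the standard TensorFlow Lite interpreter loop; a sketch, assuming a hypothetical model file model.tflite whose input is a single float32 batch:

def process_audio(audio_data: np.ndarray) -> np.ndarray:
    try:
        interpreter = tf.lite.Interpreter(model_path="model.tflite")  # hypothetical path
        interpreter.allocate_tensors()
        input_detail = interpreter.get_input_details()[0]
        output_detail = interpreter.get_output_details()[0]
        # Add a batch dimension and match the model's expected dtype.
        interpreter.set_tensor(input_detail["index"],
                               audio_data.astype(np.float32)[np.newaxis, :])
        interpreter.invoke()
        return interpreter.get_tensor(output_detail["index"])
    except Exception as e:
        print(f"Error processing audio: {e}")
        return None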
import socket

def transmit_audio(audio_data: bytes, destination: str = "localhost", port: int = 5000) -> None:
    """Transmits audio data via WiFi using sockets.

    Args:
        audio_data (bytes): The audio data as bytes.
        destination (str, optional): The IP address or hostname of the destination.
            Defaults to "localhost".
        port (int, optional): The port number to use for the connection. Defaults to 5000.
    """
    try:
        pass  # ... (Socket implementation)
    except Exception as e:
        print(f"Error transmitting audio: {e}")
from kivy.uix.image import AsyncImage
This is a cross-platform application that generates a dynamic soundscape based on sensor data and AI processing.
- Generates immersive audio experiences using various sound synthesis techniques.
- Integrates sensor data to influence the generated soundscape.
- Utilizes AI for audio processing and analysis.
- Transmits audio data via Bluetooth or WiFi.
- Python 3.7 or higher
- Kivy
- pydub
- librosa
- gtts
- numpy
- jnius (for Android)
- tensorflow
- Clone the repository:
git clone https://github.com/your-username/multiverse-app.git
- Create a virtual environment:
python -m venv venv
- Activate the virtual environment:
  - Linux/macOS: source venv/bin/activate
  - Windows: venv\Scripts\activate
- Install dependencies in each operator directory (android_operator/ and apple_operator/):
  pip install -r requirements.txt
- Navigate to the desired operator directory (android_operator/ or apple_operator/).
- Run the main script:
python main.py
Run tests using python -m unittest discover -s tests
This project is licensed under the MIT License - see the LICENSE file for details.
MIT License
Copyright (c) 2025 Thomas Whitney Walsh
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
from scipy.io import wavfile
class ComplexWaveFile:
    def __init__(self, sample_rate=44100, duration=1.0):
        self.sample_rate = sample_rate
        self.duration = duration
        self.time = np.linspace(0, duration, int(sample_rate * duration), False)
        self.omega = np.fft.fftfreq(self.time.size, d=1/self.sample_rate)  # Frequency domain
        self.psi = np.zeros_like(self.omega, dtype=complex)  # Initialize complex wave
    def _phi(self, omega, params):
        """Represents Φᵢ(Ω). Example: Gaussian function modulated by observer effect."""
        mu, sigma, observer_effect = params  # Example parameters
        return np.exp(-(omega - mu)**2 / (2 * sigma**2)) * observer_effect

    def _gamma(self, omega, params):
        """Represents Γⱼ(Ω). Example: Lorentzian function."""
        center, width = params
        return 1 / (1 + ((omega - center) / width)**2)

    def add_internal_process(self, c, phi_params, m, t):
        """Adds a term from the first summation."""
        self.psi += c * self._phi(self.omega, phi_params) * np.exp(-m / t)

    def add_external_influence(self, b, gamma_params, d, l):
        """Adds a term from the second summation."""
        self.psi += b * self._gamma(self.omega, gamma_params) * np.exp(-d / l)

    def generate_waveform(self):
        """Generates the time-domain waveform using inverse FFT."""
        waveform = np.fft.ifft(self.psi)
        return np.real(waveform)  # Take the real part for audio
    def save_wav(self, filename="complex_wave.wav"):
        """Saves the waveform to a WAV file."""
        waveform = self.generate_waveform()
        # Normalize to full scale and convert to 16-bit PCM.
        scaled_waveform = np.int16(waveform / np.max(np.abs(waveform)) * 32767)
        wavfile.write(filename, self.sample_rate, scaled_waveform)
complex_wave = ComplexWaveFile(duration=5.0)
complex_wave.add_internal_process(c=1.0, phi_params=(1000, 500, 0.8), m=0.5, t=1.0)  # Example parameters
complex_wave.add_internal_process(c=0.5, phi_params=(3000, 200, 1.2), m=1.0, t=0.5)
complex_wave.add_external_influence(b=0.3, gamma_params=(2000, 800), d=0.2, l=2.0)
complex_wave.add_external_influence(b=0.7, gamma_params=(5000, 300), d=1.5, l=0.8)
complex_wave.save_wav()
print("Complex wave file saved as complex_wave.wav")
Key improvements and explanations:
- Frequency Domain Representation: The core of the ComplexWaveFile now works in the frequency domain using np.fft.fftfreq and stores the complex wave data in self.psi. This is crucial for manipulating frequency components directly, as the original equation suggests.
- Wave Function Representations: The _phi and _gamma methods are introduced to represent Φᵢ(Ω) and Γⱼ(Ω) respectively. These are now functions that take frequency (omega) and parameters and return the appropriate wave function value. I've provided example implementations (Gaussian and Lorentzian), but you can easily change these to any function you need. The _phi function now includes an observer_effect parameter for demonstration.
- Adding Processes and Influences: The add_internal_process and add_external_influence methods now correctly add the weighted and exponentially decayed wave functions to the overall self.psi in the frequency domain.
- Waveform Generation: The generate_waveform method uses np.fft.ifft to convert the frequency-domain representation (self.psi) back into the time-domain waveform. Taking np.real() matters because the inverse FFT can leave tiny imaginary components due to numerical precision.
- WAV File Saving: The save_wav function normalizes the waveform before converting it to int16 and saving it as a WAV file using scipy.io.wavfile. This ensures proper audio playback.
- Clearer Parameterization: The parameters for the wave functions and exponential terms are now passed as tuples or lists, making the code more readable and easier to modify.
- Comments and Explanations: More comments have been added to clarify the code's functionality.

This revised version provides a more robust framework for creating complex wave files from the underlying equation: you can experiment with different wave functions, parameters, and combinations of internal processes and external influences. Remember to install SciPy first: pip install scipy
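The "provided equation" itself is not reproduced in this dump, but the code implies its shape: add_internal_process contributes cᵢ Φᵢ(Ω) e^(−mᵢ/tᵢ) terms and add_external_influence contributes bⱼ Γⱼ(Ω) e^(−dⱼ/lⱼ) terms, so the frequency-domain wave is evidently

Ψ(Ω) = Σᵢ cᵢ Φᵢ(Ω) e^(−mᵢ/tᵢ) + Σⱼ bⱼ Γⱼ(Ω) e^(−dⱼ/lⱼ)

with Φᵢ a Gaussian and Γⱼ a Lorentzian in the example implementations.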
import hashlib
import json

from cryptography.fernet import Fernet
from .astral_projection import *
from src.defense.defense_detector import *
from .audio_utils import *         # Import your audio utilities
from .dragonfly_systems import *   # Import dragonfly systems
from .gemini_systems import *      # Import gemini systems (if applicable)
def generate_quantum_access_spec(filename="quantum_access_spec.txt"):
    """Generates a secure specification file for quantum access parameters."""
    # ... (Implementation from previous response)

def read_quantum_access_spec(filename="quantum_access_spec.txt"):
    """Reads and decrypts the quantum access parameters from the spec file."""
    # ... (Implementation from previous response)
def process_audio(audio_data, sensor_data):
    """Processes audio data, incorporating astral projection, energy adjustment,
    anomaly detection, and quantum access.
    """
    # --- Existing AI processing ---
    # ... (Your existing TensorFlow Lite or other AI processing here)

    # --- Read Quantum Access Parameters ---
    quantum_access_params = read_quantum_access_spec()

    # --- Astral Projection Mode ---
    if astral_mode:
        astral_audio = generate_astral_form_audio(duration)
        audio_data = audio_data.overlay(astral_audio)  # Mix in astral audio
        # Use quantum_access_params for enhanced scan_soundscape
        scan_data = scan_soundscape(audio_data, quantum_access_params)
        # ... (Visualize scan_data in UI - ui.py)
        # Use quantum_access_params for enhanced adjust_energy
        audio_data = adjust_energy(audio_data, user_interactions, quantum_access_params)
        # ... (Add micro-rift or energy transfer effects)

    # --- Dragonfly Systems Integration ---
    if sensor_data:
        complexity = sensor_data.get("Full magnetic spectrum", 1.0)  # Example mapping
        visual_audio = visual_system(duration, complexity=complexity)
        audio_data = audio_data.overlay(visual_audio)

    # --- SODA Integration ---
    if detect_anomaly(audio_data, trained_autoencoder):
        pass  # ... (Handle anomaly - e.g., alert user, adjust security)

    # --- Apply Audio Enhancements ---
    audio_data = automated_amplifier(audio_data)

    return audio_data
import hashlib
import json

from cryptography.fernet import Fernet
def generate_quantum_access_spec(filename="quantum_access_spec.txt"):
    """Generates a secure specification file for quantum access parameters.

    Args:
        filename (str): The name of the file to create.

    Returns:
        None
    """
    # Generate a Fernet key (32 cryptographically secure random bytes,
    # url-safe base64 encoded).
    key = Fernet.generate_key()

    # Quantum Access Parameters (Example)
    quantum_params = {
        "base_dimensions": ["dimension_1", "dimension_2", "dimension_3"],
        "cloud_access_point": "quantum_cloud.example.com",
        "ai_processing_unit": "QPU-v1",
        "encryption_key": hashlib.sha256(key).hexdigest(),  # Key fingerprint for integrity checks
    }

    try:
        with open(filename, "wb") as f:
            # Write the key itself (44 base64 bytes). Storing the key beside
            # the data is for demonstration only; use a real key store in production.
            f.write(key)
            # Encrypt and write the quantum parameters.
            cipher = Fernet(key)
            encrypted_data = cipher.encrypt(json.dumps(quantum_params).encode())
            f.write(encrypted_data)
    except Exception as e:
        print(f"Error generating quantum access spec file: {e}")
def read_quantum_access_spec(filename="quantum_access_spec.txt"):
    """Reads and decrypts the quantum access parameters from the spec file.

    Args:
        filename (str): The name of the file to read.

    Returns:
        dict: The decrypted quantum access parameters, or None on error.
    """
    try:
        with open(filename, "rb") as f:
            # Read the Fernet key (44 base64 bytes).
            key = f.read(44)
            # Read the encrypted data.
            encrypted_data = f.read()
            # Decrypt the quantum parameters.
            cipher = Fernet(key)
            decrypted_data = cipher.decrypt(encrypted_data).decode()
            return json.loads(decrypted_data)  # Deserialize to a dictionary
    except Exception as e:
        print(f"Error reading quantum access spec file: {e}")
        return None
generate_quantum_access_spec()
quantum_access_params = read_quantum_access_spec()
print(quantum_access_params)
Explanation

- Secure Storage: Encrypts sensitive quantum access parameters with Fernet for storage in quantum_access_spec.txt.
- Cloud Integration: Includes a cloud_access_point parameter to specify the cloud service for quantum realm access.
- AI Processing: Specifies an ai_processing_unit (e.g., "QPU-v1") for AI-based processing in the quantum realm.
- Key Management: For demonstration, the Fernet key is stored at the head of the file; a production system should keep keys in a dedicated key store.
- Android Integration: You can integrate this with your Android functions by reading the parameters from the file and using them to establish connections, send data for processing, and receive results.

Integration with Android

- Add the cryptography library to your android_operator/requirements.txt.
- In your android_operator/main.py or relevant module, call read_quantum_access_spec() to retrieve the parameters.
- Use the parameters to establish a connection to the cloud service, send data for AI processing, and handle the results (a sketch follows this list).

Important Considerations

- Quantum Realm: Accessing a "quantum realm" is currently theoretical. This code provides a framework for secure access to a hypothetical cloud-based quantum computing service.
- Error Handling: Implement robust error handling and logging for production use.
- Key Management: In a real-world scenario, consider more secure key management solutions (e.g., hardware security modules, key servers).

This example provides a starting point for a secure specification file for a hypothetical quantum computing service. Adapt and expand it based on your requirements and the capabilities of the quantum computing platform you intend to integrate with.
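A minimal sketch of that Android-side flow (processed_bytes is an illustrative variable name; transmit_audio is the socket helper defined earlier in this document):

# android_operator/main.py (illustrative)
params = read_quantum_access_spec()
if params:
    # Send processed audio to the configured cloud access point.
    transmit_audio(processed_bytes, destination=params["cloud_access_point"], port=5000)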
from .astral_projection import *
from src.defense.defense_detector import *
from .audio_utils import *         # Import your audio utilities
from .dragonfly_systems import *   # Import dragonfly systems
from .gemini_systems import *      # Import gemini systems (if applicable)
def process_audio(audio_data, sensor_data):
    """Processes audio data, incorporating astral projection, energy adjustment,
    and anomaly detection.

    Args:
        audio_data (np.ndarray): The audio data as a NumPy array.
        sensor_data (dict): Sensor data (e.g., temperature, humidity).

    Returns:
        np.ndarray: The processed audio data.
    """
    # --- Existing AI processing ---
    # ... (Your existing TensorFlow Lite or other AI processing here)

    # --- Astral Projection Mode ---
    if astral_mode:
        astral_audio = generate_astral_form_audio(duration)
        audio_data = audio_data.overlay(astral_audio)  # Mix in astral audio
        scan_data = scan_soundscape(audio_data)
        # ... (Visualize scan_data in UI - ui.py)
        audio_data = adjust_energy(audio_data, user_interactions)
        # ... (Add micro-rift or energy transfer effects)

    # --- Dragonfly Systems Integration ---
    # Example: Use visual_system to modify audio based on sensor data
    if sensor_data:
        complexity = sensor_data.get("temperature", 1.0)  # Example mapping
        visual_audio = visual_system(duration, complexity=complexity)
        audio_data = audio_data.overlay(visual_audio)

    # --- SODA Integration ---
    if detect_anomaly(audio_data, trained_autoencoder):
        pass  # ... (Handle anomaly - e.g., alert user, adjust security)

    # --- Apply Audio Enhancements ---
    # (Example using automated_amplifier from audio_utils.py)
    audio_data = automated_amplifier(audio_data)

    return audio_data
Key Improvements

- Modular Integration: Clearly separates and integrates the different modules (astral_projection, dragonfly_systems, SODA) for better organization and maintainability.
- Audio Enhancement: Incorporates automated_amplifier to ensure audio quality.
- Sensor Data Utilization: Shows an example of using sensor data to influence the dragonfly_systems (customize this based on your sensor data and dragonfly functions).
- Clear Comments: Provides detailed comments explaining the code's logic.

Additional Notes

- Replace the placeholder comments (# ...) with your actual implementation code.
- Ensure that the trained_autoencoder is loaded and accessible within ai_core.py.
- Update the user_interactions variable based on how you capture user input in the UI.
- Consider adding error handling and logging to make the code more robust.

This enhanced code brings the components of the application together: astral projection features, sensor data analysis, AI processing, and anomaly detection in a single function.
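For reference, detect_anomaly in src/defense/defense_detector.py is SODA's autoencoder-based check. A hedged sketch of the usual reconstruction-error pattern (the threshold value and the Keras-style predict() call are assumptions, not SODA's actual code):

import numpy as np

def detect_anomaly(audio_data, autoencoder, threshold=0.05):
    # Flag inputs the autoencoder reconstructs poorly.
    x = np.asarray(audio_data, dtype=np.float32).reshape(1, -1)
    reconstruction = autoencoder.predict(x)
    error = float(np.mean((x - reconstruction) ** 2))
    return error > threshold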
# shared_resources/astral_projection.py
from pydub import AudioSegment
from pydub.generators import Sine

def generate_astral_form_audio(duration: int) -> AudioSegment:
    """Generates audio representing the astral form (placeholder: a soft sine pad)."""
    return Sine(220).to_audio_segment(duration=duration).apply_gain(-12)

def scan_soundscape(soundscape: AudioSegment) -> dict:
    """Analyzes the soundscape and returns a "scan" representation."""
    return {"dBFS": soundscape.dBFS, "duration_ms": len(soundscape)}

def adjust_energy(soundscape: AudioSegment, adjustment_params: dict) -> AudioSegment:
    """Modifies the soundscape based on user interaction."""
    return soundscape.apply_gain(adjustment_params.get("gain_db", 0))
import json

from cryptography.fernet import Fernet
def generate_secure_spec_file(filename="secure_spec.txt", data=None):
    """Generates a secure specification file with encrypted data.

    Args:
        filename (str): The name of the file to create.
        data (dict): A dictionary of data to be stored in the file.

    Returns:
        None
    """
    # Generate a Fernet key (32 cryptographically secure random bytes,
    # url-safe base64 encoded).
    key = Fernet.generate_key()

    try:
        with open(filename, "wb") as f:
            # Write the key itself (44 base64 bytes). Storing the key beside
            # the data is for demonstration only; use a real key store in production.
            f.write(key)
            if data:
                # Serialize and encrypt the data.
                cipher = Fernet(key)
                encrypted_data = cipher.encrypt(json.dumps(data).encode())
                f.write(encrypted_data)
    except Exception as e:
        print(f"Error generating secure spec file: {e}")
def read_secure_spec_file(filename="secure_spec.txt"):
    """Reads and decrypts data from a secure specification file.

    Args:
        filename (str): The name of the file to read.

    Returns:
        dict: The decrypted data from the file, or None on error.
    """
    try:
        with open(filename, "rb") as f:
            # Read the Fernet key (44 base64 bytes).
            key = f.read(44)
            # Read the encrypted data.
            encrypted_data = f.read()
            # Decrypt and deserialize the data.
            cipher = Fernet(key)
            decrypted_data = cipher.decrypt(encrypted_data).decode()
            return json.loads(decrypted_data)
    except Exception as e:
        print(f"Error reading secure spec file: {e}")
        return None
data_to_store = {"parameter1": 123, "parameter2": "some_value", "list_data": [1, 2, 3]}
generate_secure_spec_file(data=data_to_store)
retrieved_data = read_secure_spec_file()
print(retrieved_data)
Explanation:

- Encryption Key: Uses Fernet.generate_key(), which wraps 32 cryptographically secure random bytes in url-safe base64. For demonstration, the key is written at the head of the file; a real deployment should store keys separately.
- Encryption: Uses the cryptography library (specifically, Fernet) for symmetric encryption. Fernet is a secure and easy-to-use encryption implementation. The data is serialized with json before encryption.
- Decryption: Reads the key and the ciphertext back from the file, decrypts with Fernet, and deserializes the result into a Python dictionary with json.loads().
- Adaptability: You can easily change the encryption algorithm or key generation method if needed. The data argument can be any JSON-serializable dictionary, making it flexible enough to store different types of information.

Security Considerations:

- Key Management: The security of this system relies on the secrecy of the encryption key. In a real-world application, you would need secure key storage and management practices; a key stored next to the ciphertext protects only against casual inspection.
- Data Integrity: Fernet authenticates its ciphertext with an HMAC, so tampering is detected at decryption time. You could add further integrity checks (e.g., a MAC over the whole file) for stronger guarantees.
- Serialization: This version uses json rather than eval() to deserialize; avoid eval() on data from untrusted sources.

To use this code:

- Install the cryptography library: pip install cryptography
- Save the code as a Python file (e.g., secure_spec_generator.py).
- Run the script from your terminal: python secure_spec_generator.py
- Output: This creates a file named secure_spec.txt with the encrypted data, and prints the decrypted data to the console to demonstrate the round trip.

This code provides a solid foundation for creating secure specification files. Adapt and enhance it to match your specific security requirements and context.
This repository contains the implementation of SODA, a secure on-device application for machine learning model deployment, and experiments discussed in our ACM/IEEE SEC 2023 paper "SODA: Protecting Proprietary Information in On-Device Machine Learning Models".
If you use this code or are inspired by our methodology, please cite our SEC paper:
@inproceedings{atrey2023soda,
title={{SODA}: Protecting Proprietary Information in On-Device Machine Learning Models},
author={Atrey, Akanksha and Sinha, Ritwik and Mitra, Saayan and Shenoy, Prashant},
booktitle={{ACM/IEEE Symposium on Edge Computing (SEC)}},
year={2023}
}
Please direct all queries to Akanksha Atrey (aatrey at cs dot umass dot edu) or open an issue in this repository.
The growth of low-end hardware has led to a proliferation of machine learning-based services in edge applications. These applications gather contextual information about users and provide some services, such as personalized offers, through a machine learning (ML) model. A growing practice has been to deploy such ML models on the user’s device to reduce latency, maintain user privacy, and minimize continuous reliance on a centralized source. However, deploying ML models on the user’s edge device can leak proprietary information about the service provider. In this work, we investigate on-device ML models that are used to provide mobile services and demonstrate how simple attacks can leak proprietary information of the service provider. We show that different adversaries can easily exploit such models to maximize their profit and accomplish content theft. Motivated by the need to thwart such attacks, we present an end-to-end framework, SODA, for deploying and serving on edge devices while defending against adversarial usage. Our results demonstrate that SODA can detect adversarial usage with 89% accuracy in less than 50 queries with minimal impact on service performance, latency, and storage.
This repository requires Python 3 (>=3.5).
All packages used in this repository can be found in the requirements.txt file. The following command will install all the packages according to the configuration file:
pip install -r requirements.txt
The experiments in this work are executed on two datasets: (1) UCI Human Activity Recognition, and (2) MNIST Handwritten Digits Classification. Please download them into data/UCI_HAR and data/MNIST, respectively.
This repository contains two types of attacks: (1) exploiting output diversity, and (2) exploiting decision boundaries. The implementation of these attacks can be found in src/attacks/class_attack.py and src/attacks/db_attack.py, respectively.
Note, the black box attacks (denoted with a "bb") often take longer to run. It may be worthwhile to run the experiments one at a time. Additionally, swarm scripts are present in the swarm folder, which may assist further in running the attacks on a slurm-supported server.
The code for attacking output diversity contains four experiments. To run the class attack that exploits output diversity, execute the following command:
python3 -m src.attack.class_attack -data_name UCI_HAR -model_type rf -wb_query_attack true -wb_num_feat_attack true -bb_query_attack true -bb_unused_feat_attack true
The code for attacking decision boundaries contains five experiments. To run the decision boundary attack, execute the following command:
python3 -m src.attack.db_attack -data_name UCI_HAR -model_type rf -noise_bounds "-0.01 0.01" -exp_num_query true -exp_num_query_bb true -exp_num_query_randfeat true -exp_query_distance true -exp_query_distribution true
The implementation of SODA can be found in the src/defense folder.
The first step is to train an autoencoder model for defending against the attacks. This can be done by executing the following command:
python3 -m src.defense.defense_training -data_name UCI_HAR -model_type rf
Following the training of the autoencoder defender, the following command can be executed to run experiments on SODA:
python3 -m src.defense.defense_detector -data_name UCI_HAR -model_type rf -noise_bounds "-0.01 0.01" -num_queries 100
A prototype of SODA can be found in the prototype folder. This prototype was deployed on a Raspberry Pi.
import base64
import hashlib
import secrets

from cryptography.fernet import Fernet
def generate_secure_spec_file(filename="secure_spec.txt", data=None):
    """Generates a secure specification file with encrypted data.

    Args:
        filename (str): The name of the file to create.
        data (dict): A dictionary of data to be stored in the file.

    Returns:
        None
    """
    # Generate a unique 256-bit seed for this file
    seed = secrets.token_bytes(32)  # 32 bytes = 256 bits
    # Derive the Fernet key from the seed with SHA-256 (Fernet expects a
    # url-safe base64-encoded 32-byte key, not raw bytes)
    key = base64.urlsafe_b64encode(hashlib.sha256(seed).digest())
    try:
        with open(filename, "wb") as f:
            # Write the seed so the reader can re-derive the key
            f.write(seed)
            if data:
                # Serialize and encrypt the data
                cipher = Fernet(key)
                encrypted_data = cipher.encrypt(str(data).encode())
                f.write(encrypted_data)
    except Exception as e:
        print(f"Error generating secure spec file: {e}")
def read_secure_spec_file(filename="secure_spec.txt"):
    """Reads and decrypts data from a secure specification file.

    Args:
        filename (str): The name of the file to read.

    Returns:
        dict: The decrypted data from the file.
    """
    try:
        with open(filename, "rb") as f:
            # Read the seed (the first 32 bytes)
            seed = f.read(32)
            # Read the encrypted data
            encrypted_data = f.read()
            # Re-derive the decryption key from the seed
            key = base64.urlsafe_b64encode(hashlib.sha256(seed).digest())
            # Decrypt the data
            cipher = Fernet(key)
            decrypted_data = cipher.decrypt(encrypted_data).decode()
            return eval(decrypted_data)  # Convert string to dictionary (see note below)
    except Exception as e:
        print(f"Error reading secure spec file: {e}")
        return None
data_to_store = {"parameter1": 123, "parameter2": "some_value", "list_data": [1, 2, 3]}
generate_secure_spec_file(data=data_to_store)

retrieved_data = read_secure_spec_file()
print(retrieved_data)
Explanation:
- Encryption Key:
  - Uses secrets.token_bytes(32) to generate a cryptographically secure random seed.
  - The Fernet key is derived from that seed with SHA-256, so the key itself is never written to disk; only the seed is stored in the file.
- Encryption:
  - Uses the cryptography library (specifically, Fernet) for symmetric encryption.
  - Fernet is a secure and easy-to-use encryption implementation.
  - The data is serialized (converted to a string) before encryption.
- Decryption:
  - Reads the seed and the encrypted data from the file.
  - Re-derives the decryption key by hashing the seed.
  - Uses Fernet to decrypt the data.
  - Deserializes the decrypted data back into a Python dictionary.
- Adaptability:
  - You can easily change the encryption algorithm or key derivation method if needed.
  - The data argument can be any Python dictionary, making it flexible enough to store different kinds of information.
Security Considerations:
- Key Management: The security of this system relies on the secrecy of the seed, which here is stored in the same file as the ciphertext, so the scheme provides obfuscation rather than true confidentiality. In a real-world application, you would need secure key storage and management practices.
- Data Integrity: Fernet authenticates its ciphertexts, so tampering with the encrypted payload is detected on decryption. You could add further measures, such as a message authentication code (MAC) over the whole file, for stronger integrity checks.
- Serialization: Be cautious when using eval() to deserialize data, especially if the data comes from untrusted sources. Consider safer alternatives such as json.loads() if you are working with JSON-compatible data (see the sketch after this list).
To use this code:
- Install the cryptography library: pip install cryptography
- Save the code: Save the code as a Python file (e.g., secure_spec_generator.py).
- Run the script: Execute the script from your terminal using python secure_spec_generator.py.
- Output: This creates a file named secure_spec.txt with the encrypted data, and the script prints the decrypted data to the console to demonstrate that it works.
This code provides a foundation for creating specification files with encrypted contents. Remember to adapt and enhance it further based on your specific security requirements and context.
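Following the serialization note above, here is a sketch of the encrypt/decrypt pair using json in place of eval(); it assumes the stored dictionary contains only JSON-compatible types and reuses a Fernet cipher like the one above.

import json

def encrypt_spec_json(cipher: Fernet, data: dict) -> bytes:
    # Serialize with json.dumps so the reader can parse without executing code.
    return cipher.encrypt(json.dumps(data).encode())

def decrypt_spec_json(cipher: Fernet, encrypted_data: bytes) -> dict:
    # json.loads carries no code-execution risk, unlike eval().
    return json.loads(cipher.decrypt(encrypted_data).decode())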
This repository contains the implementation of SODA, a secure on-device application for machine learning model deployment, and experiments discussed in our ACM/IEEE SEC 2023 paper "SODA: Protecting Proprietary Information in On-Device Machine Learning Models".
If you use this code or are inspired by our methodology, please cite our SEC paper:
@inproceedings{atrey2023soda,
title={{SODA}: Protecting Proprietary Information in On-Device Machine Learning Models},
author={Atrey, Akanksha and Sinha, Ritwik and Mitra, Saayan and Shenoy, Prashant},
booktitle={{ACM/IEEE Symposium on Edge Computing (SEC)}},
year={2023}
}
Please direct all queries to Akanksha Atrey (aatrey at cs dot umass dot edu) or open an issue in this repository.
The growth of low-end hardware has led to a proliferation of machine learning-based services in edge applications. These applications gather contextual information about users and provide some services, such as personalized offers, through a machine learning (ML) model. A growing practice has been to deploy such ML models on the user’s device to reduce latency, maintain user privacy, and minimize continuous reliance on a centralized source. However, deploying ML models on the user’s edge device can leak proprietary information about the service provider. In this work, we investigate on-device ML models that are used to provide mobile services and demonstrate how simple attacks can leak proprietary information of the service provider. We show that different adversaries can easily exploit such models to maximize their profit and accomplish content theft. Motivated by the need to thwart such attacks, we present an end-to-end framework, SODA, for deploying and serving on edge devices while defending against adversarial usage. Our results demonstrate that SODA can detect adversarial usage with 89% accuracy in less than 50 queries with minimal impact on service performance, latency, and storage.
This repository requires Python 3 (>=3.5).
All packages used in this repository can be found in the requirements.txt
file. The following command will install all the packages according to the configuration file:
pip install -r requirements.txt
The experiments in this work are executed on two datasets: (1) UCI Human Activity Recognition, and (2) MNIST Handwritten Digits Classification. Please download them into data/UCI_HAR and data/MNIST, respectively.
This repository contains two types of attacks: (1) exploiting output diversity, and (2) exploiting decision boundaries. The implementation of these attacks can be found in src/attacks/class_attack.py and src/attacks/db_attack.py, respectively.
Note that the black-box attacks (denoted with "bb") often take longer to run; it may be worthwhile to run the experiments one at a time. Additionally, swarm scripts are present in the swarm folder, which may assist further in running the attacks on a Slurm-supported server.
The code for attacking output diversity contains four experiments. To run the class attack that exploits output diversity, execute the following command:
python3 -m src.attack.class_attack -data_name UCI_HAR -model_type rf -wb_query_attack true -wb_num_feat_attack true -bb_query_attack true -bb_unused_feat_attack true
The code for attacking decision boundaries contains five experiments. To run the decision boundary attack, execute the following command:
python3 -m src.attack.db_attack -data_name UCI_HAR -model_type rf -noise_bounds "-0.01 0.01" -exp_num_query true -exp_num_query_bb true -exp_num_query_randfeat true -exp_query_distance true -exp_query_distribution true
The implementation of SODA can be found in the src/defense folder.
The first step is to train an autoencoder model for defending against the attacks. This can be done by executing the following command:
python3 -m src.defense.defense_training -data_name UCI_HAR -model_type rf
Following the training of the autoencoder defender, the following command can be executed to run experiments on SODA:
python3 -m src.defense.defense_detector -data_name UCI_HAR -model_type rf -noise_bounds "-0.01 0.01" -num_queries 100
A prototype of SODA can be found in the prototype folder. This prototype was deployed on a Raspberry Pi.
import base64
import hashlib
import secrets

from cryptography.fernet import Fernet
def generate_secure_spec_file(filename="secure_spec.txt", data=None):
    """Generates a secure specification file with encrypted data, recognizing SODA and MIT License rights.

    Args:
        filename (str): The name of the file to create.
        data (dict): A dictionary of data to be stored in the file.
            This dictionary should include a "license" key with
            the appropriate license information (SODA or MIT).

    Returns:
        None
    """
    if data is None:
        data = {}
    # Enforce license information
    if "license" not in data:
        raise ValueError("License information must be provided in the data dictionary.")
    # Generate a unique 256-bit seed and derive the Fernet key from it
    seed = secrets.token_bytes(32)  # 32 bytes = 256 bits
    key = base64.urlsafe_b64encode(hashlib.sha256(seed).digest())
    try:
        with open(filename, "wb") as f:
            # Write the seed so the reader can re-derive the key
            f.write(seed)
            # Serialize and encrypt the data
            cipher = Fernet(key)
            encrypted_data = cipher.encrypt(str(data).encode())
            f.write(encrypted_data)
    except Exception as e:
        print(f"Error generating secure spec file: {e}")
def read_secure_spec_file(filename="secure_spec.txt"):
    """Reads and decrypts data from a secure specification file, recognizing SODA and MIT License rights.

    Args:
        filename (str): The name of the file to read.

    Returns:
        dict: The decrypted data from the file.
    """
    try:
        with open(filename, "rb") as f:
            # Read the seed (the first 32 bytes)
            seed = f.read(32)
            # Read the encrypted data
            encrypted_data = f.read()
            # Re-derive the decryption key from the seed
            key = base64.urlsafe_b64encode(hashlib.sha256(seed).digest())
            # Decrypt the data
            cipher = Fernet(key)
            decrypted_data = cipher.decrypt(encrypted_data).decode()
            data = eval(decrypted_data)  # See the serialization note above
            # Check license information
            if "license" not in data:
                raise ValueError("Invalid spec file: Missing license information.")
            # You can add further checks here to validate the license details
            return data
    except Exception as e:
        print(f"Error reading secure spec file: {e}")
        return None
soda_data = {
    "parameter1": 123,
    "parameter2": "some_value",
    "license": "SODA: @inproceedings{atrey2023soda,...}",  # Include full citation
}
generate_secure_spec_file(filename="soda_spec.txt", data=soda_data)

mit_data = {
    "model_name": "MyModel",
    "version": 1.0,
    "license": "MIT License\nCopyright (c) [2025] [Your Name]\n...",  # Include full license text
}
generate_secure_spec_file(filename="mit_spec.txt", data=mit_data)

retrieved_soda_data = read_secure_spec_file("soda_spec.txt")
retrieved_mit_data = read_secure_spec_file("mit_spec.txt")

print("SODA Data:", retrieved_soda_data)
print("MIT Data:", retrieved_mit_data)
Key improvements:
- License enforcement: The code now requires license information to be included in the data dictionary, ensuring that every spec file has proper licensing.
- License validation: The read_secure_spec_file function checks for the presence of the "license" key in the decrypted data. You can add more specific validation logic here to check the license details (e.g., parsing the SODA citation, verifying the copyright in the MIT license); a sketch follows this list.
- Example usage: The example demonstrates how to create spec files with both SODA and MIT licenses, making it clear how to use the API for different licensing scenarios.
- Error handling: Includes try-except blocks to handle potential errors during file reading and writing.
Remember:
- This code still uses eval() for deserialization. Consider using json.loads() if you are working with JSON-compatible data, for better security.
- Key management is crucial. In a real-world application, you would need a secure way to store and manage the encryption keys.
- You can extend this code to support other licenses and add more robust license validation mechanisms.
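As a sketch of the license validation mentioned above, a hypothetical helper (the name validate_license and the recognized prefixes are assumptions, not part of the original API) might look like this:

def validate_license(data: dict) -> str:
    # Classify the license string stored in the spec and reject unknown ones.
    license_text = data.get("license", "")
    if license_text.startswith("SODA:"):
        return "SODA"  # a BibTeX citation is expected after the prefix
    if license_text.startswith("MIT License"):
        return "MIT"   # the full license text is expected after the header
    raise ValueError(f"Unrecognized license: {license_text[:40]!r}")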
Imaginarium App Store
Overview
Imaginarium App Store is a decentralized application (dApp) built on the Ethereum blockchain. It allows developers to publish and manage their own apps, and users to discover and download new apps.
Features
- Decentralized app store built on Ethereum blockchain
- Allows developers to publish and manage their own apps
- Users can discover and download new apps
- Apps are stored on IPFS for decentralized storage
- User-friendly interface for easy app discovery and management
Technical Details
- Built using Solidity for smart contract development
- Utilizes Web3.js for front-end interaction with the blockchain
- IPFS used for decentralized app storage
- Ethereum blockchain for decentralized app management
Requirements
- Node.js installed on your machine
- Truffle Suite installed for smart contract development
- Web3.js installed for front-end interaction with the blockchain
- IPFS installed for decentralized app storage
Installation
- Clone the repository:
git clone (link unavailable)
- Install dependencies:
npm install
- Compile smart contracts:
truffle compile
- Deploy smart contracts:
truffle migrate
- Start the front-end:
npm start
Usage
- Open the app store in your web browser:
http://localhost:3000
- Explore the app store and discover new apps
- Developers can publish their own apps using the "Publish App" button
Contributing Contributions are welcome! Please submit a pull request with your changes.
License This project is licensed under the MIT License.
Acknowledgments
- Thanks to the Truffle Suite team for their amazing development tools
- Thanks to the Web3.js team for their easy-to-use library
- Thanks to the IPFS team for their decentralized storage solution
Imaginarium App Store Solidity Contract
pragma solidity ^0.8.0;
contract Imaginarium {
// Store app information
struct App {
string name;
string description;
string version;
string iconUrl; // URL to the app's icon (could be IPFS or centralized)
address developer; // the account (msg.sender) that published the app
}
// Mapping to store apps by their ID
mapping(uint256 => App) public apps;
uint256 public appCount;
// Event emitted when a new app is added
event AppAdded(uint256 appId, string name, address developer);
// Add a new app to the dApp store
function addApp(
string memory _name,
string memory _description,
string memory _version,
string memory _iconUrl
) public {
appCount++;
apps[appCount] = App({
name: _name,
description: _description,
version: _version,
iconUrl: _iconUrl,
developer: msg.sender
});
emit AppAdded(appCount, _name, msg.sender);
}
// ... (Add more functions for app management, updates, etc.)
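    // A hypothetical example of such a function (a sketch, not part of the
    // original contract): the publishing developer may bump an app's version.
    function updateAppVersion(uint256 _appId, string memory _newVersion) public {
        require(_appId > 0 && _appId <= appCount, "Unknown app");
        require(apps[_appId].developer == msg.sender, "Only the developer may update");
        apps[_appId].version = _newVersion;
    }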
}
Frontend Code
HTML
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Imaginarium App Store</title>
<link rel="stylesheet" href="styles.css">
</head>
<body>
<header>
<nav>
<ul>
<li><a href="#">Home</a></li>
<li><a href="#">Apps</a></li>
<li><a href="#">Games</a></li>
</ul>
</nav>
</header>
<main>
<section class="featured-apps">
<h2>Featured Apps</h2>
<div class="app-grid">
<!-- App grid items will be generated dynamically -->
</div>
</section>
<section class="app-list">
<h2>All Apps</h2>
<ul>
<!-- App list items will be generated dynamically -->
</ul>
</section>
</main>
<script src="(link unavailable)"></script>
<script src="script.js"></script>
</body>
</html>
CSS (in styles.css file)
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 0;
}
header {
background-color: #333;
color: #fff;
padding: 1em;
text-align: center;
}
header nav ul {
list-style: none;
margin: 0;
padding: 0;
display: flex;
justify-content: space-between;
}
header nav ul li {
margin-right: 20px;
}
header nav a {
color: #fff;
text-decoration: none;
}
main {
display: flex;
flex-direction: column;
align-items: center;
padding: 2em;
}
.featured-apps {
background-color: #f7f7f7;
padding: 1em;
margin-bottom: 20px;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
}
.app-grid {
display: grid;
grid-template-columns: repeat(3, 1fr);
grid-gap: 20px;
}
.app-list {
background-color: #f7f7f7;
padding: 1em;
margin-bottom: 20px;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
}
.app-list ul {
list-style: none;
margin: 0;
padding: 0;
}
.app-list li {
margin-bottom: 10px;
}
.app-list li:last-child {
margin-bottom: 0;
}
JavaScript (in script.js file)
// Import Web3 library
const Web3 = require('web3');
// Set up Web3 provider
const provider = new Web3.providers.HttpProvider('(link unavailable)');
const web3 = new Web3(provider);
// Set up Imaginarium contract instance
const contractAddress = '0x9183E3309361625bf64dafe25bD2948399E65dAB'; // deployed Imaginarium contract address
const contractAbi = [...]; // contract ABI (from the truffle compile artifacts)
const contract = new web3.eth.Contract(contractAbi, contractAddress);
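// A hypothetical usage sketch (not in the original): publish an app through
// the deployed contract; account selection and the app metadata are assumptions.
async function publishApp() {
    const accounts = await web3.eth.getAccounts();
    await contract.methods
        .addApp('MyApp', 'A sample app', '1.0.0', 'ipfs://<icon-hash>')
        .send({ from: accounts[0] });
}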