import 'dart:async';
import 'dart:typed_data';

import 'package:flutter/services.dart';

import '../utils/audio_math.dart';
import '../utils/constants.dart';

|
/// Flutter-side wrapper around the Android [AudioEngine] native platform channel.
///
/// Responsibilities:
/// • Request / release AudioRecord and AudioTrack via MethodChannel.
/// • Stream captured PCM chunks as [Float64List] via [captureStream].
/// • Accept [Float64List] playback buffers and forward them as Int16 bytes.
/// • Manage speaker / earpiece routing.
///
/// All audio is 8 kHz, mono, 16-bit PCM (Int16 LE).
class AudioService {
  // ── Platform channels ──────────────────────────────────────────────
  static const MethodChannel _method =
      MethodChannel('com.example.call/audio_control');
  static const EventChannel _event =
      EventChannel('com.example.call/audio_capture');

  // ── Capture stream ─────────────────────────────────────────────────

  // Subscription to the native EventChannel; non-null only while capturing.
  StreamSubscription<dynamic>? _captureSub;

  // Broadcast so multiple consumers (codec, VU meter, …) can listen at once.
  final _captureController = StreamController<Float64List>.broadcast();

  /// Broadcast stream of decoded Float64 audio chunks from the microphone.
  /// Each chunk is [C.lpcSubframeSamples] (160) samples = 20 ms.
  Stream<Float64List> get captureStream => _captureController.stream;

  // ── State ──────────────────────────────────────────────────────────
  bool _capturing = false;
  bool _playing = false;
  bool _speakerOn = true;

  /// Whether microphone capture is currently active.
  bool get isCapturing => _capturing;

  /// Whether the AudioTrack is currently open for playback.
  bool get isPlaying => _playing;

  /// Whether loudspeaker routing was last successfully requested.
  bool get speakerOn => _speakerOn;

  // ── Capture ────────────────────────────────────────────────────────

  /// Start microphone capture at [C.sampleRate] Hz.
  ///
  /// [source] is the Android [AudioSource] constant (default = UNPROCESSED = 9).
  /// The stream emits [Float64List] chunks of [C.lpcSubframeSamples] samples.
  ///
  /// If the native call fails, internal state is rolled back (subscription
  /// cancelled, [isCapturing] reset) so a later retry is possible, and the
  /// platform exception is rethrown to the caller.
  Future<void> startCapture({int source = 9}) async {
    if (_capturing) return;
    _capturing = true;

    // Subscribe to the native EventChannel BEFORE calling startCapture so no
    // audio chunks are dropped between the two calls.
    _captureSub = _event.receiveBroadcastStream().listen(
      (dynamic data) {
        // Native side delivers raw Int16 LE bytes; decode to Float64 here so
        // downstream DSP code never deals with byte order.
        if (data is Uint8List) {
          final floats = AudioMath.int16BytesToFloat(data);
          _captureController.add(floats);
        }
      },
      onError: (Object e) {
        _captureController.addError(e);
      },
    );

    try {
      await _method.invokeMethod<String>('startCapture', {
        'sampleRate': C.sampleRate,
        'source': source,
      });
    } catch (_) {
      // Roll back: otherwise the service is stuck in a phantom "capturing"
      // state with a leaked EventChannel subscription and can never restart.
      _capturing = false;
      await _captureSub?.cancel();
      _captureSub = null;
      rethrow;
    }
  }

  /// Stop microphone capture.
  ///
  /// No-op when capture is not running. The native recorder is stopped first,
  /// then the EventChannel subscription is cancelled so any chunks already in
  /// flight are still delivered.
  Future<void> stopCapture() async {
    if (!_capturing) return;
    _capturing = false;
    await _method.invokeMethod<String>('stopCapture');
    await _captureSub?.cancel();
    _captureSub = null;
  }

  // ── Playback ───────────────────────────────────────────────────────

  /// Start the AudioTrack in streaming mode.
  ///
  /// If the native call fails, [isPlaying] is reset and the platform
  /// exception is rethrown so the caller can retry.
  Future<void> startPlayback() async {
    if (_playing) return;
    _playing = true;
    try {
      await _method.invokeMethod<String>('startPlayback', {
        'sampleRate': C.sampleRate,
      });
    } catch (_) {
      // Roll back so a failed start does not permanently block playback.
      _playing = false;
      rethrow;
    }
  }

  /// Write a Float64 buffer to the AudioTrack for immediate playback.
  ///
  /// Converts to Int16 LE bytes before sending over the platform channel.
  /// Silently ignored when playback has not been started.
  Future<void> writePlayback(Float64List samples) async {
    if (!_playing) return;
    final bytes = AudioMath.floatToInt16Bytes(samples);
    await _method.invokeMethod<String>('writePlayback', {'samples': bytes});
  }

  /// Write raw Int16 LE PCM bytes directly (no conversion needed).
  ///
  /// Silently ignored when playback has not been started.
  Future<void> writePlaybackBytes(Uint8List pcmBytes) async {
    if (!_playing) return;
    await _method.invokeMethod<String>('writePlayback', {'samples': pcmBytes});
  }

  /// Stop the AudioTrack.
  ///
  /// No-op when playback is not running.
  Future<void> stopPlayback() async {
    if (!_playing) return;
    _playing = false;
    await _method.invokeMethod<String>('stopPlayback');
  }

  // ── Audio routing ──────────────────────────────────────────────────

  /// Enable loudspeaker mode (required for acoustic FSK coupling).
  ///
  /// [speakerOn] is updated only if the native routing call succeeds; on
  /// failure the previous value is restored and the exception is rethrown.
  Future<void> setSpeakerMode(bool enabled) async {
    final previous = _speakerOn;
    _speakerOn = enabled;
    try {
      await _method
          .invokeMethod<String>('setSpeakerMode', {'enabled': enabled});
    } catch (_) {
      // Keep the cached flag truthful: the route was not actually changed.
      _speakerOn = previous;
      rethrow;
    }
  }

  /// Query current audio route info from the native layer.
  ///
  /// Returns an empty map when the native side reports nothing.
  Future<Map<String, dynamic>> getAudioRouteInfo() async {
    final result =
        await _method.invokeMapMethod<String, dynamic>('getAudioRouteInfo');
    return result ?? {};
  }

  // ── Lifecycle ──────────────────────────────────────────────────────

  /// Stop all audio and close the capture stream.
  ///
  /// After this the service cannot be restarted: [captureStream] is closed.
  Future<void> dispose() async {
    await stopCapture();
    await stopPlayback();
    await _captureController.close();
  }
}
|