// secure_channel.dart — Dart, ~215 lines (7.1 KiB)
import 'dart:async';
import 'dart:typed_data';

import '../core/codec/lpc_codec.dart';
import '../core/crypto/key_manager.dart';
import '../core/framing/deframer.dart';
import '../core/framing/framer.dart';
import '../utils/audio_math.dart';
import '../utils/constants.dart';
import 'audio_service.dart';
/// Channel state machine.
///
/// [open] moves the channel from [idle] to [txReady]; push-to-talk then
/// toggles between [transmitting] and [receiving], returning to [txReady]
/// when released. [close] returns to [idle].
enum ChannelState {
  idle, // not started
  txReady, // key loaded, ready to transmit
  transmitting, // PTT held: mic → LPC → FSK → speaker
  receiving, // PTT released: mic → deframer → decoded voice
  error, // unrecoverable fault (not set by SecureChannel itself)
}
|
||
|
||
/// High-level secure voice channel.
///
/// Owns the complete TX and RX pipelines:
///   TX: microphone PCM → LPC encode → Framer → FSK audio → speaker
///   RX: microphone PCM → Deframer → LPC decode → speaker (earpiece)
///
/// Operation is half-duplex (push-to-talk).
/// Call [startTransmit] / [stopTransmit] to switch directions.
class SecureChannel {
  final AudioService _audio;
  final KeyManager _keys;

  late final Framer _framer;
  late final Deframer _deframer;
  late final LpcCodec _encoder;
  late final LpcCodec _decoder;

  ChannelState _state = ChannelState.idle;

  /// Current channel state.
  ChannelState get state => _state;

  // Accumulate microphone sub-frames into a full LPC super-frame.
  final _txBuffer = <Uint8List>[]; // list of 20-ms Int16 LE chunks

  StreamSubscription<dynamic>? _captureSub;

  // Stats exposed to UI.
  int txFrames = 0;
  int rxFrames = 0;
  int rxErrors = 0;
  double txRms = 0.0;
  double rxSignalRms = 0.0;

  // Notifier for state / stats updates.
  final _stateController = StreamController<ChannelState>.broadcast();

  /// Broadcast stream of channel state transitions.
  Stream<ChannelState> get onStateChange => _stateController.stream;

  // 4-byte session ID (timestamp-derived at session creation).
  late final Uint8List _sessionId;

  /// Builds the framing/codec pipeline. Call [open] before transmitting.
  SecureChannel(this._audio, this._keys) {
    _sessionId = _buildSessionId();
    _framer = Framer(_keys, _sessionId);
    _deframer = Deframer(_keys, _sessionId);
    _encoder = LpcCodec();
    _decoder = LpcCodec();
  }

  // ── Lifecycle ──────────────────────────────────────────────────────

  /// Initialise audio hardware (speaker ON, playback started).
  Future<void> open() async {
    await _audio.setSpeakerMode(true); // loudspeaker for acoustic coupling
    await _audio.startPlayback();
    _setState(ChannelState.txReady);
  }

  /// Stop all audio, clean up.
  Future<void> close() async {
    await _stopCapture();
    await _audio.stopPlayback();
    _framer.reset();
    _deframer.reset();
    _txBuffer.clear();
    _setState(ChannelState.idle);
  }

  // ── Transmit (PTT press) ───────────────────────────────────────────

  /// Begin transmitting: capture mic audio, encode LPC, modulate FSK, play.
  Future<void> startTransmit() async {
    if (_state == ChannelState.transmitting) return;

    // FIX: if a capture is already running (e.g. we were in receive mode),
    // tear it down first. Previously _captureSub was reassigned below without
    // cancelling, leaking the old subscription and double-starting capture.
    if (_captureSub != null) await _stopCapture();

    _txBuffer.clear();
    _setState(ChannelState.transmitting);

    // Start mic capture using UNPROCESSED source (no AEC / noise suppression).
    await _audio.startCapture(source: 9 /* AudioSource.UNPROCESSED */);

    _captureSub = _audio.captureStream.listen(_onTxSamples);
  }

  /// Stop transmitting (PTT release).
  Future<void> stopTransmit() async {
    if (_state != ChannelState.transmitting) return;
    await _stopCapture();
    _txBuffer.clear();
    _setState(ChannelState.txReady);
  }

  // ── Receive ────────────────────────────────────────────────────────

  /// Begin receive mode: capture mic audio (which picks up the earpiece FSK).
  Future<void> startReceive() async {
    if (_state == ChannelState.receiving) return;

    // FIX: same subscription-leak guard as in startTransmit — cancel any
    // in-flight capture before reassigning _captureSub below.
    if (_captureSub != null) await _stopCapture();

    _deframer.reset();
    _setState(ChannelState.receiving);

    // VOICE_COMMUNICATION (7) lets us capture audio while the call is active.
    // Falls back to UNPROCESSED (9) → MIC (1) inside AudioEngine.
    await _audio.startCapture(source: 7 /* AudioSource.VOICE_COMMUNICATION */);
    _captureSub = _audio.captureStream.listen(_onRxSamples);
  }

  /// Stop receive mode.
  Future<void> stopReceive() async {
    if (_state != ChannelState.receiving) return;
    await _stopCapture();
    _setState(ChannelState.txReady);
  }

  // ── TX pipeline ────────────────────────────────────────────────────

  /// Called for each 160-sample (20 ms) microphone chunk during TX.
  void _onTxSamples(Float64List samples) {
    txRms = AudioMath.rms(samples);

    // Convert Float64 → Int16 LE bytes for LPC encoder.
    final pcm = AudioMath.floatToInt16Bytes(samples);
    _txBuffer.add(pcm);

    // Once we have a full super-frame (10 sub-frames × 20 ms = 200 ms) encode.
    if (_txBuffer.length >= C.lpcSubframesPerSuper) {
      // Concatenate all sub-frame PCM into one super-frame buffer
      // (subframeSamples × subframes × 2 bytes per Int16 sample).
      final superPcm =
          Uint8List(C.lpcSubframeSamples * C.lpcSubframesPerSuper * 2);
      for (int i = 0; i < C.lpcSubframesPerSuper; i++) {
        superPcm.setRange(
          i * C.lpcSubframeSamples * 2,
          (i + 1) * C.lpcSubframeSamples * 2,
          _txBuffer[i],
        );
      }
      _txBuffer.clear();

      // LPC encode the super-frame into its compact bit payload.
      final lpcBits = _encoder.encode(superPcm);
      txFrames++;

      // Framer: encrypt + RS FEC + FSK modulate → audio samples.
      final fskAudio = _framer.frameAndModulate(lpcBits);

      // Play FSK audio through loudspeaker → goes into the cellular mic.
      _audio.writePlayback(fskAudio);
    }
  }

  // ── RX pipeline ────────────────────────────────────────────────────

  /// Called for each 160-sample (20 ms) microphone chunk during RX.
  void _onRxSamples(Float64List samples) {
    rxSignalRms = AudioMath.rms(samples);

    // Feed into FSK demodulator / deframer.
    _deframer.pushAudio(samples);

    // Drain any decoded LPC voice payloads.
    while (_deframer.voiceQueue.isNotEmpty) {
      final lpcBits = _deframer.voiceQueue.removeAt(0);
      final pcm = _decoder.decode(lpcBits);
      rxFrames++;

      // Play decoded voice through earpiece.
      _audio.writePlaybackBytes(pcm);
    }

    // Mirror deframer error counts to our own stats.
    rxErrors = _deframer.rxErrors;
  }

  // ── Helpers ────────────────────────────────────────────────────────

  /// Cancels the capture subscription (if any) and stops hardware capture.
  Future<void> _stopCapture() async {
    await _captureSub?.cancel();
    _captureSub = null;
    await _audio.stopCapture();
  }

  /// Records the new state and notifies listeners.
  void _setState(ChannelState s) {
    _state = s;
    // FIX: dispose() closes the controller; a late state change (e.g. a
    // close() racing dispose()) must not throw on the closed sink.
    if (!_stateController.isClosed) _stateController.add(s);
  }

  /// Derives a 4-byte session ID from the low 32 bits of the current
  /// microsecond timestamp, big-endian.
  static Uint8List _buildSessionId() {
    final ts = DateTime.now().microsecondsSinceEpoch;
    final sid = Uint8List(4);
    sid[0] = (ts >> 24) & 0xFF;
    sid[1] = (ts >> 16) & 0xFF;
    sid[2] = (ts >> 8) & 0xFF;
    sid[3] = ts & 0xFF;
    return sid;
  }

  /// Closes the channel and releases the state-change stream.
  Future<void> dispose() async {
    await close();
    await _stateController.close();
  }
}