//
// TGSineWaveToneGenerator.m
// Tone Generator
//
// Created by Anthony Picciano on 6/12/13.
// Copyright (c) 2013 Anthony Picciano. All rights reserved.
//
// Major contributions and updates by Simon Grätzer on 12/23/14.
//
// Based upon work by Matt Gallagher on 2010/10/20.
// Copyright 2010 Matt Gallagher. All rights reserved.
//
// Permission is given to use this source code file, free of charge, in any
// project, commercial or otherwise, entirely at your risk, with the condition
// that any redistribution (in part or whole) of source code must retain
// this copyright and permission notice. Attribution in compiled projects is
// appreciated but not required.
//
#import "TGSineWaveToneGenerator.h"
#import <AudioToolbox/AudioToolbox.h>
OSStatus RenderTone(void *inRefCon,
                    AudioUnitRenderActionFlags *ioActionFlags,
                    const AudioTimeStamp *inTimeStamp,
                    UInt32 inBusNumber,
                    UInt32 inNumberFrames,
                    AudioBufferList *ioData)
{
    // Get the tone parameters out of the generator object
    TGSineWaveToneGenerator *toneGenerator = (__bridge TGSineWaveToneGenerator *)inRefCon;
    assert(ioData->mNumberBuffers == toneGenerator->_numChannels);

    for (size_t chan = 0; chan < toneGenerator->_numChannels; chan++) {
        double theta = toneGenerator->_channels[chan].theta;
        double amplitude = toneGenerator->_channels[chan].amplitude;
        double theta_increment = 2.0 * M_PI * toneGenerator->_channels[chan].frequency / toneGenerator->_sampleRate;
        Float32 *buffer = (Float32 *)ioData->mBuffers[chan].mData;

        // Generate the samples
        for (UInt32 frame = 0; frame < inNumberFrames; frame++) {
            buffer[frame] = sin(theta) * amplitude;
            theta += theta_increment;
            // Wrap theta back into [0, 2π) so the phase never grows without bound
            if (theta > 2.0 * M_PI) {
                theta -= 2.0 * M_PI;
            }
        }

        // Store the phase back in the generator so the next callback continues seamlessly
        toneGenerator->_channels[chan].theta = theta;
    }
    return noErr;
}
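
// A quick worked example of the phase math above (illustrative numbers only,
// not taken from this project's defaults): at a 44,100 Hz sample rate and a
// 440 Hz tone, theta_increment = 2π * 440 / 44100 ≈ 0.0627 radians per frame,
// so one full 2π cycle of the sine wave spans roughly 44100 / 440 ≈ 100 frames.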

@implementation TGSineWaveToneGenerator

- (id)init
{
    return [self initWithFrequency:SINE_WAVE_TONE_GENERATOR_FREQUENCY_DEFAULT
                         amplitude:SINE_WAVE_TONE_GENERATOR_AMPLITUDE_DEFAULT];
}

- (id)initWithFrequency:(double)hertz amplitude:(double)volume {
    if (self = [super init]) {
        _numChannels = 1;
        _channels = calloc(_numChannels, sizeof(TGChannelInfo));
        if (_channels == NULL) return nil;

        _channels[0].frequency = hertz;
        _channels[0].amplitude = volume;
        _sampleRate = SINE_WAVE_TONE_GENERATOR_SAMPLE_RATE_DEFAULT;

        [self _setupAudioSession];

        // Legacy (pre-AVAudioSession) setup, kept for reference:
        // OSStatus result = AudioSessionInitialize(NULL, NULL, ToneInterruptionListener, (__bridge void *)(self));
        // if (result == kAudioSessionNoError)
        // {
        //     UInt32 sessionCategory = kAudioSessionCategory_MediaPlayback;
        //     AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
        // }
        // AudioSessionSetActive(true);
    }
    return self;
}

- (id)initWithChannels:(UInt32)size {
    if (self = [super init]) {
        _numChannels = size;
        _channels = calloc(_numChannels, sizeof(TGChannelInfo));
        if (_channels == NULL) return nil;

        for (size_t i = 0; i < _numChannels; i++) {
            // Arbitrary spread: each channel gets a different frequency derived from the default
            _channels[i].frequency = SINE_WAVE_TONE_GENERATOR_FREQUENCY_DEFAULT / (i + 0.4);
            _channels[i].amplitude = SINE_WAVE_TONE_GENERATOR_AMPLITUDE_DEFAULT;
        }
        _sampleRate = SINE_WAVE_TONE_GENERATOR_SAMPLE_RATE_DEFAULT;

        [self _setupAudioSession];
    }
    return self;
}

- (void)dealloc {
    [[NSNotificationCenter defaultCenter] removeObserver:self];
    if (_channels != NULL) {
        free(_channels);
    }
}

- (void)playForDuration:(NSTimeInterval)time {
    [self play];
    [self performSelector:@selector(stop) withObject:nil afterDelay:time];
}

- (void)play {
    if (!_toneUnit) {
        [self _createToneUnit];

        // Finalize the unit's parameters before starting it
        OSStatus err = AudioUnitInitialize(_toneUnit);
        NSAssert1(err == noErr, @"Error initializing unit: %d", (int)err);

        // Start playback
        err = AudioOutputUnitStart(_toneUnit);
        NSAssert1(err == noErr, @"Error starting unit: %d", (int)err);
    }
}

- (void)stop {
    if (_toneUnit) {
        AudioOutputUnitStop(_toneUnit);
        AudioUnitUninitialize(_toneUnit);
        AudioComponentInstanceDispose(_toneUnit);
        _toneUnit = NULL;
    }
}

- (void)_setupAudioSession {
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];

    NSError *setCategoryError = nil;
    BOOL ok = [audioSession setCategory:AVAudioSessionCategoryPlayback error:&setCategoryError];
    NSAssert1(ok, @"Audio error %@", setCategoryError);

    // Stop playback if the session is interrupted (e.g. by an incoming call)
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(_handleInterruption:)
                                                 name:AVAudioSessionInterruptionNotification
                                               object:audioSession];
}

- (void)_handleInterruption:(NSNotification *)notification {
    [self stop];
}

- (void)_createToneUnit {
    // Configure the search parameters to find the default playback output unit
    // (kAudioUnitSubType_RemoteIO on iOS, kAudioUnitSubType_DefaultOutput on Mac OS X)
    AudioComponentDescription defaultOutputDescription;
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    NSAssert(defaultOutput, @"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSStatus err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
    NSAssert1(_toneUnit, @"Error creating unit: %d", (int)err);

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = (__bridge void *)(self);
    err = AudioUnitSetProperty(_toneUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input,
                               0,
                               &input,
                               sizeof(input));
    NSAssert1(err == noErr, @"Error setting callback: %d", (int)err);

    // Set the format to 32-bit, non-interleaved, floating-point linear PCM:
    // one buffer per channel, which is why RenderTone indexes ioData->mBuffers[chan]
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat;
    streamFormat.mSampleRate = _sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
        kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mChannelsPerFrame = _numChannels;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty(_toneUnit,
                               kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input,
                               0,
                               &streamFormat,
                               sizeof(AudioStreamBasicDescription));
    NSAssert1(err == noErr, @"Error setting stream format: %d", (int)err);
}
@end
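
// Example usage (a minimal sketch, not part of the original file — the 440 Hz
// frequency, 0.5 amplitude, and 2-second duration are illustrative values only;
// the caller is assumed to keep a strong reference to the generator while it plays):
//
//     TGSineWaveToneGenerator *generator =
//         [[TGSineWaveToneGenerator alloc] initWithFrequency:440.0 amplitude:0.5];
//     [generator playForDuration:2.0];   // start the tone, auto-stop after 2 seconds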