SDL_sound_CoreAudio: annotate decoders/timidity/playmidi.c @ 474:c66080364dff
Most decoders now report total sample play time. Technically, this
breaks binary compatibility with the 1.0 branch, since it extends the
Sound_Sample struct, but most (all?) programs just pass around pointers
allocated by SDL_sound, so they should be okay.
Source-level compatibility is not broken...yet! :)
--ryan.
-------- Original Message --------
Subject: SDL_sound patch: Finding total length of time of sound file.
Date: Sun, 26 Jan 2003 09:31:17 -0800 (PST)
Hi Ryan,
I am working with Eric Wing and helping him modify
SDL_sound. As part of our efforts to improve and
enhance SDL_sound, we would like to submit this patch.
We modified the codecs to find the total time of a sound
file. Below is an explanation of the patch; the patch
itself is appended as an attachment to this email.
* MOTIVATION:
We needed the ability to get the total play time of a
sample (and we noticed that we're not the only ones).
Since SDL_sound blocks direct access to the specific
decoders, there is no way for a user to know this
information short of decoding the whole thing.
Because of this, we believe this will be a useful
addition, even though the accuracy may not be perfect
(subject to each decoder) or the information may not
always be available.
* CONTRIBUTORS:
Wesley Leong (modified the majority of the codecs and
verified the results)
Eric Wing (showed everyone how to modify a codec,
modified mikmod)
Wang Lam (modified a handful of codecs, researched
specs and int overflow)
Ahilan Anantha (modified a few codecs and helped with
integer math)
* GENERAL ISSUES:
We chose to report the value in milliseconds as an Sint32:
milliseconds because that's what Sound_Seek takes as a
parameter, and a signed type so that -1 can mark
instances/codecs where the value could not be determined.
We are not sure if this is the final convention you want,
so we are willing to work with you on this.
We also expect the total_time field to be set on open
and never again modified by SDL_sound. Users may
access it directly, much like the sample buffer and
buffer_size (see the usage sketch after the changeset
metadata below). We thought about recomputing the time
on DecodeAll, but since users may seek or decode small
chunks first, not all the data may be there, so this is
better done by the user. This may be good
information to document.
Currently, all the main codecs are implemented except
for QuickTime.
author   | Ryan C. Gordon <icculus@icculus.org>
date     | Sat, 08 May 2004 08:19:50 +0000
parents  | cbc2a4ffeeec
children |
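
Before the playmidi.c source below, here is a minimal usage sketch of the convention described in the patch notes above. It assumes the patched Sound_Sample struct exposes the new Sint32 total_time field (milliseconds, or -1 when a decoder cannot determine the length); the field name and its semantics come from the email, not from a released SDL_sound header, so treat this as illustrative only.

/* Hypothetical usage sketch: relies on the total_time field added by the
   patch described above (milliseconds, -1 if unknown), read directly from
   the Sound_Sample much like buffer and buffer_size. */
#include <stdio.h>
#include "SDL_sound.h"

int main(int argc, char **argv)
{
    Sound_Sample *sample;

    if (argc < 2 || !Sound_Init())
        return 1;

    /* NULL "desired" format: keep whatever the decoder produces. */
    sample = Sound_NewSampleFromFile(argv[1], NULL, 65536);
    if (sample == NULL) {
        fprintf(stderr, "open failed: %s\n", Sound_GetError());
        Sound_Quit();
        return 1;
    }

    if (sample->total_time < 0)
        printf("duration not reported by this decoder\n");
    else
        printf("duration: %d.%03d seconds\n",
               (int) (sample->total_time / 1000),
               (int) (sample->total_time % 1000));

    Sound_FreeSample(sample);
    Sound_Quit();
    return 0;
}

Reading the field directly (rather than recomputing it during DecodeAll) matches the convention proposed in the email: it is set once at open time and never touched again by SDL_sound.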
/*

    TiMidity -- Experimental MIDI to WAVE converter
    Copyright (C) 1995 Tuukka Toivonen <toivonen@clinet.fi>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    playmidi.c -- random stuff in need of rearrangement

*/

#if HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "SDL_sound.h"

#define __SDL_SOUND_INTERNAL__
#include "SDL_sound_internal.h"

#include "timidity.h"
#include "options.h"
#include "instrum.h"
#include "playmidi.h"
#include "output.h"
#include "mix.h"
#include "tables.h"

static void adjust_amplification(MidiSong *song)
{
  song->master_volume = (float)(song->amplification) / (float)100.0;
}

static void reset_voices(MidiSong *song)
{
  int i;
  for (i=0; i<MAX_VOICES; i++)
    song->voice[i].status=VOICE_FREE;
}

/* Process the Reset All Controllers event */
static void reset_controllers(MidiSong *song, int c)
{
  song->channel[c].volume=90; /* Some standard says, although the SCC docs say 0. */
  song->channel[c].expression=127; /* SCC-1 does this. */
  song->channel[c].sustain=0;
  song->channel[c].pitchbend=0x2000;
  song->channel[c].pitchfactor=0; /* to be computed */
}

static void reset_midi(MidiSong *song)
{
  int i;
  for (i=0; i<16; i++)
  {
    reset_controllers(song, i);
    /* The rest of these are unaffected by the Reset All Controllers event */
    song->channel[i].program=song->default_program;
    song->channel[i].panning=NO_PANNING;
    song->channel[i].pitchsens=2;
    song->channel[i].bank=0; /* tone bank or drum set */
  }
  reset_voices(song);
}

static void select_sample(MidiSong *song, int v, Instrument *ip, int vel)
{
  Sint32 f, cdiff, diff;
  int s,i;
  Sample *sp, *closest;

  s=ip->samples;
  sp=ip->sample;

  if (s==1)
  {
    song->voice[v].sample=sp;
    return;
  }

  f=song->voice[v].orig_frequency;
  for (i=0; i<s; i++)
  {
    if (sp->low_vel <= vel && sp->high_vel >= vel &&
        sp->low_freq <= f && sp->high_freq >= f)
    {
      song->voice[v].sample=sp;
      return;
    }
    sp++;
  }

  /*
    No suitable sample found! We'll select the sample whose root
    frequency is closest to the one we want. (Actually we should
    probably convert the low, high, and root frequencies to MIDI note
    values and compare those.) */

  cdiff=0x7FFFFFFF;
  closest=sp=ip->sample;
  for(i=0; i<s; i++)
  {
    diff=sp->root_freq - f;
    if (diff<0) diff=-diff;
    if (diff<cdiff)
    {
      cdiff=diff;
      closest=sp;
    }
    sp++;
  }
  song->voice[v].sample=closest;
  return;
}

static void recompute_freq(MidiSong *song, int v)
{
  int
    sign=(song->voice[v].sample_increment < 0), /* for bidirectional loops */
    pb=song->channel[song->voice[v].channel].pitchbend;
  double a;

  if (!song->voice[v].sample->sample_rate)
    return;

  if (song->voice[v].vibrato_control_ratio)
  {
    /* This instrument has vibrato. Invalidate any precomputed
       sample_increments. */

    int i=VIBRATO_SAMPLE_INCREMENTS;
    while (i--)
      song->voice[v].vibrato_sample_increment[i]=0;
  }

  if (pb==0x2000 || pb<0 || pb>0x3FFF)
    song->voice[v].frequency = song->voice[v].orig_frequency;
  else
  {
    pb-=0x2000;
    if (!(song->channel[song->voice[v].channel].pitchfactor))
    {
      /* Damn. Somebody bent the pitch. */
      Sint32 i=pb*song->channel[song->voice[v].channel].pitchsens;
      if (pb<0)
        i=-i;
      song->channel[song->voice[v].channel].pitchfactor=
        (float)(bend_fine[(i>>5) & 0xFF] * bend_coarse[i>>13]);
    }
    if (pb>0)
      song->voice[v].frequency=
        (Sint32)(song->channel[song->voice[v].channel].pitchfactor *
                 (double)(song->voice[v].orig_frequency));
    else
      song->voice[v].frequency=
        (Sint32)((double)(song->voice[v].orig_frequency) /
                 song->channel[song->voice[v].channel].pitchfactor);
  }

  a = FSCALE(((double)(song->voice[v].sample->sample_rate) *
              (double)(song->voice[v].frequency)) /
             ((double)(song->voice[v].sample->root_freq) *
              (double)(song->rate)),
             FRACTION_BITS);

  if (sign)
    a = -a; /* need to preserve the loop direction */

  song->voice[v].sample_increment = (Sint32)(a);
}

static void recompute_amp(MidiSong *song, int v)
{
  Sint32 tempamp;

  /* TODO: use fscale */

  tempamp= (song->voice[v].velocity *
            song->channel[song->voice[v].channel].volume *
            song->channel[song->voice[v].channel].expression); /* 21 bits */

  if (!(song->encoding & PE_MONO))
  {
    if (song->voice[v].panning > 60 && song->voice[v].panning < 68)
    {
      song->voice[v].panned=PANNED_CENTER;

      song->voice[v].left_amp=
        FSCALENEG((double)(tempamp) * song->voice[v].sample->volume * song->master_volume,
                  21);
    }
    else if (song->voice[v].panning<5)
    {
      song->voice[v].panned = PANNED_LEFT;

      song->voice[v].left_amp=
        FSCALENEG((double)(tempamp) * song->voice[v].sample->volume * song->master_volume,
                  20);
    }
    else if (song->voice[v].panning>123)
    {
      song->voice[v].panned = PANNED_RIGHT;

      song->voice[v].left_amp= /* left_amp will be used */
        FSCALENEG((double)(tempamp) * song->voice[v].sample->volume * song->master_volume,
                  20);
    }
    else
    {
      song->voice[v].panned = PANNED_MYSTERY;

      song->voice[v].left_amp=
        FSCALENEG((double)(tempamp) * song->voice[v].sample->volume * song->master_volume,
                  27);
      song->voice[v].right_amp = song->voice[v].left_amp * (song->voice[v].panning);
      song->voice[v].left_amp *= (float)(127 - song->voice[v].panning);
    }
  }
  else
  {
    song->voice[v].panned = PANNED_CENTER;

    song->voice[v].left_amp=
      FSCALENEG((double)(tempamp) * song->voice[v].sample->volume * song->master_volume,
                21);
  }
}

static void start_note(MidiSong *song, MidiEvent *e, int i)
{
  Instrument *ip;
  int j;

  if (ISDRUMCHANNEL(song, e->channel))
  {
    if (!(ip=song->drumset[song->channel[e->channel].bank]->instrument[e->a]))
    {
      if (!(ip=song->drumset[0]->instrument[e->a]))
        return; /* No instrument? Then we can't play. */
    }
    if (ip->samples != 1)
    {
      SNDDBG(("Strange: percussion instrument with %d samples!",
              ip->samples));
    }

    if (ip->sample->note_to_use) /* Do we have a fixed pitch? */
      song->voice[i].orig_frequency = freq_table[(int)(ip->sample->note_to_use)];
    else
      song->voice[i].orig_frequency = freq_table[e->a & 0x7F];

    /* drums are supposed to have only one sample */
    song->voice[i].sample = ip->sample;
  }
  else
  {
    if (song->channel[e->channel].program == SPECIAL_PROGRAM)
      ip=song->default_instrument;
    else if (!(ip=song->tonebank[song->channel[e->channel].bank]->
               instrument[song->channel[e->channel].program]))
    {
      if (!(ip=song->tonebank[0]->instrument[song->channel[e->channel].program]))
        return; /* No instrument? Then we can't play. */
    }

    if (ip->sample->note_to_use) /* Fixed-pitch instrument? */
      song->voice[i].orig_frequency = freq_table[(int)(ip->sample->note_to_use)];
    else
      song->voice[i].orig_frequency = freq_table[e->a & 0x7F];
    select_sample(song, i, ip, e->b);
  }

  song->voice[i].status = VOICE_ON;
  song->voice[i].channel = e->channel;
  song->voice[i].note = e->a;
  song->voice[i].velocity = e->b;
  song->voice[i].sample_offset = 0;
  song->voice[i].sample_increment = 0; /* make sure it isn't negative */

  song->voice[i].tremolo_phase = 0;
  song->voice[i].tremolo_phase_increment = song->voice[i].sample->tremolo_phase_increment;
  song->voice[i].tremolo_sweep = song->voice[i].sample->tremolo_sweep_increment;
  song->voice[i].tremolo_sweep_position = 0;

  song->voice[i].vibrato_sweep = song->voice[i].sample->vibrato_sweep_increment;
  song->voice[i].vibrato_sweep_position = 0;
  song->voice[i].vibrato_control_ratio = song->voice[i].sample->vibrato_control_ratio;
  song->voice[i].vibrato_control_counter = song->voice[i].vibrato_phase = 0;
  for (j=0; j<VIBRATO_SAMPLE_INCREMENTS; j++)
    song->voice[i].vibrato_sample_increment[j] = 0;

  if (song->channel[e->channel].panning != NO_PANNING)
    song->voice[i].panning = song->channel[e->channel].panning;
  else
    song->voice[i].panning = song->voice[i].sample->panning;

  recompute_freq(song, i);
  recompute_amp(song, i);
  if (song->voice[i].sample->modes & MODES_ENVELOPE)
  {
    /* Ramp up from 0 */
    song->voice[i].envelope_stage = 0;
    song->voice[i].envelope_volume = 0;
    song->voice[i].control_counter = 0;
    recompute_envelope(song, i);
    apply_envelope_to_amp(song, i);
  }
  else
  {
    song->voice[i].envelope_increment = 0;
    apply_envelope_to_amp(song, i);
  }
}

static void kill_note(MidiSong *song, int i)
{
  song->voice[i].status = VOICE_DIE;
}

/* Only one instance of a note can be playing on a single channel. */
static void note_on(MidiSong *song)
{
  int i = song->voices, lowest=-1;
  Sint32 lv=0x7FFFFFFF, v;
  MidiEvent *e = song->current_event;

  while (i--)
  {
    if (song->voice[i].status == VOICE_FREE)
      lowest=i; /* Can't get a lower volume than silence */
    else if (song->voice[i].channel==e->channel &&
             (song->voice[i].note==e->a || song->channel[song->voice[i].channel].mono))
      kill_note(song, i);
  }

  if (lowest != -1)
  {
    /* Found a free voice. */
    start_note(song,e,lowest);
    return;
  }

  /* Look for the decaying note with the lowest volume */
  i = song->voices;
  while (i--)
  {
    if ((song->voice[i].status != VOICE_ON) &&
        (song->voice[i].status != VOICE_DIE))
    {
      v = song->voice[i].left_mix;
      if ((song->voice[i].panned == PANNED_MYSTERY)
          && (song->voice[i].right_mix > v))
        v = song->voice[i].right_mix;
      if (v<lv)
      {
        lv=v;
        lowest=i;
      }
    }
  }

  if (lowest != -1)
  {
    /* This can still cause a click, but if we had a free voice to
       spare for ramping down this note, we wouldn't need to kill it
       in the first place... Still, this needs to be fixed. Perhaps
       we could use a reserve of voices to play dying notes only. */

    song->cut_notes++;
    song->voice[lowest].status=VOICE_FREE;
    start_note(song,e,lowest);
  }
  else
    song->lost_notes++;
}

static void finish_note(MidiSong *song, int i)
{
  if (song->voice[i].sample->modes & MODES_ENVELOPE)
  {
    /* We need to get the envelope out of Sustain stage */
    song->voice[i].envelope_stage = 3;
    song->voice[i].status = VOICE_OFF;
    recompute_envelope(song, i);
    apply_envelope_to_amp(song, i);
  }
  else
  {
    /* Set status to OFF so resample_voice() will let this voice out
       of its loop, if any. In any case, this voice dies when it
       hits the end of its data (ofs>=data_length). */
    song->voice[i].status = VOICE_OFF;
  }
}

static void note_off(MidiSong *song)
{
  int i = song->voices;
  MidiEvent *e = song->current_event;

  while (i--)
    if (song->voice[i].status == VOICE_ON &&
        song->voice[i].channel == e->channel &&
        song->voice[i].note == e->a)
    {
      if (song->channel[e->channel].sustain)
      {
        song->voice[i].status = VOICE_SUSTAINED;
      }
      else
        finish_note(song, i);
      return;
    }
}

/* Process the All Notes Off event */
static void all_notes_off(MidiSong *song)
{
  int i = song->voices;
  int c = song->current_event->channel;

  SNDDBG(("All notes off on channel %d", c));
  while (i--)
    if (song->voice[i].status == VOICE_ON &&
        song->voice[i].channel == c)
    {
      if (song->channel[c].sustain)
        song->voice[i].status = VOICE_SUSTAINED;
      else
        finish_note(song, i);
    }
}

/* Process the All Sounds Off event */
static void all_sounds_off(MidiSong *song)
{
  int i = song->voices;
  int c = song->current_event->channel;

  while (i--)
    if (song->voice[i].channel == c &&
        song->voice[i].status != VOICE_FREE &&
        song->voice[i].status != VOICE_DIE)
    {
      kill_note(song, i);
    }
}

static void adjust_pressure(MidiSong *song)
{
  MidiEvent *e = song->current_event;
  int i = song->voices;

  while (i--)
    if (song->voice[i].status == VOICE_ON &&
        song->voice[i].channel == e->channel &&
        song->voice[i].note == e->a)
    {
      song->voice[i].velocity = e->b;
      recompute_amp(song, i);
      apply_envelope_to_amp(song, i);
      return;
    }
}

static void drop_sustain(MidiSong *song)
{
  int i = song->voices;
  int c = song->current_event->channel;

  while (i--)
    if (song->voice[i].status == VOICE_SUSTAINED && song->voice[i].channel == c)
      finish_note(song, i);
}

static void adjust_pitchbend(MidiSong *song)
{
  int c = song->current_event->channel;
  int i = song->voices;

  while (i--)
    if (song->voice[i].status != VOICE_FREE && song->voice[i].channel == c)
    {
      recompute_freq(song, i);
    }
}

static void adjust_volume(MidiSong *song)
{
  int c = song->current_event->channel;
  int i = song->voices;

  while (i--)
    if (song->voice[i].channel == c &&
        (song->voice[i].status==VOICE_ON || song->voice[i].status==VOICE_SUSTAINED))
    {
      recompute_amp(song, i);
      apply_envelope_to_amp(song, i);
    }
}

static void seek_forward(MidiSong *song, Sint32 until_time)
{
  reset_voices(song);
  while (song->current_event->time < until_time)
  {
    switch(song->current_event->type)
    {
      /* All notes stay off. Just handle the parameter changes. */

      case ME_PITCH_SENS:
        song->channel[song->current_event->channel].pitchsens =
          song->current_event->a;
        song->channel[song->current_event->channel].pitchfactor = 0;
        break;

      case ME_PITCHWHEEL:
        song->channel[song->current_event->channel].pitchbend =
          song->current_event->a + song->current_event->b * 128;
        song->channel[song->current_event->channel].pitchfactor = 0;
        break;

      case ME_MAINVOLUME:
        song->channel[song->current_event->channel].volume =
          song->current_event->a;
        break;

      case ME_PAN:
        song->channel[song->current_event->channel].panning =
          song->current_event->a;
        break;

      case ME_EXPRESSION:
        song->channel[song->current_event->channel].expression =
          song->current_event->a;
        break;

      case ME_PROGRAM:
        if (ISDRUMCHANNEL(song, song->current_event->channel))
          /* Change drum set */
          song->channel[song->current_event->channel].bank =
            song->current_event->a;
        else
          song->channel[song->current_event->channel].program =
            song->current_event->a;
        break;

      case ME_SUSTAIN:
        song->channel[song->current_event->channel].sustain =
          song->current_event->a;
        break;

      case ME_RESET_CONTROLLERS:
        reset_controllers(song, song->current_event->channel);
        break;

      case ME_TONE_BANK:
        song->channel[song->current_event->channel].bank =
          song->current_event->a;
        break;

      case ME_EOT:
        song->current_sample = song->current_event->time;
        return;
    }
    song->current_event++;
  }
  /*song->current_sample=song->current_event->time;*/
  if (song->current_event != song->events)
    song->current_event--;
  song->current_sample=until_time;
}

static void skip_to(MidiSong *song, Sint32 until_time)
{
  if (song->current_sample > until_time)
    song->current_sample = 0;

  reset_midi(song);
  song->buffered_count = 0;
  song->buffer_pointer = song->common_buffer;
  song->current_event = song->events;

  if (until_time)
    seek_forward(song, until_time);
}

static void do_compute_data(MidiSong *song, Sint32 count)
{
  int i;
  memset(song->buffer_pointer, 0,
         (song->encoding & PE_MONO) ? (count * 4) : (count * 8));
  for (i = 0; i < song->voices; i++)
  {
    if(song->voice[i].status != VOICE_FREE)
      mix_voice(song, song->buffer_pointer, i, count);
  }
  song->current_sample += count;
}

/* count=0 means flush remaining buffered data to output device, then
   flush the device itself */
static void compute_data(MidiSong *song, void *stream, Sint32 count)
{
  int channels;

  if ( song->encoding & PE_MONO )
    channels = 1;
  else
    channels = 2;

  if (!count)
  {
    if (song->buffered_count)
      song->write(stream, song->common_buffer, channels * song->buffered_count);
    song->buffer_pointer = song->common_buffer;
    song->buffered_count = 0;
    return;
  }

  while ((count + song->buffered_count) >= song->buffer_size)
  {
    do_compute_data(song, song->buffer_size - song->buffered_count);
    count -= song->buffer_size - song->buffered_count;
    song->write(stream, song->common_buffer, channels * song->buffer_size);
    song->buffer_pointer = song->common_buffer;
    song->buffered_count = 0;
  }
  if (count>0)
  {
    do_compute_data(song, count);
    song->buffered_count += count;
    song->buffer_pointer += (song->encoding & PE_MONO) ? count : count*2;
  }
}

void Timidity_Start(MidiSong *song)
{
  song->playing = 1;
  adjust_amplification(song);
  skip_to(song, 0);
}

void Timidity_Seek(MidiSong *song, Uint32 ms)
{
  skip_to(song, (ms * song->rate) / 1000);
}

Uint32 Timidity_GetSongLength(MidiSong *song)
{
  MidiEvent *last_event = &song->events[song->groomed_event_count - 1];
  /* We want last_event->time * 1000 / song->rate, computed in two steps
     so the intermediate multiply can't overflow 32 bits for long songs. */
  Uint32 retvalue = (last_event->time / song->rate) * 1000;
  retvalue += (last_event->time % song->rate) * 1000 / song->rate;
  return retvalue;
}

int Timidity_PlaySome(MidiSong *song, void *stream, Sint32 len)
{
  Sint32 start_sample, end_sample, samples;
  int bytes_per_sample;

  if (!song->playing)
    return 0;

  bytes_per_sample =
    ((song->encoding & PE_MONO) ? 1 : 2)
    * ((song->encoding & PE_16BIT) ? 2 : 1);
  samples = len / bytes_per_sample;

  start_sample = song->current_sample;
  end_sample = song->current_sample+samples;
  while ( song->current_sample < end_sample ) {
    /* Handle all events that should happen at this time */
    while (song->current_event->time <= song->current_sample) {
      switch(song->current_event->type) {

        /* Effects affecting a single note */

        case ME_NOTEON:
          if (!(song->current_event->b)) /* Velocity 0? */
            note_off(song);
          else
            note_on(song);
          break;

        case ME_NOTEOFF:
          note_off(song);
          break;

        case ME_KEYPRESSURE:
          adjust_pressure(song);
          break;

        /* Effects affecting a single channel */

        case ME_PITCH_SENS:
          song->channel[song->current_event->channel].pitchsens =
            song->current_event->a;
          song->channel[song->current_event->channel].pitchfactor = 0;
          break;

        case ME_PITCHWHEEL:
          song->channel[song->current_event->channel].pitchbend =
            song->current_event->a + song->current_event->b * 128;
          song->channel[song->current_event->channel].pitchfactor = 0;
          /* Adjust pitch for notes already playing */
          adjust_pitchbend(song);
          break;

        case ME_MAINVOLUME:
          song->channel[song->current_event->channel].volume =
            song->current_event->a;
          adjust_volume(song);
          break;

        case ME_PAN:
          song->channel[song->current_event->channel].panning =
            song->current_event->a;
          break;

        case ME_EXPRESSION:
          song->channel[song->current_event->channel].expression =
            song->current_event->a;
          adjust_volume(song);
          break;

        case ME_PROGRAM:
          if (ISDRUMCHANNEL(song, song->current_event->channel)) {
            /* Change drum set */
            song->channel[song->current_event->channel].bank =
              song->current_event->a;
          }
          else
            song->channel[song->current_event->channel].program =
              song->current_event->a;
          break;

        case ME_SUSTAIN:
          song->channel[song->current_event->channel].sustain =
            song->current_event->a;
          if (!song->current_event->a)
            drop_sustain(song);
          break;

        case ME_RESET_CONTROLLERS:
          reset_controllers(song, song->current_event->channel);
          break;

        case ME_ALL_NOTES_OFF:
          all_notes_off(song);
          break;

        case ME_ALL_SOUNDS_OFF:
          all_sounds_off(song);
          break;

        case ME_TONE_BANK:
          song->channel[song->current_event->channel].bank =
            song->current_event->a;
          break;

        case ME_EOT:
          /* Give the last notes a couple of seconds to decay */
          SNDDBG(("Playing time: ~%d seconds\n",
                  song->current_sample/song->rate+2));
          SNDDBG(("Notes cut: %d\n", song->cut_notes));
          SNDDBG(("Notes lost totally: %d\n", song->lost_notes));
          song->playing = 0;
          return (song->current_sample - start_sample) * bytes_per_sample;
      }
      song->current_event++;
    }
    if (song->current_event->time > end_sample)
      compute_data(song, stream, end_sample-song->current_sample);
    else
      compute_data(song, stream, song->current_event->time-song->current_sample);
  }
  return samples * bytes_per_sample;
}

void Timidity_SetVolume(MidiSong *song, int volume)
{
  int i;
  if (volume > MAX_AMPLIFICATION)
    song->amplification = MAX_AMPLIFICATION;
  else
    if (volume < 0)
      song->amplification = 0;
    else
      song->amplification = volume;
  adjust_amplification(song);
  for (i = 0; i < song->voices; i++)
    if (song->voice[i].status != VOICE_FREE)
    {
      recompute_amp(song, i);
      apply_envelope_to_amp(song, i);
    }
}
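
As a closing note on how the new API above could be wired up: the following is a hypothetical sketch, not the actual decoders/timidity.c glue (which is not shown on this page). Everything except Timidity_GetSongLength() and the total_time field described in the patch email is invented for illustration.

/* Hypothetical sketch only: the real SDL_sound timidity decoder glue lives
   in decoders/timidity.c. Names other than Timidity_GetSongLength() and
   total_time are made up for illustration. */
static int example_fill_total_time(Sound_Sample *sample, MidiSong *song)
{
  /* Timidity_GetSongLength() already returns milliseconds, so it can be
     stored directly; fall back to -1 when no song could be loaded. */
  sample->total_time = (song != NULL) ? (Sint32) Timidity_GetSongLength(song) : -1;
  return (song != NULL);
}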