mirror of
https://github.com/pierre42100/comunic
synced 2025-06-23 10:35:18 +00:00
First commit
This commit is contained in:
BIN
tools/speaker/msie_flashFallback/as3/.DS_Store
vendored
Executable file
BIN
tools/speaker/msie_flashFallback/as3/.DS_Store
vendored
Executable file
Binary file not shown.
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/.DS_Store
vendored
Executable file
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/.DS_Store
vendored
Executable file
Binary file not shown.
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/.DS_Store
vendored
Executable file
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/.DS_Store
vendored
Executable file
Binary file not shown.
68
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/meSpeakFallback.as
Executable file
68
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/meSpeakFallback.as
Executable file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
meSpeakFallback.as
|
||||
An experimental fallback option for meSpeak to play wav files
|
||||
Wav-files are sent as a plain array of uint 8-bit data via ExternalInterface
|
||||
|
||||
JS interface:
|
||||
meSpeakFallback.play( <wav-array> );
|
||||
meSpeak.setVolume( value ) // 0 >= value <= 1
|
||||
|
||||
Handshake: calls JS function 'meSpeakFallbackHandshake()', when initialized and ready
|
||||
|
||||
Norbert Landsteiner, www.masswerk.at, July 2013
|
||||
*/
|
||||
package {
	import flash.display.Sprite;
	import flash.events.Event;
	import flash.external.ExternalInterface;
	import flash.media.Sound;
	import flash.media.SoundTransform;
	import flash.utils.ByteArray;
	import flash.utils.setTimeout;

	import org.as3wavsound.WavSound;
	import org.as3wavsound.WavSoundChannel;

	/**
	 * Flash fallback for meSpeak: wav files arrive from JavaScript as plain
	 * arrays of uint 8-bit values via ExternalInterface and are played back
	 * through the as3wavsound library.
	 *
	 * JS interface:
	 *   meSpeakFallback.play( <wav-array> );
	 *   meSpeak.setVolume( value )   // 0 <= value <= 1
	 *
	 * Handshake: calls the JS function 'meSpeakFallbackHandshake' once the
	 * ExternalInterface callbacks are registered and ready.
	 */
	public class meSpeakFallback extends Sprite {

		// shared playback transform (volume 1, pan 0) applied to every play() call
		private var masterTransform:SoundTransform = new SoundTransform(1, 0);

		public function meSpeakFallback() {
			initExtIF();
		}

		/**
		 * Registers the JS-facing callbacks and signals readiness via the
		 * handshake. Retries every 100 ms until the ExternalInterface bridge
		 * becomes available (it may not be ready at construction time).
		 */
		private function initExtIF():void {
			var ready:Boolean = false;
			try {
				if (ExternalInterface.available) {
					ExternalInterface.addCallback("play", play);
					ExternalInterface.addCallback("setVolume", setVolume);
					ExternalInterface.call("meSpeakFallbackHandshake");
					ready = true;
				}
			}
			catch (e:Error) {}
			if (!ready) setTimeout(initExtIF, 100);
		}

		/**
		 * Sets the global playback volume. Values outside [0, 1] are ignored.
		 */
		public function setVolume(v:Number = 0):void {
			if (v >= 0 && v <= 1) masterTransform.volume = v;
		}

		/**
		 * Plays a wav file handed over as a plain array of uint 8-bit values.
		 * Empty or missing input is ignored.
		 */
		public function play(data:Array = null):void {
			if (!data) return;
			var total:uint = data.length;
			if (!total) return;
			// copy the JS array into a ByteArray (costs time and memory)
			var bytes:ByteArray = new ByteArray();
			for (var idx:uint = 0; idx < total; idx++) {
				bytes.writeByte(data[idx]);
			}
			// play the decoded sound using the shared volume/pan transform
			var sound:WavSound = new WavSound(bytes);
			var channel:WavSoundChannel = sound.play(0, 0, masterTransform);
		}
	}
}
|
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/org/.DS_Store
vendored
Executable file
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/org/.DS_Store
vendored
Executable file
Binary file not shown.
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/org/as3wavsound/.DS_Store
vendored
Executable file
BIN
tools/speaker/msie_flashFallback/as3/meSpeakFallback/src/org/as3wavsound/.DS_Store
vendored
Executable file
Binary file not shown.
@ -0,0 +1,201 @@
|
||||
/*
|
||||
* --------------------------------------
|
||||
* Benny Bottema -- WavSound Sound adaption
|
||||
* http://blog.projectnibble.org/
|
||||
* --------------------------------------
|
||||
* sazameki -- audio manipulating library
|
||||
* http://sazameki.org/
|
||||
* --------------------------------------
|
||||
*
|
||||
* - developed by:
|
||||
* Benny Bottema
|
||||
* blog.projectnibble.org
|
||||
* hosted by:
|
||||
* Google Code (code.google.com)
|
||||
* code.google.com/p/as3wavsound/
|
||||
*
|
||||
* - audio library in its original state developed by:
|
||||
* Takaaki Yamazaki
|
||||
* www.zkdesign.jp
|
||||
* hosted by:
|
||||
* Spark project (www.libspark.org)
|
||||
* www.libspark.org/svn/as3/sazameki/branches/fp10/
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed under the MIT License
|
||||
*
|
||||
* Copyright (c) 2008 Takaaki Yamazaki
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
package org.as3wavsound {
	import flash.media.SoundTransform;
	import flash.utils.ByteArray;
	import org.as3wavsound.sazameki.core.AudioSamples;
	import org.as3wavsound.sazameki.core.AudioSetting;
	import org.as3wavsound.sazameki.format.wav.Wav;

	/**
	 * Sound-like class that plays WAVE data directly. Acts as a facade for
	 * loading, extracting, decoding and playing wav sound data; one instance
	 * represents a single sound. Designed to mirror Adobe's Sound API.
	 *
	 * Usage: embed .wav files as you would mp3's and play them with this
	 * class. Provide mimetype 'application/octet-stream' when embedding so
	 * Flash makes the data available as a ByteArray.
	 *
	 * Example:
	 *   [Embed(source = "drumloop.wav", mimeType = "application/octet-stream")]
	 *   public const DrumLoop:Class;
	 *   public const rain:WavSound = new WavSound(new DrumLoop() as ByteArray);
	 *
	 * @author Benny Bottema
	 */
	public class WavSound {

		// the master Sound player, which mixes all playing WavSound samples on any given moment
		private static const player:WavSoundPlayer = new WavSoundPlayer();

		// byte length of the original encoded wav data
		private var _bytesTotal:Number;
		// decoded sample data used for mixing
		private var _samples:AudioSamples;
		// per-sound mono/stereo playback configuration
		private var _playbackSettings:AudioSetting;
		// total sound length in milliseconds, computed once at load time
		private var _length:Number;

		/**
		 * Constructor: decodes the given wav data via load().
		 *
		 * @param wavData A ByteArray containing uncompressed wav data.
		 * @param audioSettings An optional playback configuration (mono/stereo,
		 *        sample rate and bit rate).
		 */
		public function WavSound(wavData:ByteArray, audioSettings:AudioSetting = null) {
			load(wavData, audioSettings);
		}

		/**
		 * Key function: decodes WAVE data into playable samples and
		 * precomputes the sound length in milliseconds.
		 *
		 * @param wavData The byte array of the embedded .wav file (octet-stream).
		 * @param audioSettings Optional settings for playback (the sample rate
		 *        will be enforced if it differs from the .wav header data or
		 *        the header is missing).
		 * @see Wav#decode(ByteArray)
		 */
		internal function load(wavData:ByteArray, audioSettings:AudioSetting = null):void {
			_bytesTotal = wavData.length;
			_samples = new Wav().decode(wavData, audioSettings);
			_playbackSettings = (audioSettings != null) ? audioSettings : new AudioSetting();
			_length = _samples.length / _samples.setting.sampleRate * 1000;
		}

		/**
		 * See Adobe's Sound.play():
		 * http://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/flash/media/Sound.html#play().
		 *
		 * Delegates to the master player, which calculates the starting phase
		 * from startTime, initializes the loop counter and registers the new
		 * channel for mixing.
		 *
		 * @param startTime The starting time in milliseconds, applied to each loop
		 *        (as with regular MP3 Sounds).
		 * @param loops The number of loops *in addition* to the default playback
		 *        (loops == 2 means 3 playthroughs).
		 * @param sndTransform An optional soundtransform controlling volume and panning.
		 * @return The SoundChannel used for playing back the sound (and stopping it).
		 */
		public function play(startTime:Number = 0, loops:int = 0, sndTransform:SoundTransform = null):WavSoundChannel {
			return player.play(this, startTime, loops, sndTransform);
		}

		/**
		 * No idea if this works. Alpha state. Read up on Sound.extract():
		 * http://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/flash/media/Sound.html#extract()
		 *
		 * NOTE(review): 'length' is treated as an end position rather than a
		 * sample count — kept as-is for compatibility with existing callers.
		 *
		 * Apparently, some people have used this succesfully, see comment 1 on
		 * Issue 11: http://code.google.com/p/as3wavsound/issues/detail?id=11#c1
		 */
		public function extract(target:ByteArray, length:Number, startPosition:Number = -1):Number {
			var first:Number = Math.max(startPosition, 0);
			var last:Number = Math.min(length, _samples.length);
			// mono sounds duplicate the left channel into the right slot
			var stereo:Boolean = _samples.setting.channels == 2;

			for (var pos:Number = first; pos < last; pos++) {
				target.writeFloat(_samples.left[pos]);
				target.writeFloat(stereo ? _samples.right[pos] : _samples.left[pos]);
			}

			return _samples.length;
		}

		/**
		 * Returns the total bytes of the wavData this WavSound was created with
		 * (always fully "loaded", since the data is embedded).
		 *
		 * Note: probably legacy — backwards compatibility with Adobe's Sound
		 * was dropped in v0.7.
		 */
		public function get bytesLoaded () : uint {
			return _bytesTotal;
		}

		/**
		 * Returns the total bytes of the wavData this WavSound was created with.
		 *
		 * Note: probably legacy — backwards compatibility with Adobe's Sound
		 * was dropped in v0.7.
		 */
		public function get bytesTotal () : int {
			return _bytesTotal;
		}

		/**
		 * Returns the total length of the sound in milliseconds.
		 */
		public function get length() : Number {
			return _length;
		}

		internal function get samples():AudioSamples {
			return _samples;
		}

		/**
		 * _playbackSettings is set when the load() function is called.
		 */
		internal function get playbackSettings():AudioSetting {
			return _playbackSettings;
		}
	}
}
|
@ -0,0 +1,229 @@
|
||||
/*
|
||||
* --------------------------------------
|
||||
* Benny Bottema -- WavSound Sound adaption
|
||||
* http://blog.projectnibble.org/
|
||||
* --------------------------------------
|
||||
* sazameki -- audio manipulating library
|
||||
* http://sazameki.org/
|
||||
* --------------------------------------
|
||||
*
|
||||
* - developed by:
|
||||
* Benny Bottema
|
||||
* blog.projectnibble.org
|
||||
* hosted by:
|
||||
* Google Code (code.google.com)
|
||||
* code.google.com/p/as3wavsound/
|
||||
*
|
||||
* - audio library in its original state developed by:
|
||||
* Takaaki Yamazaki
|
||||
* www.zkdesign.jp
|
||||
* hosted by:
|
||||
* Spark project (www.libspark.org)
|
||||
* www.libspark.org/svn/as3/sazameki/branches/fp10/
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed under the MIT License
|
||||
*
|
||||
* Copyright (c) 2008 Takaaki Yamazaki
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
package org.as3wavsound {
	import flash.events.EventDispatcher;
	import flash.media.SoundChannel;
	import flash.events.Event;
	import flash.media.SoundTransform;
	import org.as3wavsound.sazameki.core.AudioSamples;
	import org.as3wavsound.WavSound;

	/**
	 * Keeps track of one open channel during playback. Each channel represents
	 * an 'instance' of a sound, and each channel is responsible for its own
	 * mixing: the WavSoundPlayer calls buffer() to make this channel mix its
	 * own samples into the master buffer.
	 *
	 * WavSound uses the WavSoundPlayer to play instances of itself and returns
	 * the WavSoundChannel produced by the player.
	 *
	 * Dispatches Event.SOUND_COMPLETE when the last sample has been mixed into
	 * the master buffer.
	 *
	 * @author Benny Bottema
	 */
	public class WavSoundChannel extends EventDispatcher {

		/*
		 * creation-time information
		 */

		// The player to delegate stop() requests to.
		private var player:WavSoundPlayer;

		// a WavSound currently playing back on this channel instance
		// (there can be multiple channel instances for the same WavSound).
		private var _wavSound:WavSound;

		// works the same as Adobe's SoundChannel.soundTransform
		private var _soundTransform:SoundTransform = new SoundTransform();

		/*
		 * play-time information *per WavSound instance*
		 */

		// starting sample position (derived from startTime), cached to avoid recalculating
		private var startPhase:Number;
		// current sample position within the WavSound
		private var phase:Number = 0;
		// the current average volume of samples buffered to the left audio channel
		private var _leftPeak:Number = 0;
		// the current average volume of samples buffered to the right audio channel
		private var _rightPeak:Number = 0;
		// how many additional playthroughs still need to be buffered
		private var loopsLeft:Number;
		// true once the phase reached the total sample count with no loops left
		private var finished:Boolean;

		/**
		 * Constructor: stores collaborators and pre-calculates the starting
		 * phase (performing some validation), see init().
		 */
		public function WavSoundChannel(player:WavSoundPlayer, wavSound:WavSound, startTime:Number, loops:int, soundTransform:SoundTransform) {
			this.player = player;
			this._wavSound = wavSound;
			if (soundTransform != null) {
				this._soundTransform = soundTransform;
			}
			init(startTime, loops);
		}

		/**
		 * Converts the starting time in milliseconds into a sample position and
		 * marks it as the starting phase. Also resets the finished state and
		 * sets 'loopsLeft' to the given 'loops' value.
		 *
		 * @throws Error when startTime exceeds the sound's length.
		 */
		internal function init(startTime:Number, loops:int):void {
			var startPositionInMillis:Number = Math.floor(startTime);
			var maxPositionInMillis:Number = Math.floor(_wavSound.length);
			if (startPositionInMillis > maxPositionInMillis) {
				throw new Error("startTime greater than sound's length, max startTime is " + maxPositionInMillis);
			}
			phase = startPhase = Math.floor(startPositionInMillis * _wavSound.samples.length / _wavSound.length);
			finished = false;
			loopsLeft = loops;
		}

		/**
		 * Tells the WavSoundPlayer to stop this specific WavSoundChannel instance.
		 */
		public function stop():void {
			player.stop(this);
		}

		/**
		 * Called from WavSoundPlayer when the player is ready to mix new
		 * samples into the master sample buffer.
		 *
		 * Fills the target buffer with (optionally transformed) samples from
		 * this channel until the last samples are buffered or the buffer size
		 * is reached. When the buffer is full, 'phase' and 'loopsLeft' keep
		 * track of which samples still need buffering in the next cycle.
		 *
		 * Dispatches Event.SOUND_COMPLETE once the final sample was buffered.
		 *
		 * @param sampleBuffer The target buffer to mix the current (transformed) samples into.
		 */
		internal function buffer(sampleBuffer:AudioSamples):void {
			// calculate per-channel volume factors from volume and panning
			// (fixed: dropped a no-op '/ 1' on the volume)
			var volume:Number = _soundTransform.volume;
			var volumeLeft:Number = volume * (1 - _soundTransform.pan) / 2;
			var volumeRight:Number = volume * (1 + _soundTransform.pan) / 2;
			// channel settings
			var needRightChannel:Boolean = _wavSound.playbackSettings.channels == 2;
			var hasRightChannel:Boolean = _wavSound.samples.setting.channels == 2;

			// extra references to avoid excessive getter calls in the following
			// for-loop (it appears CPU is being hogged otherwise)
			var samplesLength:Number = _wavSound.samples.length;
			var samplesLeft:Vector.<Number> = _wavSound.samples.left;
			var samplesRight:Vector.<Number> = _wavSound.samples.right;
			var sampleBufferLength:Number = sampleBuffer.length;
			var sampleBufferLeft:Vector.<Number> = sampleBuffer.left;
			var sampleBufferRight:Vector.<Number> = sampleBuffer.right;

			var leftPeakRecord:Number = 0;
			var rightPeakRecord:Number = 0;

			// finally, mix the samples into the master sample buffer
			if (!finished) {
				for (var i:int = 0; i < sampleBufferLength; i++) {
					if (!finished) {
						// write (transformed) samples to buffer
						var sampleLeft:Number = samplesLeft[phase] * volumeLeft;
						sampleBufferLeft[i] += sampleLeft;
						leftPeakRecord += sampleLeft;
						// mono sources feed the left samples into the right channel
						var channelValue:Number = ((needRightChannel && hasRightChannel) ? samplesRight[phase] : samplesLeft[phase]);
						var sampleRight:Number = channelValue * volumeRight;
						sampleBufferRight[i] += sampleRight;
						rightPeakRecord += sampleRight;

						// advance play position, wrapping around while loops remain
						if (++phase >= samplesLength) {
							phase = startPhase;
							finished = loopsLeft-- == 0;
						}
					}
				}

				if (finished) {
					dispatchEvent(new Event(Event.SOUND_COMPLETE));
				}
			}

			_leftPeak = leftPeakRecord / sampleBufferLength;
			// fixed: statement previously relied on automatic semicolon insertion
			_rightPeak = rightPeakRecord / sampleBufferLength;
		}

		public function get leftPeak():Number {
			return _leftPeak;
		}

		public function get rightPeak():Number {
			return _rightPeak;
		}

		/**
		 * Returns the current position in milliseconds:
		 *
		 * phase * wavSound.length / wavSound.samples.length
		 */
		public function get position():Number {
			return phase * _wavSound.length / _wavSound.samples.length;
		}

		public function get soundTransform():SoundTransform {
			return _soundTransform;
		}
	}
}
|
@ -0,0 +1,160 @@
|
||||
/*
|
||||
* --------------------------------------
|
||||
* Benny Bottema -- WavSound Sound adaption
|
||||
* http://blog.projectnibble.org/
|
||||
* --------------------------------------
|
||||
* sazameki -- audio manipulating library
|
||||
* http://sazameki.org/
|
||||
* --------------------------------------
|
||||
*
|
||||
* - developed by:
|
||||
* Benny Bottema
|
||||
* blog.projectnibble.org
|
||||
* hosted by:
|
||||
* Google Code (code.google.com)
|
||||
* code.google.com/p/as3wavsound/
|
||||
*
|
||||
* - audio library in its original state developed by:
|
||||
* Takaaki Yamazaki
|
||||
* www.zkdesign.jp
|
||||
* hosted by:
|
||||
* Spark project (www.libspark.org)
|
||||
* www.libspark.org/svn/as3/sazameki/branches/fp10/
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed under the MIT License
|
||||
*
|
||||
* Copyright (c) 2008 Takaaki Yamazaki
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
package org.as3wavsound {
	import flash.events.SampleDataEvent;
	import flash.media.Sound;
	import flash.media.SoundChannel;
	import flash.media.SoundTransform;
	import flash.net.URLRequest;
	import flash.utils.ByteArray;
	import org.as3wavsound.sazameki.core.AudioSamples;
	import org.as3wavsound.sazameki.core.AudioSetting;
	import org.as3wavsound.WavSoundChannel;

	/**
	 * Relays play() calls from WavSound instances (returning the resulting
	 * WavSoundChannel) and stop() calls from WavSoundChannel instances.
	 *
	 * Contains a single Sound object acting as the master buffer into which
	 * all playing sounds are mixed, to reduce cpu / memory footprint: on each
	 * sample-data event the player loops through all playing WavSoundChannel
	 * instances, calls buffer(masterSampleBuffer) on each, and writes the
	 * result to the sound card's output stream.
	 *
	 * @author Benny Bottema
	 */
	internal class WavSoundPlayer {
		// The size of the master sample buffer used for playback.
		// Too small: the sound will have a jittery playback.
		// Too big: the sound will have high latencies (loading, stopping, playing, etc.).
		public static var MAX_BUFFERSIZE:Number = 8192;

		// the master sample buffer in which all separate WavSounds are mixed, always stereo at 44100Hz and bitrate 16
		private const sampleBuffer:AudioSamples = new AudioSamples(new AudioSetting(), MAX_BUFFERSIZE);
		// a list of all WavSoundChannels currently in playing mode
		private const playingWavSounds:Vector.<WavSoundChannel> = new Vector.<WavSoundChannel>();

		// the singular playback Sound with which all WavSounds are played back
		private const player:Sound = configurePlayer();

		/**
		 * Creates and configures the master playback Sound using the 'sample
		 * data event technique' (invoked once as this instance's field
		 * initializer). Until play() has been called on a WavSound, nothing is
		 * audible, because playingWavSounds is still empty.
		 *
		 * Also see: http://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/flash/events/SampleDataEvent.html
		 */
		private function configurePlayer():Sound {
			var player:Sound = new Sound();
			player.addEventListener(SampleDataEvent.SAMPLE_DATA, onSamplesCallback);
			player.play();
			return player;
		}

		/**
		 * Creates a WavSoundChannel and adds it to the list of currently
		 * playing channels (which are mixed into the master sample buffer).
		 *
		 * Called by WavSound instances, which return the new WavSoundChannel
		 * instance to the user.
		 */
		internal function play(sound:WavSound, startTime:Number, loops:int, sndTransform:SoundTransform):WavSoundChannel {
			var channel:WavSoundChannel = new WavSoundChannel(this, sound, startTime, loops, sndTransform);
			playingWavSounds.push(channel);
			return channel;
		}

		/**
		 * Removes a specific currently playing channel, so that its samples
		 * won't be mixed into the master sample buffer anymore and playback
		 * therefore stops.
		 */
		internal function stop(channel:WavSoundChannel):void {
			// fixed: remove by index instead of splicing the Vector while
			// for-each iterating it (mutation during iteration can skip elements)
			var index:int = playingWavSounds.indexOf(channel);
			if (index != -1) {
				playingWavSounds.splice(index, 1);
			}
		}

		/**
		 * The heartbeat of the WavSound approach, invoked by the master Sound object.
		 *
		 * Handles the SampleDataEvent by having every playing WavSoundChannel
		 * mix itself into the master sample buffer via its buffer() function,
		 * then writes the resulting master buffer to the event's output stream.
		 *
		 * Also see: http://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/flash/events/SampleDataEvent.html
		 *
		 * @param event Contains the soundcard outputstream to mix sound samples into.
		 */
		private function onSamplesCallback(event:SampleDataEvent):void {
			// clear the buffer
			sampleBuffer.clearSamples();
			// have all channels mix their samples into the master sample buffer
			for each (var playingWavSound:WavSoundChannel in playingWavSounds) {
				playingWavSound.buffer(sampleBuffer);
			}

			// extra references to avoid excessive getter calls in the following
			// for-loop (it appears CPU is being hogged otherwise)
			var outputStream:ByteArray = event.data;
			var samplesLength:Number = sampleBuffer.length;
			var samplesLeft:Vector.<Number> = sampleBuffer.left;
			var samplesRight:Vector.<Number> = sampleBuffer.right;

			// write all mixed samples to the sound's outputstream
			for (var i:int = 0; i < samplesLength; i++) {
				outputStream.writeFloat(samplesLeft[i]);
				outputStream.writeFloat(samplesRight[i]);
			}
		}
	}
}
|
@ -0,0 +1,57 @@
|
||||
package org.as3wavsound.sazameki.core {

	/**
	 * Contains lists of samples -left and optionally right- decoded from a
	 * WAVE ByteArray or manually mixed samples, plus a reference to the
	 * AudioSetting instance associated with this samples container.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class AudioSamples {
		// NOTE(review): kept public (alongside the getters below) so existing
		// code touching the fields directly keeps working
		public var _left:Vector.<Number>;
		public var _right:Vector.<Number>;
		private var _setting:AudioSetting;

		/**
		 * @param setting Playback configuration; a right channel is only
		 *        allocated when it specifies stereo.
		 * @param length Can be zero when decoding WAVE data (vectors grow as
		 *        needed), or a fixed buffer size when mixing to a Sound's
		 *        outputstream.
		 */
		public function AudioSamples(setting:AudioSetting, length:Number = 0) {
			this._setting = setting;
			this._left = new Vector.<Number>(length, length > 0);
			if (setting.channels == 2) {
				this._right = new Vector.<Number>(length, length > 0);
			}
		}

		/**
		 * Resets all samples to 0, keeping the current length.
		 *
		 * Fixed: zeroes the existing vectors in place instead of reallocating
		 * new ones — this runs once per sample-data callback, so avoiding the
		 * per-call allocations reduces GC pressure during playback.
		 */
		public function clearSamples():void {
			var n:int = _left.length;
			for (var i:int = 0; i < n; i++) {
				_left[i] = 0;
			}
			if (_setting.channels == 2) {
				var m:int = _right.length;
				for (var j:int = 0; j < m; j++) {
					_right[j] = 0;
				}
			}
		}

		public function get length():int {
			return left.length;
		}

		public function get setting():AudioSetting {
			return _setting;
		}

		public function get left():Vector.<Number> {
			return _left;
		}

		public function get right():Vector.<Number> {
			return _right;
		}
	}
}
|
@ -0,0 +1,50 @@
|
||||
package org.as3wavsound.sazameki.core {

	/**
	 * A sound's playback configuration: channel count (mono / stereo),
	 * sample rate and bit rate.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class AudioSetting {
		private var _channels:uint;   // 1 or 2
		private var _sampleRate:uint; // 11025, 22050 or 44100
		private var _bitRate:uint;    // 8 or 16

		/**
		 * Constructor: validates every value before storing it.
		 *
		 * @throws Error on an unsupported sample rate, channel count or bit rate.
		 */
		public function AudioSetting(channels:uint = 2, sampleRate:uint = 44100, bitRate:uint = 16) {
			if (sampleRate != 44100 && sampleRate != 22050 && sampleRate != 11025) {
				throw new Error("bad sample rate. sample rate must be 44100, 22050 or 11025");
			}
			if (channels != 1 && channels != 2) {
				throw new Error("channels must be 1 or 2");
			}
			if (bitRate != 16 && bitRate != 8) {
				throw new Error("bitRate must be 8 or 16");
			}

			_channels = channels;
			_sampleRate = sampleRate;
			_bitRate = bitRate;
		}

		public function get channels():uint {
			return _channels;
		}

		public function get sampleRate():uint {
			return _sampleRate;
		}

		public function get bitRate():uint {
			return _bitRate;
		}
	}
}
|
@ -0,0 +1,48 @@
|
||||
package org.as3wavsound.sazameki.format.riff {
	import flash.utils.ByteArray;
	import flash.utils.Endian;

	/**
	 * RIFF Chunk class: base class for a single chunk in a RIFF container —
	 * a 4-character id, a 32-bit payload length, then the payload.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class Chunk {
		// RIFF data is little-endian throughout.
		protected const ENDIAN:String = Endian.LITTLE_ENDIAN;
		protected var _id:String;

		public function Chunk(id:String) {
			this.id = id;
		}

		/**
		 * Sets the chunk id, normalized to exactly 4 characters as required by
		 * RIFF: longer values are truncated, shorter values are right-padded
		 * with spaces.
		 */
		public function set id(value:String):void {
			if (value.length > 4) {
				value = value.substr(0, 4);
			} else if (value.length < 4) {
				while (value.length < 4) {
					value += " ";
				}
			}
			_id = value;
		}

		public function get id():String {
			return _id;
		}

		/**
		 * Serializes this chunk: id (4 bytes), payload length (uint32,
		 * little-endian), then the payload produced by the subclass'
		 * encodeData().
		 */
		public function toByteArray():ByteArray {
			var result:ByteArray = new ByteArray();
			result.endian = ENDIAN;
			result.writeUTFBytes(_id);
			var data:ByteArray = encodeData();
			result.writeUnsignedInt(data.length);
			result.writeBytes(data);
			return result;
		}

		/**
		 * Produces this chunk's payload; must be implemented by subclasses.
		 * (Bugfix: corrected the typo "overriden" in the error message.)
		 */
		protected function encodeData():ByteArray {
			throw new Error("'encodeData()' method must be overridden");
		}
	}
}
|
@ -0,0 +1,75 @@
|
||||
package org.as3wavsound.sazameki.format.riff {
	import flash.utils.ByteArray;

	/**
	 * A RIFF 'LIST' chunk: a chunk whose payload is a 4-character list type
	 * followed by nested sub-chunks. Also provides the parsing helper used to
	 * split a raw RIFF byte stream into its chunks.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class LIST extends Chunk {
		// 4-character list type (e.g. 'WAVE'), normalized by the setter below
		protected var _type:String;
		// nested sub-chunks; populated by subclasses before encoding
		protected var _chunks:Vector.<Chunk>;

		public function LIST(type:String) {
			this.type = type;
			super("LIST");
		}

		/**
		 * Sets the list type, normalized to exactly 4 characters
		 * (truncated or right-padded with spaces, as RIFF requires).
		 */
		public function set type(value:String):void {
			if (value.length > 4) {
				value = value.substr(0, 4);
			} else if (value.length < 4) {
				while (value.length < 4) {
					value += " ";
				}
			}
			_type = value;
		}

		public function get type():String {
			return _type;
		}

		/**
		 * Payload = list type (4 bytes) followed by all nested sub-chunks,
		 * each serialized via Chunk.toByteArray().
		 */
		override protected function encodeData():ByteArray {
			var result:ByteArray = new ByteArray();
			result.writeUTFBytes(_type);
			for (var i:int = 0; i < _chunks.length; i++) {
				result.writeBytes(_chunks[i].toByteArray());
			}
			return result;
		}

		/**
		 * Splits a RIFF byte stream into an Object mapping chunk names to their
		 * raw payload ByteArrays. A leading 'RIFF' header (size + form type) is
		 * skipped; 'LIST' entries are keyed by their list type instead of 'LIST'.
		 */
		protected function splitList(bytes:ByteArray):Object {
			var obj:Object = new Object();
			bytes.position = 0;
			bytes.endian = ENDIAN;

			if (bytes.readUTFBytes(4) == 'RIFF') {
				bytes.readInt();
				bytes.readUTFBytes(4);//type
			} else {
				// no RIFF header: treat the stream as a bare chunk sequence
				bytes.position = 0;
			}

			while (bytes.position < bytes.length) {
				var currentName:String = bytes.readUTFBytes(4);
				var current:int = bytes.readInt();

				if (currentName == 'LIST') {
					// the list type counts toward the declared size; consume it
					currentName = bytes.readUTFBytes(4);
					current -= 4;
				}

				var tmpByte:ByteArray = new ByteArray();
				bytes.readBytes(tmpByte, 0, current);

				if (current % 2 == 1) {
					// RIFF chunks are word-aligned: skip the pad byte after odd sizes
					bytes.readByte();
				}
				obj[currentName] = tmpByte;
			}
			return obj;
		}
	}
}
|
@ -0,0 +1,15 @@
|
||||
package org.as3wavsound.sazameki.format.riff {

	/**
	 * The top-level 'RIFF' container chunk: a LIST whose id is 'RIFF' and
	 * whose type names the contained format (e.g. 'WAVE').
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class RIFF extends LIST {
		public function RIFF(type:String) {
			super(type);
			// overwrite the 'LIST' id set by the superclass constructor
			id = 'RIFF';
		}
	}
}
|
@ -0,0 +1,106 @@
|
||||
package org.as3wavsound.sazameki.format.wav {
	import org.as3wavsound.sazameki.core.AudioSamples;
	import org.as3wavsound.sazameki.core.AudioSetting;
	import org.as3wavsound.sazameki.format.riff.Chunk;
	import org.as3wavsound.sazameki.format.riff.RIFF;
	import org.as3wavsound.sazameki.format.wav.chunk.WavdataChunk;
	import org.as3wavsound.sazameki.format.wav.chunk.WavfmtChunk;

	import flash.utils.ByteArray;

	/**
	 * The WAVE decoder used for playing back wav files.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class Wav extends RIFF {

		public function Wav() {
			super('WAVE');
		}

		/**
		 * Encodes the given samples as a complete WAVE file: a RIFF container
		 * holding a 'fmt ' chunk and a 'data' chunk.
		 */
		public function encode(samples:AudioSamples):ByteArray {
			var fmt:WavfmtChunk = new WavfmtChunk();
			var data:WavdataChunk = new WavdataChunk();

			_chunks = new Vector.<Chunk>;
			_chunks.push(fmt);
			_chunks.push(data);

			data.setAudioData(samples);
			fmt.setSetting(samples.setting);

			return toByteArray();
		}

		/**
		 * Decodes a WAVE byte stream into AudioSamples.
		 * If no explicit setting is passed in, the 'fmt ' chunk (when present)
		 * is decoded to obtain one. Without both 'fmt ' and 'data' chunks the
		 * whole stream is treated as raw sample data.
		 * Sample rates other than 44100 Hz are resampled to 44100 Hz.
		 */
		public function decode(wavData:ByteArray, setting:AudioSetting):AudioSamples {
			var obj:Object = splitList(wavData);
			var data:AudioSamples;

			var relevantSetting:AudioSetting = setting;
			if (relevantSetting == null && obj['fmt ']) {
				relevantSetting = new WavfmtChunk().decodeData(obj['fmt '] as ByteArray);
			}

			if (obj['fmt '] && obj['data']) {
				data = new WavdataChunk().decodeData(obj['data'] as ByteArray, relevantSetting);
			} else {
				// malformed / headerless stream: decode everything as raw PCM
				data = new WavdataChunk().decodeData(wavData, relevantSetting);
			}

			var needsResampling:Boolean = relevantSetting != null && relevantSetting.sampleRate != 44100;
			return (needsResampling) ? resampleAudioSamples(data, relevantSetting.sampleRate) : data;
		}

		/**
		 * Resamples the given audio samples from a given sample rate to a target sample rate (or default 44100).
		 *
		 * @author Simion Medvedi (medvedisimion@gmail.com)
		 * @author Benny Bottema (sanitized code and added support for stereo resampling)
		 */
		private function resampleAudioSamples(data:AudioSamples, sourceRate:int, targetRate:int = 44100):AudioSamples {
			var newSize:int = data.length * targetRate / sourceRate;
			var newData:AudioSamples = new AudioSamples(new AudioSetting(data.setting.channels, targetRate, 16), newSize);

			resampleSamples(data.left, newData.left, newSize, sourceRate, targetRate);
			// playback buffering in WavSoundChannel will take care of a possibly missing right channel
			if (data.setting.channels == 2) {
				resampleSamples(data.right, newData.right, newSize, sourceRate, targetRate);
			}

			return newData;
		}

		/**
		 * Resamples the given audio samples from a given sample rate to a target sample rate (or default 44100).
		 * Upsamples by stepping through the source and emitting interpolated
		 * values between adjacent source samples.
		 *
		 * @author Simion Medvedi (medvedisimion@gmail.com)
		 * @author Benny Bottema (sanitized code)
		 */
		private function resampleSamples(sourceSamples:Vector.<Number>, targetSamples:Vector.<Number>, newSize:int, sourceRate:int, targetRate:int = 44100):void {
			// we need to expand the sample rate from whatever it is to targetRate Khz. This code
			// is assuming that the sample rate will be < targetRate Khz.
			var multiplier:Number = targetRate / sourceRate;

			// convert the data
			// 'measure' accumulates rate units to decide when to emit vs. advance
			var measure:int = targetRate;
			var sourceIndex:int = 0;
			var targetIndex:int = 0;

			while (targetIndex < newSize) {
				if (measure >= sourceRate) {
					// emit: add a fractional step toward the next source sample
					var increment:Number = 0;
					if (targetIndex > 0 && sourceIndex < sourceSamples.length - 1) {
						increment = (sourceSamples[sourceIndex + 1] - sourceSamples[sourceIndex]) / multiplier;
					}
					targetSamples[targetIndex++] = sourceSamples[sourceIndex] + increment;
					measure -= sourceRate;
				} else {
					// advance to the next source sample
					sourceIndex++;
					measure += targetRate;
				}
			}
		}
	}
}
|
@ -0,0 +1,126 @@
|
||||
package org.as3wavsound.sazameki.format.wav.chunk {
	import flash.utils.ByteArray;
	import org.as3wavsound.sazameki.core.AudioSamples;
	import org.as3wavsound.sazameki.core.AudioSetting;
	import org.as3wavsound.sazameki.format.riff.Chunk;

	/**
	 * The WAVE 'data' chunk: encodes AudioSamples to raw little-endian PCM
	 * bytes and decodes raw PCM bytes back into AudioSamples.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class WavdataChunk extends Chunk {
		private var _samples:AudioSamples;

		public function WavdataChunk() {
			super('data');
		}

		/** Sets the samples that encodeData() will serialize. */
		public function setAudioData(samples:AudioSamples):void {
			_samples = samples;
		}

		/**
		 * Encodes the samples as (interleaved, for stereo) PCM. 16-bit samples
		 * are scaled to [-32767, 32767]; 8-bit samples to unsigned [0, 255]
		 * with a 128 bias. Out-of-range input values are clamped.
		 */
		override protected function encodeData():ByteArray {
			var bytes:ByteArray = new ByteArray();
			bytes.endian = ENDIAN;

			var setting:AudioSetting = _samples.setting;
			var i:int;
			var sig:Number;
			var len:int = _samples.left.length;
			var left:Vector.<Number>;

			if (setting.channels == 2) {
				left=_samples.left;
				var right:Vector.<Number>=_samples.right;

				if (setting.bitRate == 16) {
					for (i = 0; i < len; i++) {
						sig = left[i];
						if (sig < -1) bytes.writeShort( -32767);
						else if (sig > 1) bytes.writeShort( 32767);
						else bytes.writeShort(sig * 32767);

						sig = right[i];
						if (sig < -1) bytes.writeShort(-32767);
						else if (sig > 1) bytes.writeShort(32767);
						else bytes.writeShort(sig * 32767);
					}
				} else {
					for (i = 0; i < len; i++) {
						sig = left[i];
						if (sig<-1) bytes.writeByte(0);
						else if (sig>1) bytes.writeByte(255);
						else bytes.writeByte(sig*127+128);

						sig = right[i];
						if (sig<-1) bytes.writeByte(0);
						else if (sig>1) bytes.writeByte(255);
						else bytes.writeByte(sig*127+128);
					}
				}
			} else {
				left = _samples.left;

				if (setting.bitRate == 16) {
					for (i = 0; i < len; i++) {
						sig = left[i];
						if (sig < -1) bytes.writeShort(-32767);
						else if (sig > 1) bytes.writeShort(32767);
						// bugfix: was sig * 32768, which overflows a signed 16-bit
						// sample to -32768 for sig == 1 and was inconsistent with
						// the stereo path above
						else bytes.writeShort(sig * 32767);
					}
				} else {
					for (i = 0; i < len; i++) {
						sig = left[i];
						if (sig<-1) bytes.writeByte(0);
						else if (sig>1) bytes.writeByte(255);
						else bytes.writeByte(sig*127+128);
					}
				}

			}
			return bytes;
		}

		/**
		 * Decodes (interleaved) PCM bytes into per-channel sample vectors,
		 * normalized to Numbers by dividing by 32767 (16-bit) or 255 (8-bit).
		 *
		 * NOTE(review): the 8-bit path uses signed readByte() without the
		 * 128 bias applied by encodeData() — presumably a long-standing
		 * library quirk; left unchanged here to preserve behavior.
		 */
		public function decodeData(bytes:ByteArray, setting:AudioSetting):AudioSamples {
			bytes.position = 0;
			bytes.endian = ENDIAN;

			var samples:AudioSamples = new AudioSamples(setting);
			// total bytes / bytes-per-sample / channels = samples per channel
			var length:int = bytes.length / (setting.bitRate / 8) / setting.channels;
			var i:int;
			var left:Vector.<Number>;

			if (setting.channels == 2) {
				left = samples.left;
				var right:Vector.<Number> = samples.right;
				if (setting.bitRate == 16) {
					for (i = 0; i < length; ++i) {
						left[i] = bytes.readShort() / 32767;
						right[i] = bytes.readShort() / 32767;
					}

				} else {
					for (i = 0; i < length; i++)
					{
						left[i] = bytes.readByte() / 255;
						right[i] = bytes.readByte() / 255;
					}
				}
			} else {
				left = samples.left;
				if (setting.bitRate == 16) {
					for (i = 0; i < length; i++) {
						left[i] = bytes.readShort() / 32767;
					}
				} else {
					for (i = 0; i < length; i++) {
						left[i] = bytes.readByte() / 255;
					}
				}
			}
			return samples;
		}
	}
}
|
@ -0,0 +1,56 @@
|
||||
package org.as3wavsound.sazameki.format.wav.chunk {
	import flash.utils.ByteArray;
	import org.as3wavsound.sazameki.core.AudioSetting;
	import org.as3wavsound.sazameki.format.riff.Chunk;

	/**
	 * The WAVE 'fmt ' chunk: serializes an AudioSetting to the standard
	 * format header fields and decodes a header back into an AudioSetting.
	 *
	 * @author Takaaki Yamazaki(zk design),
	 * @author Benny Bottema (modified, optimized and cleaned up code)
	 */
	public class WavfmtChunk extends Chunk {
		private var _setting:AudioSetting;

		public function WavfmtChunk() {
			super('fmt ');
		}

		/** Sets the AudioSetting that encodeData() will serialize. */
		public function setSetting(setting:AudioSetting):void {
			_setting = setting;
		}

		/**
		 * Writes the 16-byte format header. The data rate and block size
		 * fields are derived from sample rate, channels and bit rate.
		 */
		override protected function encodeData():ByteArray {
			var result:ByteArray = new ByteArray();
			result.endian = ENDIAN;

			//fmt ID(2) — 1 = uncompressed PCM
			result.writeShort(1);
			//channels(2)
			result.writeShort(_setting.channels);
			//sampling rate(4)
			result.writeInt(_setting.sampleRate);
			//data rate(4) — bytes per second
			result.writeInt(_setting.sampleRate * _setting.channels * (_setting.bitRate / 8));
			//block size(2) — bytes per sample frame
			result.writeShort((_setting.bitRate / 8) * _setting.channels);
			//bit rate(2) — bits per sample
			result.writeShort(_setting.bitRate);

			return result;
		}

		/**
		 * Decodes a 'fmt ' payload into an AudioSetting. The format tag and
		 * the derived fields (data rate, block size) are read and discarded.
		 */
		public function decodeData(bytes:ByteArray):AudioSetting {
			bytes.position = 0;
			bytes.endian = ENDIAN;
			bytes.readShort();   // format tag (ignored)
			var channels:int = bytes.readShort();
			var smplRate:int = bytes.readInt();
			bytes.readInt();     // data rate (derived; ignored)
			bytes.readShort();   // block size (derived; ignored)
			var bit:int = bytes.readShort();
			_setting = new AudioSetting(channels, smplRate, bit);
			return _setting;
		}
	}
}
|
228
tools/speaker/msie_flashFallback/flashFallback.js
Executable file
228
tools/speaker/msie_flashFallback/flashFallback.js
Executable file
@ -0,0 +1,228 @@
|
||||
/*
|
||||
A fallback to flash for wav-output (for IE 10)
|
||||
Please mind that wav data has to be copied to an ArrayBuffer object internally,
|
||||
since we may not send binary data to the swf.
|
||||
This may take some time and memory for longer utterances.
|
||||
*/
|
||||
|
||||
var meSpeakFlashFallback = new function() {
|
||||
|
||||
var swfDefaultId='meSpeakFallback',
|
||||
swfDefaultUrl='meSpeakFallback.swf',
|
||||
swfElementId='', swfViaAX=false, swfInstalled=false, swfHasLoaded=false, swfVol=1;
|
||||
|
||||
// public
|
||||
|
||||
function swfInstallFallback(swfUrl, swfId, parentElementOrId) {
|
||||
var parentEl, url;
|
||||
if (swfInstalled) return true;
|
||||
if (!swfIsAvailable(10)) return false;
|
||||
swfInstalled=true;
|
||||
// set defaults
|
||||
swfElementId = (swfId && typeof swfId == 'string')? swfId:swfDefaultId;
|
||||
url = (swfUrl && typeof swfUrl == 'string')? swfUrl:swfDefaultUrl;
|
||||
if (parentElementOrId) {
|
||||
if (typeof parentElementOrId == 'string') {
|
||||
parentEl=document.getElementById(parentElementOrId);
|
||||
}
|
||||
else if (typeof parentElementOrId == 'object') {
|
||||
parentEl=parentElementOrId=null;
|
||||
}
|
||||
}
|
||||
if (!parentEl) parentEl=document.getElementsByTagName('body')[0];
|
||||
if (!parentEl) return false;
|
||||
// inject
|
||||
var obj = swfCreate(
|
||||
{
|
||||
'data': url,
|
||||
'width': '2',
|
||||
'height': '2',
|
||||
'id': swfElementId,
|
||||
'name': swfElementId,
|
||||
'align': 'top'
|
||||
},
|
||||
{
|
||||
'quality': 'low',
|
||||
'bgcolor': 'transparent',
|
||||
'allowscriptaccess': 'sameDomain',
|
||||
'allowfullscreen': 'false'
|
||||
}
|
||||
);
|
||||
parentEl.appendChild(obj);
|
||||
swfRegisterUnloadHandler();
|
||||
return true;
|
||||
}
|
||||
|
||||
	function swfReady() {
		// True once the swf has loaded and called back via the handshake.
		return swfHasLoaded;
	}
|
||||
|
||||
function swfSetVolume(v) {
|
||||
if (wfHasLoaded) {
|
||||
var obj=document.getElementById(swfElementId);
|
||||
if (obj) el.setVolume(v);
|
||||
}
|
||||
swfVol=v;
|
||||
}
|
||||
|
||||
function swfSpeak(txt, options) {
|
||||
if (swfHasLoaded && window.meSpeak) {
|
||||
var obj=document.getElementById(swfElementId);
|
||||
if (obj) {
|
||||
if (!typeof options != 'object') options={};
|
||||
options.rawdata='array';
|
||||
obj.play( meSpeak.speak(txt, options) );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
	function swf10Available() {
		// Public wrapper: is a Flash player of at least major version 10 available?
		return swfIsAvailable(10);
	}
|
||||
|
||||
	function swfFallbackHandshake() {
		// Called (via the global meSpeakFallbackHandshake) when the swf has
		// initialized: marks the fallback as loaded and re-applies any volume
		// that was set before the swf finished loading.
		swfHasLoaded=true;
		if (swfVol!=1) swfSetVolume(swfVol);
		if (window.console) console.log('meSpeak-SWF-fallback available.');
	}
|
||||
|
||||
|
||||
// private: a stripped-down version of swfobject.js
|
||||
|
||||
function swfIsAvailable(leastMajorVersion) {
|
||||
// returns Boolean: flashplayer and version at least 10.x
|
||||
var sf='Shockwave Flash', sfm='application/x-shockwave-flash';
|
||||
if (navigator.plugins !== undefined && typeof navigator.plugins[sf] == 'object') {
|
||||
var d=navigator.plugins[sf].description;
|
||||
if (d && !(typeof navigator.mimeTypes !==undefined && navigator.mimeTypes[sfm] && !navigator.mimeTypes[sfm].enabledPlugin)) {
|
||||
d=d.replace(/^.*\s+(\S+\s+\S+$)/, '$1');
|
||||
if (leastMajorVersion<= parseInt(d.replace(/^(.*)\..*$/, '$1'), 10)) return true;
|
||||
}
|
||||
}
|
||||
else if (window.ActiveXObject) {
|
||||
try {
|
||||
var a=new ActiveXObject('ShockwaveFlash.ShockwaveFlash');
|
||||
if (a) {
|
||||
swfViaAX=true;
|
||||
d=a.GetVariable('$version');
|
||||
if (d) {
|
||||
d=d.split(' ')[1].split(',');
|
||||
if (leastMajorVersion<= parseInt(d[0], 10)) return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch(e) {}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
function swfCreate(attributes, params) {
|
||||
if (swfViaAX) {
|
||||
var att='', par='', i;
|
||||
for (i in attributes) {
|
||||
var a=i.toLowerCase;
|
||||
if (a=='data') {
|
||||
params.movie=attributes[i];
|
||||
}
|
||||
else if (a=='styleclass') {
|
||||
att+=' class="'+attributes[i]+'"';
|
||||
}
|
||||
else if (a!='classid') {
|
||||
att+=' '+i+'="'+attributes[i]+'"';
|
||||
}
|
||||
}
|
||||
for (i in params) {
|
||||
if (params[i] != Object.prototype[i]) par+=' <param name="'+i+'" value="'+params[i]+'" />';
|
||||
}
|
||||
var el=document.createElement('div');
|
||||
el.outerHTML='<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"'+att+'>'+par+'</object>';
|
||||
return el;
|
||||
}
|
||||
else {
|
||||
var o=document.createElement('object');
|
||||
o.setAttribute('type', 'application/x-shockwave-flash');
|
||||
for (var i in attributes) {
|
||||
if (attributes[i] != Object.prototype[i]) {
|
||||
var a=i.toLowerCase();
|
||||
if (a=='styleclass') {
|
||||
o.setAttribute('class', attributes[i]);
|
||||
}
|
||||
else if (a!='styleclass') {
|
||||
o.setAttribute(i, attributes[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (i in params) {
|
||||
if (attributes[i] != Object.prototype[i] && i.toLowerCase() != 'movie') {
|
||||
var p=document.createElement('param');
|
||||
p.setAttribute('name', i);
|
||||
p.setAttribute('value', attributes[i]);
|
||||
o.appendChild(p);
|
||||
}
|
||||
}
|
||||
return o;
|
||||
}
|
||||
}
|
||||
|
||||
	function swfRemove(obj) {
		// Removes the swf object (given as element or element id) from the DOM
		// and marks the fallback as uninstalled. Any failure is silently ignored.
		try {
			if (typeof obj =='string') obj=document.getElementById(obj);
			if (!obj || typeof obj !='object') return;
			if (swfViaAX) {
				// IE/ActiveX: hide first, then detach via the readyState-aware helper
				obj.style.display='none';
				swfRemoveObjectInIE(obj.id);
			}
			else if (obj.parentNode) {
				obj.parentNode.removeChild(obj);
			}
			swfInstalled=false;
		}
		catch(e) {}
	}
|
||||
|
||||
function swfRemoveObjectInIE(id) {
|
||||
var obj=document.getElementById(obj);
|
||||
if (obj) {
|
||||
if (obj.readyState==4) {
|
||||
for (var i in obj) {
|
||||
if (typeof obj[i] =='function') obj[i] = null;
|
||||
}
|
||||
if (obj.parentNode) obj.parentNode.removeChild(obj);
|
||||
}
|
||||
else {
|
||||
setTimeout(function() {swfRemoveObjectInIE(id)}, 10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
	function swfUnloadHandler() {
		// Page-unload cleanup: remove the swf and (legacy IE) detach this handler.
		if (swfElementId) swfRemove(swfElementId);
		if (!window.addEventListener && window.detachEvent) window.detachEvent('onunload', swfUnloadHandler);
	}
|
||||
|
||||
	function swfRegisterUnloadHandler() {
		// Registers the unload cleanup, preferring the standard API and
		// falling back to legacy IE attachEvent.
		if (window.addEventListener) {
			window.addEventListener('unload', swfUnloadHandler, false);
		}
		else if (window.attachEvent) {
			window.attachEvent('onunload', swfUnloadHandler);
		}
	}
|
||||
|
||||
return {
|
||||
'install': swfInstallFallback,
|
||||
'isAvailable': swf10Available,
|
||||
'ready': swfReady,
|
||||
'speak': swfSpeak,
|
||||
'setVolume': swfSetVolume,
|
||||
'swfFallbackHandshake': swfFallbackHandshake
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
function meSpeakFallbackHandshake() {
	// handshake handler with swf external interface
	// (the swf calls this global via ExternalInterface once initialized)
	meSpeakFlashFallback.swfFallbackHandshake();
}
|
||||
|
||||
|
168
tools/speaker/msie_flashFallback/index.html
Executable file
168
tools/speaker/msie_flashFallback/index.html
Executable file
@ -0,0 +1,168 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>meSpeak Flash Fallback</title>
|
||||
<link href="http://fonts.googleapis.com/css?family=Open+Sans&subset=latin" rel="stylesheet" type="text/css" />
|
||||
<link href="http://fonts.googleapis.com/css?family=Lato:300&subset=latin" rel="stylesheet" type="text/css" />
|
||||
|
||||
<script type="text/javascript" src="../mespeak.js"></script>
|
||||
<script type="text/javascript" src="flashFallback.js"></script>
|
||||
|
||||
<script type="text/javascript">
|
||||
meSpeak.loadConfig("../mespeak_config.json");
|
||||
meSpeak.loadVoice("../voices/en/en.json");
|
||||
|
||||
var fallbackInstalled = false;
|
||||
var hasTypedArrays = Boolean(window.Int32Array && window.Float32Array
|
||||
&& (window.Int32Array.prototype.subarray || window.Int32Array.subarray)
|
||||
),
|
||||
canSpeak = (hasTypedArrays && window.meSpeak && meSpeak.canPlay());
|
||||
|
||||
function checkAndInstall() {
	// check typed array support (we need this) and wav-support (we fallback, if not present)
	// i.e. only install the Flash fallback for browsers that cannot play wav natively
	if (hasTypedArrays && window.meSpeak && !meSpeak.canPlay()) {
		canSpeak = installFallback();
	}
}
|
||||
|
||||
function installFallback() {
	// install with explicit default values
	// swf will be 2px x 2px (h x w), transparent
	// returns the boolean result of meSpeakFlashFallback.install()
	return fallbackInstalled = meSpeakFlashFallback.install(
		'meSpeakFallback.swf', // url
		'meSpeakFallback', // id of swf object
		null // parent element to inject into
		// (default: append to body)
	);
}
|
||||
|
||||
function fallbackSpeak() {
	// demo button handler: speak a fixed phrase through the Flash fallback
	meSpeakFlashFallback.speak('Hello world.', { 'speed': 180 } );
}
|
||||
|
||||
// some functions for abstracting
|
||||
|
||||
// Speaks via the Flash fallback when installed, otherwise natively via
// meSpeak. (vol is only honored by the native meSpeak path.)
function speakAny( txt, options, vol ) {
	if (fallbackInstalled) {
		meSpeakFlashFallback.speak( txt, options );
	}
	else if (canSpeak) {
		meSpeak.speak( txt, options, vol );
	}
}
|
||||
|
||||
// Routes a volume change to whichever speech backend is active.
function setVolume( vol) {
	if (fallbackInstalled) {
		meSpeakFlashFallback.setVolume( vol );
	}
	else if (canSpeak) {
		meSpeak.setVolume( vol );
	}
}
|
||||
|
||||
function fallbackReady() {
	// check if the swf has loaded and performed a handshake, indicating that it's available
	return meSpeakFlashFallback.ready();
}
|
||||
|
||||
// install only, if needed
|
||||
// if (document.addEventListener) document.addEventListener('DOMContentLoaded', checkAndInstall, false);
|
||||
|
||||
// install unconditionally (for testing purpose)
|
||||
if (document.addEventListener) document.addEventListener('DOMContentLoaded', installFallback, false);
|
||||
</script>
|
||||
|
||||
<style type="text/css">
|
||||
html
|
||||
{
|
||||
margin: 0;
|
||||
padding: 2em 1.5em 4.5em 1.5em;
|
||||
background-color: #e2e3e4;
|
||||
}
|
||||
body
|
||||
{
|
||||
max-width: 900px;
|
||||
padding: 2px 40px 60px 40px;
|
||||
margin: 0 auto 0 auto;
|
||||
background-color: #fafafb;
|
||||
color: #111;
|
||||
font-family: 'Open Sans',sans-serif;
|
||||
font-size: 13px;
|
||||
line-height: 19px;
|
||||
}
|
||||
h1,h2,h3,h4
|
||||
{
|
||||
font-family: 'Lato',sans-serif;
|
||||
font-weight: 300;
|
||||
}
|
||||
h1 {
|
||||
font-size: 46px;
|
||||
line-height: 46px;
|
||||
color: #2681a7;
|
||||
margin-top: 0.5em;
|
||||
margin-bottom: 0.5em;
|
||||
padding: 0;
|
||||
}
|
||||
h2
|
||||
{
|
||||
font-size: 36px;
|
||||
color: #111;
|
||||
margin-top: 0;
|
||||
margin-bottom: 1.5em;
|
||||
clear: both;
|
||||
}
|
||||
h1 span.pict { font-size: 38px; color: #ccc; margin-left: 0.5em; letter-spacing: -2px; }
|
||||
p.codesample,xmp
|
||||
{
|
||||
margin: 1em 0;
|
||||
padding: 1em 0 1em 2em;
|
||||
white-space: pre;
|
||||
font-family: monospace;
|
||||
line-height: 18px;
|
||||
background-color: #f2f3f5;
|
||||
color: #111;
|
||||
}
|
||||
a { color: #006f9e; }
|
||||
a:hover,a:focus { color: #2681a7; }
|
||||
a:active { color: #cd360e; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>meSpeak.js <span class="pict">(( • ))</span></h1>
|
||||
<h2>Flash Fallback <small>(for IE 10)</small></h2>
|
||||
|
||||
<p>A fallback to play meSpeak's wav-files via Flash.<br />Since typed arrays are still a requirement, the only use-case is MS Internet Explorer 10.<br />
|
||||
Please note that, since we may not send binary data to a swf-object, the data has to be copied internally to a binary object, which may take some time and memory.<br />Because of this, the status of this solution is at best "experimental" and not for everyday use.</p>
|
||||
<p>The fallback "meSpeakFallback.swf" uses the <a href="http://code.google.com/p/as3wavsound/">AS3WavSound</a> library (v0.9) for wav-playback inside the SWF. (Ironically Flash doesn't support native wav-playback either.)<br />The swf-file is compiled for network use, meaning it will work only, if loaded over a network (web-server).</p>
|
||||
|
||||
<p>For testing, the fallback is installed for all browsers on this page:
|
||||
<input type="button" value="speak: Hello world." onclick="fallbackSpeak();" /></p>
|
||||
|
||||
<p>See page source for API & details.</p>
|
||||
<p>Download: <a href="http://www.masswerk.at/mespeak/msie_flashFallback.zip">msie_flashFallback.zip</a><br />
|
||||
<em>(Installation: Put the unzipped directory inside your mespeak-directory.)</em></p>
|
||||
|
||||
<hr style="margin-top: 2em; margin-bottom: 2em;">
|
||||
<p><strong>A Note on Usage:</strong><br />While the fallback-script strips down the code needed to inject a swf-object to a minimum, best practice would be to include the script only, if applicable (i.e.: for MS IE 10 only), using MS-specific conditional comments:</p>
|
||||
|
||||
<xmp style="margin-left: 1em;">
|
||||
<!--[if IE 10]>
|
||||
<script type="text/javascript" src="flashFallback.js"></script>
|
||||
<![endif]-->
|
||||
</xmp>
|
||||
<p>If doing so, you would have to check for the existence of <code>meSpeakFlashFallback</code> first, before calling it:</p>
|
||||
|
||||
<xmp style="margin-left: 1em;">
|
||||
if (window.meSpeakFlashFallback) callbackInstalled = meSpeakFlashFallback.install();
|
||||
|
||||
// later, see page source for API details and usage
|
||||
if (callbackInstalled) meSpeakFlashFallback.speak('Hello world.');
|
||||
</xmp>
|
||||
<p>This way, page- and memory-loads are reduced to a minimum for all browsers.</p>
|
||||
<p>Please note that there is a delay between calling <code>install()</code> and the fallback actually being available, since the swf-file has to load and initialize first.</p>
|
||||
<p> </p>
|
||||
<p>Norbert Landsteiner, mass:werk – media environments, <a href="http://www.masswerk.at/" target="_top">www.masswerk.at</a><br />
|
||||
Vienna, July 2013</p>
|
||||
</body>
|
||||
</html>
|
BIN
tools/speaker/msie_flashFallback/meSpeakFallback.swf
Executable file
BIN
tools/speaker/msie_flashFallback/meSpeakFallback.swf
Executable file
Binary file not shown.
Reference in New Issue
Block a user