AudioBuffer
is a buffer of floating point samples
- * corresponding to a single channel of streaming audio. It is readonly, but you
- * can obtain a copy of the samples in the buffer by using the toArray
method.
- * In fact, when drawing a waveform, you should use the toArray
method
- * rather than iterating over the buffer itself because it is possible that the samples
- * in the buffer will be replaced with new ones between calls to the get
method,
- * which results in a waveform that appears to have discontinuities in it.
- *
- * @author Damien Di Fede
- *
- */
-
-public interface AudioBuffer
-{
- /**
- * Returns the length of the buffer.
- *
- * @return int: the number of samples in the buffer
- *
- * @related AudioBuffer
- */
- int size();
-
- /**
- * Gets the ith
sample in the buffer. This method
- * does not do bounds checking, so it may throw an exception.
- *
- * @param i
- * int: the index of the sample you want to get
- *
- * @return float: the ith
sample
- *
- * @example Basics/DrawWaveformAndLevel
- *
- * @related AudioBuffer
- */
- float get(int i);
-
- /**
- * Gets the current level of the buffer. It is calculated as the
- * root-mean-square of all the samples in the buffer.
- *
- * @return float: the RMS amplitude of the buffer
- *
- * @example Basics/DrawWaveformAndLevel
- *
- * @related AudioBuffer
- */
- float level();
-
- /**
- * Returns the samples in the buffer in a new float array.
- * Modifying the samples in the returned array will not change
- * the samples in the buffer.
- *
- * @return float[]: a new float array containing the buffer's samples
- *
- * @related AudioBuffer
- */
- float[] toArray();
-}
diff --git a/src/ddf/minim/AudioEffect.java b/src/ddf/minim/AudioEffect.java
deleted file mode 100644
index cdf62e9..0000000
--- a/src/ddf/minim/AudioEffect.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioEffect
is anything that can process one or two float
- * arrays. Typically it is going to be some kind of time-based process because
- * the float arrays passed to it will be consecutive chunks of audio data. The
- * effect is expected to modify these arrays in such a way that the values
- * remain in the range [-1, 1]. All of the effects included with Minim implement
- * this interface and all you need to do to write your own effects is to create
- * a class that implements this interface and then add an instance of it to an
- * anything that is Effectable
, such as an AudioOutput
.
- *
- * This interface is Deprecated and will likely be removed from a future version
- * of Minim. We now recommend implementing your effects by extending
- * You can obtain an AudioInput from Minim by using one of the getLineIn methods:
- *
- * An AudioOutput is a connection to the output of a computer's sound card.
- * Typically the computer speakers are connected to this.
- * You can use an AudioOutput to do real-time sound synthesis by patching
- * UGens to an output object. You can get an AudioOutput object from Minim
- * using one of five methods:
- *
- * In the event that an output doesn't exist with the requested parameters,
- * Minim will spit out an error and return null.
- * In general, you will want to use one of the first two methods listed above.
- *
- * In addition to directly patching UGens to the output, you can also schedule
- * "notes" to be played by the output at some time in the future. This can
- * be very powerful when writing algorithmic music and sound. See the playNote
- * method for more information.
- *
- * Versions of playNote that do not have an Instrument argument
- * will create an instance of a default Instrument that plays a
- * sine tone based on the parameters passed in.
- *
- * To facilitate writing algorithmic music, the start time and
- * duration of a note is expressed in beats and not in seconds.
- * By default, the tempo of an AudioOutput will be 60 BPM (beats per minute),
- * which means that beats are equivalent to seconds. If you want to think
- * in seconds when writing your note playing code, then simply don't change
- * the tempo of the output.
- *
- * Another thing to keep in mind is that the AudioOutput processes its
- * note queue in its own Thread, so if you are going to queue up a lot of
- * notes at once you will want to use the pauseNotes method before queuing
- * them. If you don't, the timing will be slightly off because the "now" that
- * the start time of each note is an offset from will change from note to note.
- * Once all of your notes have been added, you call resumeNotes to allow
- * the AudioOutput to process notes again.
- *
- * @related Instrument
- * @related setTempo ( )
- * @related setNoteOffset ( )
- * @related setDurationFactor ( )
- * @related pauseNotes ( )
- * @related resumeNotes ( )
- *
- * @example Basics/SequenceSound
- *
- * @shortdesc Schedule a "note" to played by the output.
- *
- * @param startTime
- * float: when the note should begin playing, in beats
- * @param duration
- * float: how long the note should be, in beats
- * @param instrument
- * the Instrument that will play the note
- */
- public void playNote(float startTime, float duration, Instrument instrument)
- {
- noteManager.addEvent( startTime, duration, instrument );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument.
- *
- * @see #playNote(float, float, Instrument)
- *
- * @param startTime
- * float: when the note should begin playing, in beats
- * @param duration
- * float: how long the note should be, in beats
- * @param hz
- * float: the frequency, in Hertz, of the note to be played
- */
- public void playNote(float startTime, float duration, float hz)
- {
- noteManager.addEvent( startTime, duration, new DefaultInstrument( hz, this ) );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument.
- *
- * @see #playNote(float, float, Instrument)
- *
- * @param startTime
- * float: when the note should begin playing, in beats
- * @param duration
- * float: how long the note should be, in beats
- * @param pitchName
- * String: the pitch name of the note to be played (e.g. "A4" or "Bb3")
- */
- public void playNote(float startTime, float duration, String pitchName)
- {
- noteManager.addEvent( startTime, duration, new DefaultInstrument( Frequency.ofPitch( pitchName ).asHz(), this ) );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument and has a duration of 1 beat.
- *
- * @see #playNote(float, float, Instrument)
- *
- * @param startTime
- * float: when the note should begin playing, in beats
- * @param hz
- * float: the frequency, in Hertz, of the note to be played
- */
- public void playNote(float startTime, float hz)
- {
- noteManager.addEvent( startTime, 1.0f, new DefaultInstrument( hz, this ) );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument and has a duration of 1 beat.
- *
- * @see #playNote(float, float, Instrument)
- *
- * @param startTime
- * float: when the note should begin playing, in beats
- * @param pitchName
- * String: the pitch name of the note to be played (e.g. "A4" or "Bb3")
- */
- public void playNote(float startTime, String pitchName)
- {
- noteManager.addEvent( startTime, 1.0f, new DefaultInstrument( Frequency.ofPitch( pitchName ).asHz(), this ) );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument, has a duration of 1 beat,
- * and is played immediately.
- *
- * @see #playNote(float, float, Instrument)
- *
- * @param hz
- * float: the frequency, in Hertz, of the note to be played
- */
- public void playNote(float hz)
- {
- noteManager.addEvent( 0.0f, 1.0f, new DefaultInstrument( hz, this ) );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument,
- * has a duration of 1 beat, and is played immediately.
- *
- * @see #playNote(float, float, Instrument)
- *
- * @param pitchName
- * String: the pitch name of the note to be played (e.g. "A4" or "Bb3")
- */
- public void playNote(String pitchName)
- {
- noteManager.addEvent( 0.0f, 1.0f, new DefaultInstrument( Frequency.ofPitch( pitchName ).asHz(), this ) );
- }
-
- /**
- * Schedule a "note" to played by the output that uses the default Instrument,
- * has a duration of 1 beat, is played immediately, and has a pitch of "A4".
- * This is good to use if you just want to generate some test tones.
- *
- * @see #playNote(float, float, Instrument)
- */
- public void playNote()
- {
- noteManager.addEvent( 0.0f, 1.0f, new DefaultInstrument( Frequency.ofPitch( "" ).asHz(), this ) );
- }
-
- /**
- * The tempo of an AudioOutput controls how it will interpret the start time and duration
- * arguments of playNote methods. By default the tempo of an AudioOutput is 60 BPM (beats per minute),
- * which means that one beat lasts one second. Setting the tempo to 120 BPM means that one beat lasts
- * half of a second. When the tempo is changed, it will only effect playNote calls made
- * after the change.
- *
- * @shortdesc Set the tempo of the AudioOutput to change the meaning of start times and durations for notes.
- *
- * @example Basics/SequenceSound
- *
- * @param tempo
- * float: the new tempo for the AudioOutput, in BPM (beats per minute)
- *
- * @related getTempo ( )
- */
- public void setTempo(float tempo)
- {
- noteManager.setTempo( tempo );
- }
-
- /**
- * Return the current tempo of the AudioOuput.
- * Tempo is expressed in BPM (beats per minute).
- *
- * @return float: the current tempo
- *
- * @example Basics/SequenceSound
- *
- * @related setTempo ( )
- */
- public float getTempo()
- {
- return noteManager.getTempo();
- }
-
- /**
- * When writing out musical scores in code, it is often nice to think about
- * music in sections, where all of the playNote calls have start times relative to
- * the beginning of the section. The setNoteOffset method facilitates this by
- * letting you set a time from which all start times passed to playNote calls
- * will add on to. So, if you set the note offset to 16, that means all playNote
- * start times will be relative to the 16th beat from "now".
- *
- * By default, note offset is 0.
- *
- * @shortdesc Sets the amount of time added to all start times passed to playNote calls.
- *
- * @param noteOffset
- * float: the amount of time added to all start times passed to playNote calls.
- *
- * @example Basics/SequenceSound
- *
- * @related getNoteOffset ( )
- */
- public void setNoteOffset(float noteOffset)
- {
- noteManager.setNoteOffset( noteOffset );
- }
-
- /**
- * Return the current value of the note offset for this output.
- *
- * @return float: the current note offset
- *
- * @example Basics/SequenceSound
- *
- * @related setNoteOffset ( )
- */
- public float getNoteOffset()
- {
- return noteManager.getNoteOffset();
- }
-
- /**
- * The duration factor of an AudioOutput defines how durations passed to playNote calls
- * are scaled before being queued. If your duration factor is 0.5 and you queue a note
- * with a duration of 2, the actual duration will become 1. This might be useful if
- * you want to queue a string of notes first with long durations and then very short durations.
- *
- * By default the duration factor is 1.
- *
- * @shortdesc Sets a factor that will scale durations passed to subsequent playNote calls.
- *
- * @param durationFactor
- * float: the duration factor
- *
- * @related getDurationFactor ( )
- */
- public void setDurationFactor(float durationFactor)
- {
- noteManager.setDurationFactor( durationFactor );
- }
-
- /**
- * Return the current value of the duration factor for this output.
- *
- * @return float: the current duration factor
- *
- * @related setDurationFactor ( )
- */
- public float getDurationFactor()
- {
- return noteManager.getDurationFactor();
- }
-
- /**
- * An AudioOutput processes its note queue in its own Thread,
- * so if you are going to queue up a lot of notes at once
- * you will want to use the
- * To create an AudioSample you must use either the loadSample or createSample
- * methods of the Minim class.
- *
- * AudioSample also provides most of the same methods as AudioPlayer for
- * controlling volume, panning, and so forth.
- *
- * We now recommend using
- * The {@link #volume()}, {@link #gain()}, {@link #pan()}, and
- * {@link #balance()} methods return objects of type
- * Not all controls are available on all objects. Before calling the methods
- * mentioned above, you should call
- * {@link #hasControl(javax.sound.sampled.Control.Type)} with the control type
- * you want to use. Alternatively, you can use the
- * See: http://wikipedia.org/wiki/Decibel
- *
- * @shortdesc Returns the current gain.
- *
- * @return float: the current gain or zero if a gain control is unavailable.
- * the gain is expressed in decibels.
- *
- * @related setGain ( )
- * @related shiftGain ( )
- * @related isShiftingGain ( )
- */
- public float getGain()
- {
- return getValue(GAIN);
- }
-
- /**
- * Sets the gain. If a gain control is not available,
- * this does nothing.
- *
- * @shortdesc Sets the gain.
- *
- * @param value
- * float: the new value for the gain, expressed in decibels.
- *
- * @related getGain ( )
- * @related shiftGain ( )
- * @related isShiftingGain ( )
- */
- public void setGain(float value)
- {
- setValue(GAIN, value);
- }
-
- /**
- * Transitions the gain from one value to another.
- *
- * @param from
- * float: the starting gain
- * @param to
- * float: the ending gain
- * @param millis
- * int: the length of the transition in milliseconds
- *
- * @related getGain ( )
- * @related setGain ( )
- * @related isShiftingGain ( )
- */
- public void shiftGain(float from, float to, int millis)
- {
- if ( hasControl(GAIN) )
- {
- setGain(from);
- gshifter = new ValueShifter(from, to, millis);
- gshift = true;
- }
- }
-
- /**
- * Returns true if the gain is currently shifting.
- * If no gain control is available this method returns false.
- *
- * @return true if shifting, false otherwise
- *
- * @related getGain ( )
- * @related setGain ( )
- * @related shiftGain ( )
- */
- public boolean isShiftingGain() {
- return gshift;
- }
-
- /**
- * Returns the current balance. This will be in the range [-1, 1].
- * Usually balance will only be available for stereo audio sources,
- * because it describes how much attenuation should be applied to
- * the left and right channels.
- * If a balance control is not available, this will do nothing.
- *
- * @shortdesc Returns the current balance.
- *
- * @return float: the current balance or zero if a balance control is unavailable
- *
- * @related setBalance ( )
- * @related shiftBalance ( )
- * @related isShiftingBalance ( )
- */
- public float getBalance()
- {
- return getValue(BALANCE);
- }
-
- /**
- * Sets the balance.
- * The value should be in the range [-1, 1].
- * If a balance control is not available, this will do nothing.
- *
- * @shortdesc Sets the balance.
- *
- * @param value
- * float: the new value for the balance
- *
- * @related getBalance ( )
- * @related shiftBalance ( )
- * @related isShiftingBalance ( )
- */
- public void setBalance(float value)
- {
- setValue(BALANCE, value);
- }
-
- /**
- * Transitions the balance from one value to another.
- *
- * @param from
- * float: the starting balance
- * @param to
- * float: the ending balance
- * @param millis
- * int: the length of the transition in milliseconds
- *
- * @related getBalance ( )
- * @related setBalance ( )
- * @related isShiftingBalance ( )
- */
- public void shiftBalance(float from, float to, int millis)
- {
- if ( hasControl(BALANCE) )
- {
- setBalance(from);
- bshifter = new ValueShifter(from, to, millis);
- bshift = true;
- }
- }
-
- /**
- * Returns true if the balance is currently shifting.
- * If no gain control is available this method returns false.
- *
- * @return true if shifting, false otherwise
- *
- * @related getBalance ( )
- * @related setBalance ( )
- * @related shiftBalance ( )
- */
- public boolean isShiftingBalance() {
- return bshift;
- }
-
- /**
- * Returns the current pan.
- * Usually pan will be only be available on mono audio sources because
- * it describes a mono signal's position in a stereo field.
- * This will be in the range [-1, 1], where -1 will place the sound
- * only in the left speaker and 1 will place the sound only in the right speaker.
- *
- * @shortdesc Returns the current pan.
- *
- * @return float: the current pan or zero if a pan control is unavailable
- *
- * @related setPan ( )
- * @related shiftPan ( )
- * @related isShiftingPan ( )
- */
- public float getPan()
- {
- return getValue(PAN);
- }
-
- /**
- * Sets the pan.
- * The provided value should be in the range [-1, 1].
- * If a pan control is not present, this does nothing.
- *
- * @shortdesc Sets the pan.
- *
- * @param value
- * float: the new value for the pan
- *
- * @related getPan ( )
- * @related shiftPan ( )
- * @related isShiftingPan ( )
- */
- public void setPan(float value)
- {
- setValue(PAN, value);
- }
-
- /**
- * Transitions the pan from one value to another.
- *
- * @param from
- * float: the starting pan
- * @param to
- * float: the ending pan
- * @param millis
- * int: the length of the transition in milliseconds
- *
- * @related getPan ( )
- * @related setPan ( )
- * @related isShiftingPan ( )
- */
- public void shiftPan(float from, float to, int millis)
- {
- if ( hasControl(PAN) )
- {
- setPan(from);
- pshifter = new ValueShifter(from, to, millis);
- pshift = true;
- }
- }
-
- /**
- * Returns true if the pan is currently shifting.
- * If no gain control is available this method returns false.
- *
- * @return true if shifting, false otherwise
- *
- * @related getPan ( )
- * @related setPan ( )
- * @related shiftPan ( )
- */
- public boolean isShiftingPan() {
- return pshift;
- }
-}
diff --git a/src/ddf/minim/Effectable.java b/src/ddf/minim/Effectable.java
deleted file mode 100644
index 9eeb496..0000000
--- a/src/ddf/minim/Effectable.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede
- *
- * The
- * Minim keeps references to all of the resources that are
- * returned from these various methods so that you don't have to worry about closing them.
- * Instead, when your application ends you can simply call the stop method of your Minim instance.
- * Processing users do not need to do this because Minim detects when a PApplet is passed
- * to the contructor and registers for a notification of application shutdown.
- *
- * Minim requires an Object that can handle two important
- * file system operations so that it doesn't have to worry about details of
- * the current environment. These two methods are:
- *
- * These are methods that are defined in Processing, which Minim was originally
- * designed to cleanly interface with. The
- * Minim requires an Object that can handle two important
- * file system operations so that it doesn't have to worry about details of
- * the current environment. These two methods are:
- *
- * These are methods that are defined in Processing, which Minim was originally
- * designed to cleanly interface with. The
- * If using Minim outside of Processing, you must call this to
- * release all of the audio resources that Minim has generated.
- * It will call close() on all of them for you.
- *
- */
- public void stop()
- {
- debug( "Stopping Minim..." );
-
- // close all sources and release them
- for( AudioSource s : sources )
- {
- // null the parent so the AudioSource doesn't try to call removeSource
- s.parent = null;
- s.close();
- }
- sources.clear();
-
- for( AudioStream s : streams )
- {
- s.close();
- }
-
- // stop the implementation
- mimp.stop();
- }
-
- void addSource( AudioSource s )
- {
- sources.add( s );
- s.parent = this;
- }
-
- void removeSource( AudioSource s )
- {
- sources.remove( s );
- }
-
- /**
- * When using the JavaSound implementation of Minim, this sets the JavaSound Mixer
- * that will be used for obtaining input sources such as AudioInputs.
- * THIS METHOD WILL BE REPLACED IN A FUTURE VERSION.
- *
- * @param mixer
- * The Mixer we should try to acquire inputs from.
- */
- @Deprecated
- public void setInputMixer(Mixer mixer)
- {
- if ( mimp instanceof JSMinim )
- {
- ( (JSMinim)mimp ).setInputMixer( mixer );
- }
- }
-
- /**
- * When using the JavaSound implementation of Minim, this sets the JavaSound Mixer
- * that will be used for obtain output destinations such as those required by AudioOuput,
- * AudioPlayer, AudioSample, and so forth.
- * THIS METHOD WILL BE REPLACED IN A FUTURE VERSION.
- *
- * @param mixer
- * The Mixer we should try to acquire outputs from.
- */
- @Deprecated
- public void setOutputMixer(Mixer mixer)
- {
- if ( mimp instanceof JSMinim )
- {
- ( (JSMinim)mimp ).setOutputMixer( mixer );
- }
- }
-
- /**
- * Creates an AudioSample using the provided sample data and AudioFormat.
- * When a buffer size is not provided, it defaults to 1024. The buffer size
- * of a sample controls the size of the left, right, and mix AudioBuffer
- * fields of the returned AudioSample.
- *
- * @shortdesc Creates an AudioSample using the provided sample data and AudioFormat.
- *
- * @param sampleData
- * float[]: the single channel of sample data
- * @param format
- * the AudioFormat describing the sample data
- *
- * @return an AudioSample that can be triggered to make sound
- *
- * @example Advanced/CreateAudioSample
- *
- * @related AudioSample
- */
- public AudioSample createSample(float[] sampleData, AudioFormat format)
- {
- return createSample( sampleData, format, 1024 );
- }
-
- /**
- * Creates an AudioSample using the provided sample data and
- * AudioFormat, with the desired output buffer size.
- *
- * @param sampleData
- * float[]: the single channel of sample data
- * @param format
- * the AudioFormat describing the sample data
- * @param bufferSize
- * int: the output buffer size to use,
- * which controls the size of the left, right, and mix AudioBuffer
- * fields of the returned AudioSample.
- *
- * @return an AudioSample that can be triggered to make sound
- */
- public AudioSample createSample( float[] sampleData, AudioFormat format, int bufferSize )
- {
- AudioSample sample = mimp.getAudioSample( sampleData, format, bufferSize );
- addSource( sample );
- return sample;
- }
-
- /**
- * Creates an AudioSample using the provided left and right channel
- * sample data with an output buffer size of 1024.
- *
- * @param leftSampleData
- * float[]: the left channel of the sample data
- * @param rightSampleData
- * float[]: the right channel of the sample data
- * @param format
- * the AudioFormat describing the sample data
- *
- * @return an AudioSample that can be triggered to make sound
- */
- public AudioSample createSample( float[] leftSampleData, float[] rightSampleData, AudioFormat format )
- {
- return createSample( leftSampleData, rightSampleData, format, 1024 );
- }
-
- /**
- * Creates an AudioSample using the provided left and right channel
- * sample data.
- *
- * @param leftSampleData
- * float[]: the left channel of the sample data
- * @param rightSampleData
- * float[]: the right channel of the sample data
- * @param format
- * the AudioFormat describing the sample data
- * @param bufferSize
- * int: the output buffer size to use,
- * which controls the size of the left, right, and mix AudioBuffer
- * fields of the returned AudioSample.
- *
- * @return an AudioSample that can be triggered to make sound
- */
- public AudioSample createSample(float[] leftSampleData, float[] rightSampleData, AudioFormat format, int bufferSize)
- {
- AudioSample sample = mimp.getAudioSample( leftSampleData, rightSampleData, format, bufferSize );
- addSource( sample );
- return sample;
- }
-
- /**
- * Loads the requested file into an AudioSample.
- * By default, the buffer size used is 1024.
- *
- * @shortdesc Loads the requested file into an AudioSample.
- *
- * @param filename
- * the file or URL that you want to load
- *
- * @return an AudioSample that can be triggered to make sound
- *
- * @example Basics/TriggerASample
- *
- * @see #loadSample(String, int)
- * @see AudioSample
- * @related AudioSample
- */
- public AudioSample loadSample(String filename)
- {
- return loadSample( filename, 1024 );
- }
-
- /**
- * Loads the requested file into an AudioSample.
- *
- * @param filename
- * the file or URL that you want to load
- * @param bufferSize
- * int: The sample buffer size you want.
- * This controls the size of the left, right, and mix
- * AudioBuffer fields of the returned AudioSample.
- *
- * @return an AudioSample that can be triggered to make sound
- */
- public AudioSample loadSample(String filename, int bufferSize)
- {
- AudioSample sample = mimp.getAudioSample( filename, bufferSize );
- addSource( sample );
- return sample;
- }
-
- /** @invisible
- * Loads the requested file into an {@link AudioSnippet}
- *
- * @param filename
- * the file or URL you want to load
- * @return an
- * Using setOutputMixer you can also create AudioOutputs that
- * send sound to specific output channels of a soundcard.
- *
- * @example Basics/SynthesizeSound
- *
- * @shortdesc get an AudioOutput that can be used to generate audio
- *
- * @return an AudioOutput that can be used to generate audio
- * @see #getLineOut(int, int, float, int)
- * @related AudioOutput
- * @related UGen
- */
- public AudioOutput getLineOut()
- {
- return getLineOut( STEREO );
- }
-
- /**
- * Gets an {@link AudioOutput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @return an
- * This class is also useful for performing offline rendering of audio.
- *
- * @example Advanced/OfflineRendering
- *
- * @author Damien Di Fede
- *
- */
-
-public class SignalSplitter implements Recordable, AudioListener
-{
- private Vector
- * UGens might also have UGenInputs. Oscil, for example, has a UGenInput called
- *
- * A UGenInput will have an InputType of either AUDIO or CONTROL.
- * An AUDIO input will always have the same number of channels
- * as the owning UGen, in other words the length of the array
- * returned by getLastValues will have a length equal to
- * channel count. A CONTROL input will always have one channel
- * and its value can be conveniently queried by calling getLastValue().
- *
- * @example Basics/PatchingAnInput
- * @author Anderson Mills
- *
- */
- public final class UGenInput
- {
- private UGen m_incoming;
- private InputType m_inputType;
- private float[] m_lastValues;
-
- /**
- * Create a UGenInput with a particular type.
- *
- * @param type the InputType of this UGenInput
- */
- public UGenInput(InputType type)
- {
- m_inputType = type;
- m_allInputs.add( this );
- // assume one channel. good for controls and mono audio.
- m_lastValues = new float[1];
- }
-
- /**
- * Create a UGenInput of the specified type with an initial value.
- *
- * @param type the InputType of this UGenInput
- * @param value the initial float value used for all last values
- */
- public UGenInput( InputType type, float value )
- {
- m_inputType = type;
- m_allInputs.add( this );
- m_lastValues = new float[1];
- m_lastValues[0] = value;
- }
-
- /**
- * Set the number of channels this input should generate.
- * This will be called by the owning UGen if this input
- * is an AUDIO input.
- *
- * @param numberOfChannels
- * how many channels this input should generate
- */
- public void setChannelCount(int numberOfChannels)
- {
- if ( m_lastValues.length != numberOfChannels )
- {
- // make sure we keep the value we already had when
- // our channel count changes.
- float val = m_lastValues.length > 0 ? m_lastValues[0] : 0;
- m_lastValues = new float[numberOfChannels];
- Arrays.fill(m_lastValues, val);
- }
-
- // make sure our incoming UGen knows about this
- if ( m_incoming != null )
- {
- m_incoming.setChannelCount( numberOfChannels );
- }
- }
-
- /**
- * Returns how many channels this UGenInput generates.
- *
- * @return int: how many channels this input generates
- */
- public int channelCount()
- {
- return m_lastValues.length;
- }
-
- /**
- * Returns the InputType of this UGenInput.
- *
- * @return InputType: either AUDIO or CONTROL
- */
- public InputType getInputType()
- {
- return m_inputType;
- }
-
- /**
- * The outer UGen is the UGen that owns this input.
- * For instance, calling this on the frequency UGenInput
- * member of an Oscil will return the Oscil.
- *
- * @return the UGen that owns this UGenInput
- */
- public UGen getOuterUGen()
- {
- return UGen.this;
- }
-
- /**
- * The incoming UGen is the UGen that is patched to
- * this UGenInput. When this input is ticked, it
- * will tick the incoming UGen and store the result
- * in its last values.
- *
- * @return the UGen that is patched to this UGenInput
- */
- public UGen getIncomingUGen()
- {
- return m_incoming;
- }
-
- /**
- * This method is called when a UGen is patched to this input.
- * Typically you will not call this method directly,
- * use UGen's patch method instead.
- *
- * @param in
- * the UGen being patched to this input
- */
- public void setIncomingUGen(UGen in)
- {
- m_incoming = in;
- if ( m_incoming != null )
- {
- m_incoming.setChannelCount( m_lastValues.length );
- }
- }
-
- /**
- * Returns true if a UGen is patched to this UGenInput.
- *
- * @return true if a UGen is patched to this UGenInput
- */
- public boolean isPatched()
- {
- return ( m_incoming != null );
- }
-
- /**
- * Access the last values generated by this input.
- *
- * @return float[]: the last values generated by this input
- */
- public float[] getLastValues()
- {
- return m_lastValues;
- }
-
- /**
- * Returns the first value in the array of last values. This is meant to
- * make code that gets values from CONTROL inputs easier to read.
- *
- * @shortdesc Returns the first value in the array of last values.
- *
- * @return float: the last value generated by this input
- */
- // TODO (ddf) change these two to getValue and setValue?
- public float getLastValue()
- {
- return m_lastValues[0];
- }
-
- /**
- *
- * Sets all values in the last values array to the provided value. If
- * you want to set last values in the different channels of this input
- * to different values, you should use getLastValues to do so. For
- * example:
- *
- * A Fourier Transform is an algorithm that transforms a signal in the time
- * domain, such as a sample buffer, into a signal in the frequency domain, often
- * called the spectrum. The spectrum does not represent individual frequencies,
- * but actually represents frequency bands centered on particular frequencies.
- * The center frequency of each band is usually expressed as a fraction of the
- * sampling rate of the time domain signal and is equal to the index of the
- * frequency band divided by the total number of bands. The total number of
- * frequency bands is usually equal to the length of the time domain signal, but
- * access is only provided to frequency bands with indices less than half the
- * length, because they correspond to frequencies below the Nyquist frequency.
- * In other words, given a signal of length
- * As an example, if you construct an FFT with a
- *
- * Usage
- *
- * A typical usage of the FFT is to analyze a signal so that the
- * frequency spectrum may be represented in some way, typically with vertical
- * lines. You could do this in Processing with the following code, where
- *
- * Windowing is the process of shaping the audio samples before transforming them
- * to the frequency domain. The Fourier Transform assumes the sample buffer is is a
- * repetitive signal, if a sample buffer is not truly periodic within the measured
- * interval sharp discontinuities may arise that can introduce spectral leakage.
- * Spectral leakage is the speading of signal energy across multiple FFT bins. This
- * "spreading" can drown out narrow band signals and hinder detection.
- *
- * A windowing function
- * attempts to reduce spectral leakage by attenuating the measured sample buffer
- * at its end points to eliminate discontinuities. If you call the
- * Averages
- *
- * FFT also has functions that allow you to request the creation of
- * an average spectrum. An average spectrum is simply a spectrum with fewer
- * bands than the full spectrum where each average band is the average of the
- * amplitudes of some number of contiguous frequency bands in the full spectrum.
- *
- *
- *
- *
- * If you don't want any averages calculated, then you can call
- *
- * Inverse Transform
- *
- * FFT also supports taking the inverse transform of a spectrum.
- * This means that a frequency spectrum will be transformed into a time domain
- * signal and placed in a provided sample buffer. The length of the time domain
- * signal will be
- * As an example, if you construct a FourierTransform with a
- *
- * Usage
- *
- * A typical usage of a FourierTransform is to analyze a signal so that the
- * frequency spectrum may be represented in some way, typically with vertical
- * lines. You could do this in Processing with the following code, where
- *
- * Windowing is the process of shaping the audio samples before transforming them
- * to the frequency domain. The Fourier Transform assumes the sample buffer is a
- * repetitive signal; if a sample buffer is not truly periodic within the measured
- * interval, sharp discontinuities may arise that can introduce spectral leakage.
- * Spectral leakage is the spreading of signal energy across multiple FFT bins. This
- * "spreading" can drown out narrow band signals and hinder detection.
- *
- * A windowing function
- * attempts to reduce spectral leakage by attenuating the measured sample buffer
- * at its end points to eliminate discontinuities. If you call the
- * Averages
- *
- * FourierTransform also has functions that allow you to request the creation of
- * an average spectrum. An average spectrum is simply a spectrum with fewer
- * bands than the full spectrum where each average band is the average of the
- * amplitudes of some number of contiguous frequency bands in the full spectrum.
- *
- *
- *
- *
- * If you don't want any averages calculated, then you can call
- *
- * Inverse Transform
- *
- * FourierTransform also supports taking the inverse transform of a spectrum.
- * This means that a frequency spectrum will be transformed into a time domain
- * signal and placed in a provided sample buffer. The length of the time domain
- * signal will be
- * Windowing
- *
- * Windowing is the process of shaping the audio samples before transforming them
- * to the frequency domain. The Fourier Transform assumes the sample buffer is a
- * repetitive signal; if a sample buffer is not truly periodic within the measured
- * interval, sharp discontinuities may arise that can introduce spectral leakage.
- * Spectral leakage is the spreading of signal energy across multiple FFT bins. This
- * "spreading" can drown out narrow band signals and hinder detection.
- *
- * A windowing function
- * attempts to reduce spectral leakage by attenuating the measured sample buffer
- * at its end points to eliminate discontinuities. If you call the
- *
- Fast Fourier Transform
-
- A Fourier Transform is an algorithm that transforms a signal in the time
- domain, such as a sample buffer, into a signal in the frequency domain, often
- called the spectrum. The spectrum does not represent individual frequencies,
- but actually represents frequency bands centered on particular frequencies.
- The center frequency of each band is usually expressed as a fraction of the
- sampling rate of the time domain signal and is equal to the index of the
- frequency band divided by the total number of bands. The total number of
- frequency bands is usually equal to the length of the time domain signal, but
- access is only provided to frequency bands with indices less than half the
- length, because they correspond to frequencies below the Nyquist frequency.
- In other words, given a signal of length
- Beat (or Onset) Detection
-
- The BeatDetect class allows you to analyze an audio stream for beats (rhythmic onsets).
- Beat
- Detection Algorithms by Frederic Patin describes beats in the following
- way:
-
-
- BeatDetect has two modes: sound energy tracking and frequency energy
- tracking. In sound energy mode, the level of the buffer, as returned by
-
- In sound energy mode you use
- *
- * It is supposed to be a replacement of the byte[] stream architecture of
- * JavaSound, especially for chains of AudioInputStreams. Ideally, all involved
- * AudioInputStreams handle reading into a FloatSampleBuffer.
- *
- * Specifications:
- *
- * When a cascade of AudioInputStreams use FloatSampleBuffer for processing,
- * they may implement the interface FloatSampleInput. This signals that this
- * stream may provide float buffers for reading. The data is not
- * converted back to bytes, but stays in a single buffer that is passed from
- * stream to stream. The read(FloatSampleBuffer) method serves this purpose, and
- * is then used as a replacement for the byte-based read functions of
- * AudioInputStream.
- * To summarize, here are some advantages when using a FloatSampleBuffer for
- * streaming:
- *
- * Simple benchmarks showed that the processing requirements for the conversion
- * to and from float is about the same as when converting it to shorts or ints
- * without dithering, and significantly higher with dithering. A custom
- * implementation of a random number generator may improve this.
- *
- * "Lazy" deletion of samples and channels:
- * Use the
- * Note that the lazy mechanism implies that the arrays returned from
- *
- * As an example, consider a chain of converters that all act on the same
- * instance of FloatSampleBuffer. Some converters may decrease the sample count
- * (e.g. sample rate converter) and delete channels (e.g. PCM2PCM converter).
- * So, processing of one block will decrease both. For the next block, all
- * starts from the beginning. With the lazy mechanism, all float arrays are only
- * created once for processing all blocks.
- * Dithering:
- * The format and the number of samples of this float buffer are not
- * changed, so if the byte array has more samples than fit into this float
- * buffer, it is not expanded.
- *
- * @param buffer the byte buffer to write to this float buffer
- * @param srcByteOffset the offset in bytes in buffer where to start reading
- * @param format the audio format of the bytes in buffer
- * @param dstSampleOffset the offset in samples where to start writing the
- * converted float data into this float buffer
- * @param aSampleCount the number of samples to write
- * @return the number of samples actually written
- */
- public int writeByteBuffer(byte[] buffer, int srcByteOffset,
- AudioFormat format, int dstSampleOffset, int aSampleCount) {
- if (dstSampleOffset + aSampleCount > getSampleCount()) {
- aSampleCount = getSampleCount() - dstSampleOffset;
- }
- int lChannels = format.getChannels();
- if (lChannels > getChannelCount()) {
- lChannels = getChannelCount();
- }
- if (lChannels > format.getChannels()) {
- lChannels = format.getChannels();
- }
- for (int channel = 0; channel < lChannels; channel++) {
- float[] data = getChannel(channel);
-
- FloatSampleTools.byte2floatGeneric(buffer, srcByteOffset,
- format.getFrameSize(), data, dstSampleOffset, aSampleCount,
- format);
- srcByteOffset += format.getFrameSize() / format.getChannels();
- }
- return aSampleCount;
- }
-
- /**
- * Deletes all channels, frees memory... This also removes hidden channels
- * by lazy remove.
- */
- public void reset() {
- init(0, 0, 1, false);
- }
-
- /**
- * Destroys any existing data and creates new channels. It also destroys
- * lazy removed channels and samples. Channels will not be silenced, though.
- */
- public void reset(int newChannels, int newSampleCount, float newSampleRate) {
- init(newChannels, newSampleCount, newSampleRate, false);
- }
-
- // //////////////////////// conversion back to bytes ///////////////////
-
- /**
- * @return the required size of the buffer needed when
- * convertToByteArray(..) is called
- */
- public int getByteArrayBufferSize(AudioFormat format) {
- return getByteArrayBufferSize(format, getSampleCount());
- }
-
- /**
- * @param lenInSamples how many samples to be considered
- * @return the required size of the buffer for the given number of samples
- * for calling convertToByteArray(..)
- */
- public int getByteArrayBufferSize(AudioFormat format, int lenInSamples) {
- // make sure this format is supported
- checkFormatSupported(format);
- return format.getFrameSize() * lenInSamples;
- }
-
- /**
- * Writes this sample buffer's audio data to
- * If
- * If
- * If
- * This method is not error tolerant, in particular, runtime exceptions
- * will be thrown if the channel counts do not match, or if the
- * offsets and count exceed the buffer's capacity.
- *
- * @param source the source buffer from where to take samples and mix to this one
- * @param sourceOffset offset in source where to start reading samples
- * @param thisOffset offset in this buffer from where to start mixing samples
- * @param count number of samples to mix
- */
- public void mix(FloatSampleBuffer source, int sourceOffset, int thisOffset, int count) {
- int localChannelCount = getChannelCount();
- for (int ch = 0; ch < localChannelCount; ch++) {
- float[] thisChannel = getChannel(ch);
- float[] otherChannel = source.getChannel(ch);
- for (int i = 0; i < count; i++) {
- thisChannel[i+thisOffset] += otherChannel[i+sourceOffset];
- }
- }
- }
-
- /**
- * Copies the contents of this buffer to the destination buffer at the
- * destOffset. At most,
- *
- * Note: this value is only used, when dithering is actually performed.
- */
- public void setDitherBits(float ditherBits) {
- if (ditherBits <= 0) {
- throw new IllegalArgumentException(
- "DitherBits must be greater than 0");
- }
- this.ditherBits = ditherBits;
- }
-
- public float getDitherBits() {
- return ditherBits;
- }
-
- /**
- * Sets the mode for dithering. This can be one of:
- *
- * Currently, the following bit sizes are supported:
- *
- * Only PCM formats are accepted. The method will convert all byte values
- * from
- * Only PCM formats are accepted. The method will convert all byte values
- * from
- * Only PCM formats are accepted. The method will convert all byte values
- * from
- * Only PCM formats are accepted. The method will convert all bytes from
- *
- * The
- * For mono data, set
- * E.g.:
- * Only PCM formats are accepted. The method will convert all samples from
- *
- * Dithering should be used when the output resolution is significantly
- * lower than the original resolution. This includes if the original data
- * was 16-bit and it is now converted to 8-bit, or if the data was generated
- * in the float domain. No dithering need to be used if the original sample
- * data was in e.g. 8-bit and the resulting output data has a higher
- * resolution. If dithering is used, a sensible value is
- * DEFAULT_DITHER_BITS.
- *
- * @param input a List of float arrays with the input audio data
- * @param inOffset index in the input arrays where to start the conversion
- * @param output the byte array that receives the converted audio data
- * @param outByteOffset the start offset in
- * Only PCM formats are accepted. The method will convert all samples from
- *
- * Dithering should be used when the output resolution is significantly
- * lower than the original resolution. This includes if the original data
- * was 16-bit and it is now converted to 8-bit, or if the data was generated
- * in the float domain. No dithering need to be used if the original sample
- * data was in e.g. 8-bit and the resulting output data has a higher
- * resolution. If dithering is used, a sensible value is
- * DEFAULT_DITHER_BITS.
- *
- * @param input the audio data in normalized samples
- * @param inOffset index in input where to start the conversion
- * @param output the byte array that receives the converted audio data
- * @param outByteOffset the start offset in
- * Only PCM formats are accepted. The method will convert all samples from
- *
- * The
- * For mono data, set
- * E.g.:
- Here are some of the features of Minim:
-
- If you are using Processing 2.0 you've already got Minim! So feel free to peruse
- this documentation and start playing with examples. If you prefer Javadocs,
- we got those too!
- Bear in mind, however, that the version of Minim included with Processing 2.0
- is not the most recent version. We recommend using Processing 3.0 so that you can
- easily install the latest version of the library to take advantage of all the
- latest bug fixes and additions.
-
- In Processing 3.0 you can install Minim from the Contribution Manager.
- Open the manager using the Sketch menu. Choose Import Library and then Add Library.
- In the Libraries tab of the Contribution Manager, type Minim in the Filter box,
- select the library from the list, and click Install.
-
- If you are not using Processing,
- you can still use Minim! We provide a constructor for the
- The download includes Processing examples and source code.
- Minim is licensed under the
- GNU Lesser General Public License (LGPL),
- a copy of which is included with the distribution.
-
- Download:
- Minim 2.2.2 Zip or visit the
- Github 2.2.2 release page
-
- If you have any questions about using the library you can start by
- checking the Processing forum
- or send me a private message there.
- If you find bugs, please report them on the
- Github issues page.
-
- If you'd like to contribute to the development of Minim, simply
- fork the project on Github
- and send pull requests when you've got code you'd like us to consider
- for inclusion in the library. Enjoy!
-
- * A balance of 0 will make no change to the incoming audio. Negative balance
- * will decrease the volume of the right channel and positive balance will
- * decrease the volume of the left channel. This is meant to mirror how
- * a balance knob on a typical stereo operates.
- *
- * @author Anderson Mills
- *
- * @example Synthesis/balanceExample
- *
- */
-public class Balance extends UGen
-{
-
- /**
- * The audio input is where audio comes in to be balanced. You won't need to
- * patch to this directly, patching to the balance UGen itself will achieve
- * the same thing.
- *
- * @related Balance
- */
- public UGenInput audio;
-
- /**
- * The balance control should be driven by UGens that generate values in the
- * range [-1, 1].
- *
- * @related setBalance ( )
- * @related Balance
- */
- public UGenInput balance;
-
- /**
- * Construct a Balance with a value of 0 (no change).
- *
- */
- public Balance()
- {
- this( 0.0f );
- }
-
- /**
- * Construct a balance with a particular value.
- *
- * @param balanceVal
- * float: a value in the range [-1, 1]
- */
- public Balance( float balanceVal )
- {
- super();
- // jam3: These can't be instantiated until the uGenInputs ArrayList
- // in the super UGen has been constructed
- //audio = new UGenInput(InputType.AUDIO);
- audio = new UGenInput(InputType.AUDIO);
- balance = new UGenInput(InputType.CONTROL);
- balance.setLastValue(balanceVal);
- }
-
- /**
- * Set the balance setting to balanceVal.
- *
- * @param balanceVal
- * float: the new value for this Balance
- *
- * @related balance
- * @related Balance
- */
- public void setBalance( float balanceVal )
- {
- balance.setLastValue(balanceVal);
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- for(int i = 0; i < channels.length; i++)
- {
- float tmp = audio.getLastValues()[i];
- float bal = balance.getLastValue();
- channels[i] = tmp*(float)Math.min( 1.0f, Math.max( 0.0f, 1.0f + Math.pow( -1.0f, i )* bal) );
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/BitCrush.java b/src/ddf/minim/ugens/BitCrush.java
deleted file mode 100644
index 06a7887..0000000
--- a/src/ddf/minim/ugens/BitCrush.java
+++ /dev/null
@@ -1,140 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.Minim;
-import ddf.minim.UGen;
-
-/**
- * BitCrush is an effect that reduces the fidelity of the incoming signal.
- * This results in a sound that is "crunchier" sounding, or "distorted".
- *
- * Audio is represented digitally (ultimately) as an integral value. If you
- * have 16-bit audio, then you can represent a sample value with any number
- * in the range -32,768 to +32,767. If you bit-crush this audio to be 8-bit,
- * then you effectively reduce its representation to -128 to +127, even though
- * you will still represent it with a 16-bit number. This reduction in the
- * fidelity of the representation essentially squares off the waveform,
- * which makes it sound "crunchy". Try bit crushing down to 1-bit and see
- * what you get!
- *
- * @author Anderson Mills
- *
- * @example Synthesis/bitCrushExample
- *
- * @related UGen
- */
-public class BitCrush extends UGen
-{
- // jam3: define the inputs to gain
-
- /**
- * The audio input is where audio that gets bit-crushed should be patched.
- * However, you don't need to patch directly to this input, patching to
- * the UGen itself will accomplish the same thing.
- *
- * @related BitCrush
- */
- public UGenInput audio;
-
- /**
- * Control the bit resolution with another UGen by patching to bitRes. Values that
- * make sense for this start at 1 and go up to whatever the actual resolution of
- * the incoming audio is (typically 16).
- *
- * @example Synthesis/bitCrushExample
- *
- * @related setBitRes ( )
- * @related BitCrush
- */
- public UGenInput bitRes;
-
- /**
- * Control the bit rate with another UGen by patching to bitRate.
- * Values that make sense for this start at 1 and go up to whatever the
- * sample rate of your AudioOutput is (typically 44100)
- *
- * @example Synthesis/bitCrushExample
- *
- * @related BitCrush
- */
- public UGenInput bitRate;
-
- float[] sampledFrame;
- int sampleCounter;
-
- /**
- * Construct a BitCrush with a bit resolution of 1 and a bit rate of 44100.
- *
- */
- public BitCrush()
- {
- this( 1.0f, 44100 );
- }
-
- /**
- * Construct a BitCrush with the specified bit resolution and bit rate.
- *
- * @param localBitRes
- * float: typically you'll want this in the range [1,16]
- * @param localBitRate
- * float: this must be in the range [1,outputSampleRate]
- */
- public BitCrush( float localBitRes, float localBitRate )
- {
- super();
- // jam3: These can't be instantiated until the uGenInputs ArrayList
- // in the super UGen has been constructed
- //audio = new UGenInput(InputType.AUDIO);
- audio = new UGenInput(InputType.AUDIO);
- bitRes = new UGenInput(InputType.CONTROL);
- bitRes.setLastValue(localBitRes);
- bitRate = new UGenInput(InputType.CONTROL);
- bitRate.setLastValue( localBitRate );
-
- sampledFrame = new float[ channelCount() ];
- }
-
- protected void channelCountChanged()
- {
- sampledFrame = new float[ channelCount() ];
- sampleCounter = 0;
-
- //System.out.println( "BitCrush now has " + getAudioChannelCount() + " channels." );
- }
-
- /**
- * Set the bit resolution directly.
- *
- * @param localBitRes
- * float: typically you'll want this in the range [1,16]
- *
- * @related bitRes
- * @related BitCrush
- */
- public void setBitRes(float localBitRes)
- {
- bitRes.setLastValue(localBitRes);
- }
-
- @Override
- protected void uGenerate(float[] out)
- {
- if ( sampleCounter <= 0 )
- {
- if ( audio.getLastValues().length != channelCount() )
- {
- Minim.error( "BitCrush audio has " + audio.getLastValues().length + " channels and sampledFrame has " + channelCount() );
- }
- System.arraycopy( audio.getLastValues(), 0, sampledFrame, 0, channelCount() );
- sampleCounter = (int)(sampleRate() / Math.max(bitRate.getLastValue(),1));
- }
-
- final int res = 1 << (int)bitRes.getLastValue();
- for( int i = 0; i < out.length; ++i )
- {
- int samp = (int)(res * sampledFrame[i]);
- out[i] = (float)samp/res;
- }
-
- --sampleCounter;
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Bypass.java b/src/ddf/minim/ugens/Bypass.java
deleted file mode 100644
index 605acc3..0000000
--- a/src/ddf/minim/ugens/Bypass.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- *
- * The Bypass UGen allows you to wrap another UGen and then insert that UGen into your
- * signal chain using Bypass in its place. You can then dynamically route the
- * audio through the wrapped UGen or simply allow incoming audio to pass through unaffected.
- * Using a Bypass UGen allows you to avoid concurrency issues caused by patching and unpatching
- * during runtime from a Thread other than the audio one.
- *
- * Your usage of Bypass might look something like this:
- *
- * If you needed to patch something else to one of the inputs of the GranulateSteady,
- * you'd use the
- * Now, calling the
- * Inputs for the Flanger are:
- *
- * A more thorough description can be found on wikipedia:
- * http://en.wikipedia.org/wiki/Flanging
- *
- *
- * @author Damien Di Fede
- *
- * @example Synthesis/flangerExample
- *
- * @related UGen
- */
-
-public class Flanger extends UGen
-{
- /**
- * Where the input goes.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput audio;
-
- /**
- * How much does the flanger delay the incoming signal. Used as the low
- * value of the modulated delay amount.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput delay;
-
- /**
- * The frequency of the LFO applied to the delay.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput rate;
-
- /**
- * How many milliseconds the LFO increases the delay by at the maximum.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput depth;
-
- /**
- * How much of the flanged signal is fed back into the effect.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput feedback;
-
- /**
- * How much of the dry signal is added to the output.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput dry;
-
- /**
- * How much of the flanged signal is added to the output.
- *
- * @example Synthesis/flangerExample
- *
- * @related Flanger
- * @related UGen.UGenInput
- */
- public UGenInput wet;
-
- private float[] delayBuffer;
- private int outputFrame;
- private int bufferFrameLength;
-
- // ////////////
- // LFO
- // ////////////
-
- // where we will sample our waveform, moves between [0,1]
- private float step;
- // the step size we will use to advance our step
- private float stepSize;
- // what was our frequency from the last time we updated our step size
- // stashed so that we don't do more math than necessary
- private float prevFreq;
- // 1 / sampleRate, which is used to calculate stepSize
- private float oneOverSampleRate;
-
- /**
- * Construct a Flanger by specifying all initial values.
- *
- * @param delayLength
- * float: the minimum delay applied to incoming samples (in milliseconds)
- * @param lfoRate
- * float: the frequency of the the LFO
- * @param delayDepth
- * float: the maximum amount added to the delay by the LFO (in milliseconds)
- * @param feedbackAmplitude
- * float: the amount of the flanged signal fed back into the effect
- * @param dryAmplitude
- * float: the amount of incoming signal added to the output
- * @param wetAmplitude
- * float: the amount of the flanged signal added to the output
- */
- public Flanger(float delayLength, float lfoRate, float delayDepth,
- float feedbackAmplitude, float dryAmplitude, float wetAmplitude)
- {
- audio = addAudio();
- delay = addControl( delayLength );
- rate = addControl( lfoRate );
- depth = addControl( delayDepth );
- feedback = addControl( feedbackAmplitude );
- dry = addControl( dryAmplitude );
- wet = addControl( wetAmplitude );
- }
-
- private void resetBuffer()
- {
- int sampleCount = (int)( 100 * sampleRate() / 1000 );
- delayBuffer = new float[sampleCount * audio.channelCount()];
- outputFrame = 0;
- bufferFrameLength = sampleCount;
- }
-
- // clamps rate for us
- private float getRate()
- {
- float r = rate.getLastValue();
- return r > 0.001f ? r : 0.001f;
- }
-
- protected void sampleRateChanged()
- {
- resetBuffer();
-
- oneOverSampleRate = 1 / sampleRate();
- // don't call updateStepSize because it checks for frequency change
- stepSize = getRate() * oneOverSampleRate;
- prevFreq = getRate();
- // start at the lowest value
- step = 0.25f;
- }
-
- // updates our step size based on the current frequency
- private void updateStepSize()
- {
- float currFreq = getRate();
- if ( prevFreq != currFreq )
- {
- stepSize = currFreq * oneOverSampleRate;
- prevFreq = currFreq;
- }
- }
-
- protected void channelCountChanged()
- {
- resetBuffer();
- }
-
- protected void uGenerate(float[] out)
- {
- // generate lfo value
- float lfo = Waves.SINE.value( step );
-
- // modulate the delay amount using the lfo value.
- // we always modulate to a max of 5ms above the input delay.
- float dep = depth.getLastValue() * 0.5f;
- float delMS = delay.getLastValue() + ( lfo * dep + dep );
-
- // how many sample frames is that?
- int delFrame = (int)( delMS * sampleRate() / 1000 );
-
- for ( int i = 0; i < out.length; ++i )
- {
- int outputIndex = outputFrame * audio.channelCount() + i;
- float inSample = audio.getLastValues()[i];
- float wetSample = delayBuffer[outputIndex];
-
- // figure out where we need to place the delayed sample in our ring
- // buffer
- int delIndex = ( ( outputFrame + delFrame ) * audio.channelCount() + i )
- % delayBuffer.length;
- delayBuffer[delIndex] = inSample + wetSample
- * feedback.getLastValue();
-
- // the output sample is in plus wet, each scaled by amplitude inputs
- out[i] = inSample * dry.getLastValue() + wetSample
- * wet.getLastValue();
- }
-
- // next output frame
- ++outputFrame;
- if ( outputFrame == bufferFrameLength )
- {
- outputFrame = 0;
- }
-
- updateStepSize();
-
- // step the LFO
- step += stepSize;
- if ( step > 1 )
- {
- step -= 1;
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Frequency.java b/src/ddf/minim/ugens/Frequency.java
deleted file mode 100644
index 21e74fe..0000000
--- a/src/ddf/minim/ugens/Frequency.java
+++ /dev/null
@@ -1,266 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.ListIterator;
-import java.util.TreeMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import ddf.minim.Minim;
-
-/**
- *
- * An Oscil is a UGen that generates audio by oscillating over a Waveform
- * at a particular frequency. For instance, if you were to create this Oscil:
- *
- * When patched to an AudioOuput, it would generate a continuous sine wave tone
- * at 440 Hz and would sound like a test tone.
- * This frequency also happens to be the same as the pitch played
- * by the lead oboist in a orchestra when they tune up at the beginning of a concert.
- *
- * However, rather than give Oscil a fixed, or limited, set of sounds it
- * can generate, instead it simply oscillates over a generic Waveform object.
- * Waveform is simply an interface that declares a value method, which
- * is used by Oscil to determine what value it should output at any given moment
- * in time. Generally, you will use predefined Waveforms from the Waves class,
- * or generated Waveforms using the WavetableGenerator class. However, there's
- * no particular reason you couldn't define your own classes that implement
- * the Waveform interface.
- *
- * Another abstraction the Oscil UGen makes use of is the Frequency class.
- * This class allows you to define a frequency in terms of pitch, midi note,
- * or hertz. This is often quite useful when writing musical scores with code.
- * For instance, we could use the Frequency class when creating an Oscil that
- * will sound the same as the example above:
- *
- * Sampler provides several inputs that allow you to control the properties
- * of a triggered sample. When you call the trigger method, the values of these
- * inputs are "snapshotted" and used to configure the new voice that will play
- * the sample. So, changing the values does not effect already playing voices,
- * except for
- * Sample the Waveform at the location specified.
- * As an example, if the Waveform represents a sine wave,
- * then we would expect the following:
- *
- *
- *
- *
- * Would generate a Wavetable that was 4096 samples long and the values of those samples would start at 1.0,
- * linearly decrease to -1.0 over 2048 samples, and then increase to 1.0 over the next 2048 samples.
- *
- * If you wanted to generate a triangle wavetable with 4096 samples, you'd do this:
- *
- *
- *
- * But what this method lets you do, is create a Wavetable that contains several different partials, each with
- * a particular amplitude or phase shift. For instance, you could create a Wavetable that plays two pitches an octave
- * apart like this:
- *
- *
- * If this is something you want a particular instrument you write to do, then creating a Wavetable that already
- * contains the octave and using that in an Oscil will be less computationally expensive than creating two Oscils
- * and setting their frequencies an octave apart.
- *
- * @shortdesc Generates a Wavetable from a list of partials with matching amplitudes and phases.
- *
- * @param size
- * int: how many samples the Wavetable should contain
- * @param partial
- * float[]: a list of partials to generate
- * @param amp
- * float[]: the amplitude of each partial
- * @param phase
- * float[]: the phase of each partial
- *
- * @return a Wavetable
- *
- * @related Wavetable
- *
- */
- // generates waveform from lists of partials
- // phases are between 0 and 1
- public static Wavetable gen9(int size, float[] partial, float[] amp, float[] phase)
- {
-
- if (partial.length != amp.length
- || partial.length != phase.length
- || amp.length != phase.length)
- {
- System.err.println("Input arrays of different size!");
- return null;
- }
-
- float[] waveform = new float[size];
-
- float index = 0;
- for (int i = 0; i < size; i++)
- {
- index = (float)i / (size - 1);
- for (int j = 0; j < partial.length; j++)
- {
- waveform[i] += amp[j]
- * Math.sin(2 * Math.PI * partial[j] * index + phase[j]);
- }
- }
-
- return new Wavetable(waveform);
- }
-
- /**
- *
- * Generate a Wavetable given a list of amplitudes for successive partials (harmonics). These two method
- * calls are equivalent:
- *
- *
- *
- AudioOutput can also be treated as a UGen for the purposes of patching, but it stands alone, different from the rest.
-
- Whenever you want to use a UGen, you'll need to instantiate it first.
- The code to instantiate and Oscil UGen can look like this:
-
- This line tells Processing to create a new reference to an Oscil object and call that reference "osc".
- This line also says to "instantiate" an Oscil object using one of its constructors, specifically,
- the constructor which takes two float values. This line also tells Processing to make the new object
- ref "osc" and make it point to the newly instantiated Oscil object.
- In summary, this line makes "osc" refer to a new Oscil object.
- Every UGen will need to be instantiated in a fashion similar to this,
- although sometimes the creation of the reference and the creation of the object may need to be put in different locations.
-
- Almost all UGens have inputs. An input is a way to control what the UGen does while it's generating sound.
- For instance, an Oscil UGen has an amplitude, frequency, and phase input.
- The frequency input allows the frequency to be changed as the Oscil is sound.
- Many of the UGens, like the following effects UGens, have an audio input.
- This is what brings in the sampleframes from the previous UGens.
- Often when directly patching to a UGen, you are actually patching to its audio input.
-
- An Oscil UGen is an oscillator.
- It creates an output waveform which repeats at the specified frequency and at the specified amplitude.
- In the case of Oscil, if a constructor with two floats is called, the first is read as the frequency and
- the second is read as the amplitude. It is possible to include a waveshape as well, as a third argument.
-
- The Noise UGen generates noise of different "tints": white, pink, and red/brown.
- Wikipedia has some excellent articles on audio noise.
-
- LiveInput transmits incoming audio through the synthesis chain. The incoming audio is selected by the operating system.
-
- FilePlayer will play a file (even from the web!) into the synthesis stream. It can be looped and paused as desired.
-
- Sampler is typically used to load a short audio file into memory and then "trigger" it to play the sound.
- Before triggering the sound you can set the inputs to control the section of the sample to play, the amplitude,
- the duration of the fade in of the sound (attack), and the playback rate.
-
- Vocoder is a very simple vocoding effect (http://en.wikipedia.org/wiki/Vocoder) that provides an audio and a modulator input.
-
- The Delay UGen repeats a delayed version of the incoming signal.
-
- The Pan UGen takes a mono signal and specifies a stereo position for that signal.
-
- The Balance UGen attenuates the left or right channel of stereo signal.
-
- The Gain attenuates or amplifies the incoming signal. Gain is expressed in dB.
-
- BitCrush reduces the bit resolution of the incoming signal.
-
- WaveShaper uses the incoming signal as the index to another wave.
- This is a standard form of waveform synthesis and Wikipedia has a good article about it.
-
- MoogFilter is a digital model of the analog filter found on Moog synthesizers. It can be
- configured to be either low pass, band pass, or high pass.
-
- Flanger is a special kind of tight delay that has modulation of the delay time built into the algorithm.
-
- Envelopes are generally used to control the amplitude or another aspect of the sound during the playback of a note.
- A more detailed discussion of the usage of envelopes will follow in the Instruments section.
-
- The Line Ugen outputs a value which changes from a specified beginning value to an ending value over a specified time.
-
- The ADSR UGen produces an attack-decay-sustain-release envelope.
-
- The Damp UGen produces an attack-decay envelope.
-
- The GranulateSteady UGen produces steady length granular-synthesis grains from the input signal.
-
- The GranulateRandom UGen produces random length granular-synthesis grains from the input signal.
- At this time, the grains for both GranulateSteady and GranulateRandom have a linear fade in and out.
-
- The Oscil UGen can also be used as an envelope if the frequency is set so the period of the wave is about the duration of the note.
-
- Due to our decision to use the patching mechanism to connect synthesis chains,
- it's unfortunately not simple to do math with the sample frames being passed along the synthesis chains.
- We had to implement math directly as UGens.
-
- The Constant UGen generates a constant value as a signal.
-
- The Summer UGen adds (sums) all incoming inputs.
-
- The Multiplier UGen multiplies an incoming signal by an amplitude.
-
- The Reciprocal UGen generates the reciprocal of the incoming signal.
- This can be useful when, for example, building a physical modeling instrument which is based on the length of a tube.
-
- The Midi2Hz UGen generates the equivalent frequency in Hertz for an incoming signal given as a MIDI note number.
- The MIDI note number does not need to be an integer. This permits changes in pitch which are musical in nature.
-
- The Abs UGen outputs the absolute value of the incoming signal.
-
- The Bypass UGen can be used to "wrap" another UGen, enabling you to route audio around it without having to unpatch
- anything.
-
- The EnvelopeFollower UGen will analyze the incoming signal and output a value that represents the strength of that signal.
-
- The TickRate UGen can be used to slow down or speed up the rate at which a UGen generates.
-
- The Sink UGen is similar to a Summer in that it can have many UGens patched to it, but it will not produce any sound,
- instead simply ticking everything that is patched to it and discarding the audio.
-
- * If this object is a Map or a List, and it's also a JSONStreamAware or a JSONAware, JSONStreamAware or JSONAware will be considered firstly.
- *
- * DO NOT call this method from writeJSONString(Writer) of a class that implements both JSONStreamAware and (Map or List) with
- * "this" as the first parameter, use JSONObject.writeJSONString(Map, Writer) or JSONArray.writeJSONString(List, Writer) instead.
- *
- * @see org.json.simple.JSONObject#writeJSONString(Map, Writer)
- * @see org.json.simple.JSONArray#writeJSONString(List, Writer)
- *
- * @param value
- * @param writer
- */
- public static void writeJSONString(Object value, Writer out) throws IOException {
- if(value == null){
- out.write("null");
- return;
- }
-
- if(value instanceof String){
- out.write('\"');
- out.write(escape((String)value));
- out.write('\"');
- return;
- }
-
- if(value instanceof Double){
- if(((Double)value).isInfinite() || ((Double)value).isNaN())
- out.write("null");
- else
- out.write(value.toString());
- return;
- }
-
- if(value instanceof Float){
- if(((Float)value).isInfinite() || ((Float)value).isNaN())
- out.write("null");
- else
- out.write(value.toString());
- return;
- }
-
- if(value instanceof Number){
- out.write(value.toString());
- return;
- }
-
- if(value instanceof Boolean){
- out.write(value.toString());
- return;
- }
-
- if((value instanceof JSONStreamAware)){
- ((JSONStreamAware)value).writeJSONString(out);
- return;
- }
-
- if((value instanceof JSONAware)){
- out.write(((JSONAware)value).toJSONString());
- return;
- }
-
- if(value instanceof Map){
- JSONObject.writeJSONString((Map)value, out);
- return;
- }
-
- if(value instanceof Collection){
- JSONArray.writeJSONString((Collection)value, out);
- return;
- }
-
- if(value instanceof byte[]){
- JSONArray.writeJSONString((byte[])value, out);
- return;
- }
-
- if(value instanceof short[]){
- JSONArray.writeJSONString((short[])value, out);
- return;
- }
-
- if(value instanceof int[]){
- JSONArray.writeJSONString((int[])value, out);
- return;
- }
-
- if(value instanceof long[]){
- JSONArray.writeJSONString((long[])value, out);
- return;
- }
-
- if(value instanceof float[]){
- JSONArray.writeJSONString((float[])value, out);
- return;
- }
-
- if(value instanceof double[]){
- JSONArray.writeJSONString((double[])value, out);
- return;
- }
-
- if(value instanceof boolean[]){
- JSONArray.writeJSONString((boolean[])value, out);
- return;
- }
-
- if(value instanceof char[]){
- JSONArray.writeJSONString((char[])value, out);
- return;
- }
-
- if(value instanceof Object[]){
- JSONArray.writeJSONString((Object[])value, out);
- return;
- }
-
- out.write(value.toString());
- }
-
- /**
- * Convert an object to JSON text.
- *
- * If this object is a Map or a List, and it's also a JSONAware, JSONAware will be considered firstly.
- *
- * DO NOT call this method from toJSONString() of a class that implements both JSONAware and Map or List with
- * "this" as the parameter, use JSONObject.toJSONString(Map) or JSONArray.toJSONString(List) instead.
- *
- * @see org.json.simple.JSONObject#toJSONString(Map)
- * @see org.json.simple.JSONArray#toJSONString(List)
- *
- * @param value
- * @return JSON text, or "null" if value is null or it's an NaN or an INF number.
- */
- public static String toJSONString(Object value){
- final StringWriter writer = new StringWriter();
-
- try{
- writeJSONString(value, writer);
- return writer.toString();
- } catch(IOException e){
- // This should never happen for a StringWriter
- throw new RuntimeException(e);
- }
- }
-
- /**
- * Escape quotes, \, /, \r, \n, \b, \f, \t and other control characters (U+0000 through U+001F).
- * @param s
- * @return
- */
- public static String escape(String s){
- if(s==null)
- return null;
- StringBuffer sb = new StringBuffer();
- escape(s, sb);
- return sb.toString();
- }
-
- /**
- * @param s - Must not be null.
- * @param sb
- */
- static void escape(String s, StringBuffer sb) {
- final int len = s.length();
- for(int i=0;iUGen
.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-
-@Deprecated
-public interface AudioEffect
-{
- /**
- * Processes signal
in some way.
- *
- * @param signal
- * an array of audio samples, representing a mono sound stream.
- */
- void process(float[] signal);
-
- /**
- * Processes sigLeft
and sigRight
in some way.
- *
- * @param sigLeft
- * an array of audio samples, representing the left channel of a
- * stereo sound stream
- * @param sigRight
- * an array of audio samples, representing the right channel of a
- * stereo sound stream
- */
- void process(float[] sigLeft, float[] sigRight);
-}
diff --git a/src/ddf/minim/AudioInput.java b/src/ddf/minim/AudioInput.java
deleted file mode 100644
index b59732c..0000000
--- a/src/ddf/minim/AudioInput.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede
- * // get the default STEREO input
- * AudioInput getLineIn()
- *
- * // specifiy either Minim.MONO or Minim.STEREO for type
- * AudioInput getLineIn(int type)
- *
- * // bufferSize is the size of the left, right,
- * // and mix buffers of the input you get back
- * AudioInput getLineIn(int type, int bufferSize)
- *
- * // sampleRate is a request for an input of a certain sample rate
- * AudioInput getLineIn(int type, int bufferSize, float sampleRate)
- *
- * // bitDepth is a request for an input with a certain bit depth
- * AudioInput getLineIn(int type, int bufferSize, float sampleRate, int bitDepth)
- *
- * In the event that an input doesn't exist with the requested parameters,
- * Minim will spit out an error and return null. In general,
- * you will want to use the first two methods listed above.
- *
- * @example Basics/MonitorInput
- *
- * @related Minim
- *
- * @author Damien Di Fede
- *
- */
-public class AudioInput extends AudioSource
-{
- boolean m_isMonitoring;
- AudioStream m_stream;
-
- /** @invisible
- *
- * Constructs an AudioInput
that uses out
to read
- * samples from stream
. The samples from stream
- * can be accessed by through the interface provided by AudioSource
.
- *
- * @param stream the AudioStream
that provides the samples
- * @param out the AudioOut
that will read from stream
- */
- public AudioInput(AudioStream stream, AudioOut out)
- {
- super( out );
- out.setAudioStream(stream);
- stream.open();
-
- disableMonitoring();
-
- m_stream = stream;
- }
-
- public void close()
- {
- super.close();
- m_stream.close();
- }
-
- /**
- * Returns whether or not this AudioInput is monitoring.
- * In other words, whether you will hear in your speakers
- * the audio coming into the input.
- *
- * @return boolean: true if monitoring is on
- *
- * @example Basics/MonitorInput
- *
- * @related enableMonitoring ( )
- * @related disableMonitoring ( )
- * @related AudioInput
- */
- public boolean isMonitoring()
- {
- return m_isMonitoring;
- }
-
- /**
- * When monitoring is enabled, you will be able to hear
- * the audio that is coming through the input.
- *
- * @example Basics/MonitorInput
- *
- * @related disableMonitoring ( )
- * @related isMonitoring ( )
- * @related AudioInput
- */
- public void enableMonitoring()
- {
- // make sure we don't make sound
- if ( hasControl(VOLUME) )
- {
- setVolume( 1 );
- m_isMonitoring = true;
- }
- else if ( hasControl(GAIN) )
- {
- setGain( 0 );
- m_isMonitoring = true;
- }
- else
- {
- Minim.error( "Monitoring is not available on this AudioInput." );
- }
- }
-
- /**
- *
- * When monitoring is disabled, you will not hear
- * the audio that is coming through the input,
- * but you will still be able to access the samples
- * in the left, right, and mix buffers. This is
- * default state of an AudioInput and is what
- * you will want if your input is microphone
- * and your output is speakers. Otherwise: feedback.
- *
- * @shortdesc When monitoring is disabled, you will not hear
- * the audio that is coming through the input.
- *
- * @example Basics/MonitorInput
- *
- * @related enableMonitoring ( )
- * @related isMonitoring ( )
- * @related AudioInput
- *
- */
- public void disableMonitoring()
- {
- // make sure we don't make sound
- if ( hasControl(VOLUME) )
- {
- setVolume( 0 );
- }
- else if ( hasControl(GAIN) )
- {
- setGain( -64 );
- }
-
- m_isMonitoring = false;
- }
-}
diff --git a/src/ddf/minim/AudioListener.java b/src/ddf/minim/AudioListener.java
deleted file mode 100644
index 65c16f1..0000000
--- a/src/ddf/minim/AudioListener.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioListener
can be used to monitor Recordable
- * objects such as AudioPlayer
, AudioOutput
, and AudioInput
.
- * Each time a Recordable
object receives a new sample buffer
- * from the audio system, or generates a new sample buffer at the request of the
- * audio system, it passes a copy of this buffer to its listeners. You can
- * implement this interface if you want to receive samples in a callback fashion,
- * rather than using an object's AudioBuffer
s to access them. You
- * add an AudioListener
to a Recordable
by calling
- * the addListener method. When you want to stop receiving samples you call the
- * removeListener method.
- *
- * @example Advanced/AddAndRemoveAudioListener
- *
- * @author Damien Di Fede
- *
- * @related AudioPlayer
- * @related AudioInput
- * @related AudioOutput
- * @related SignalSplitter
- */
-public interface AudioListener
-{
- /**
- * Called by the audio object this AudioListener is attached to
- * when that object has new samples.
- *
- * @example Advanced/AddAndRemoveAudioListener
- *
- * @param samp
- * a float[] buffer of samples from a MONO sound stream
- *
- * @related AudioListener
- */
- void samples(float[] samp);
-
- /**
- * Called by the Recordable
object this is attached to
- * when that object has new samples.
- *
- * @param sampL
- * a float[] buffer containing the left channel of a STEREO sound stream
- * @param sampR
- * a float[] buffer containing the right channel of a STEREO sound stream
- *
- * @related AudioListener
- */
- void samples(float[] sampL, float[] sampR);
-
- // TODO: consider replacing above two methods with this single one
- // void samples( MultiChannelBuffer buffer );
-}
diff --git a/src/ddf/minim/AudioMetaData.java b/src/ddf/minim/AudioMetaData.java
deleted file mode 100644
index ee29665..0000000
--- a/src/ddf/minim/AudioMetaData.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioMetaData
provides information commonly found in ID3 tags.
- * However, other audio formats, such as Ogg, can contain
- * similar information. So rather than refer to this information
- * as ID3Tags or similar, we simply call it metadata. This base
- * class returns the empty string or -1 from all methods and
- * derived classes are expected to simply override the methods
- * that they have information for. This is a little less brittle
- * than using an interface because later on new properties can
- * be added without breaking existing code.
- *
- * @example Basics/GetMetaData
- */
-public abstract class AudioMetaData
-{
- /**
- * The length of the recording in milliseconds.
- *
- * @return int: the length in milliseconds
- *
- * @related AudioMetaData
- */
- public int length()
- {
- return -1;
- }
-
- /**
- *
- * How many sample frames are in this recording.
- *
- * @return int: the number of sample frames
- *
- * @related AudioMetaData
- */
- public int sampleFrameCount()
- {
- return -1;
- }
-
- /**
- * The name of the file / URL of the recording.
- *
- * @return String: the file name
- *
- * @related AudioMetaData
- */
- public String fileName()
- {
- return "";
- }
-
- /**
- * The title of the recording.
- *
- * @return String: the title tag
- *
- * @related AudioMetaData
- */
- public String title()
- {
- return "";
- }
-
- /**
- * The author of the recording.
- *
- * @return String: the author tag
- *
- * @related AudioMetaData
- */
- public String author()
- {
- return "";
- }
-
- /**
- * The album the recording came from.
- *
- * @return String: the album tab
- *
- * @related AudioMetaData
- */
- public String album()
- {
- return "";
- }
-
- /**
- * The date the recording was made.
- *
- * @return String: the date tag
- *
- * @related AudioMetaData
- */
- public String date()
- {
- return "";
- }
-
- /**
- * The comment field in the file.
- *
- * @return String: the comment tag
- *
- * @related AudioMetaData
- */
- public String comment()
- {
- return "";
- }
-
- /**
- * The track number of the recording.
- * This will sometimes be in the form 3/10,
- * giving you both the track number and total
- * tracks on the album this track came from.
- *
- * @return String: the track tag
- *
- * @related AudioMetaData
- */
- public String track()
- {
- return "";
- }
-
- /**
- * The genre of the recording.
- *
- * @return String: the genre tag
- *
- * @related AudioMetaData
- */
- public String genre()
- {
- return "";
- }
-
- /**
- * The copyright of the recording.
- *
- * @return String: the copyright tag
- *
- * @related AudioMetaData
- */
- public String copyright()
- {
- return "";
- }
-
- /**
- * The disc number of the recording.
- *
- * @return String: the disc tag
- *
- * @related AudioMetaData
- */
- public String disc()
- {
- return "";
- }
-
- /**
- * The composer of the recording.
- *
- * @return String: the composer tag
- *
- * @related AudioMetaData
- */
- public String composer()
- {
- return "";
- }
-
- /**
- * The lyrics for the recording, if any.
- *
- * @return String: the lyrics tag
- *
- * @related AudioMetaData
- */
- public String lyrics()
- {
- return "";
- }
-
- /**
- * The orchestra that performed the recording.
- *
- * @return String: the orchestra tag
- *
- * @related AudioMetaData
- */
- public String orchestra()
- {
- return "";
- }
-
- /**
- * The publisher of the recording.
- *
- * @return String: the publisher tag
- *
- * @related AudioMetaData
- */
- public String publisher()
- {
- return "";
- }
-
- /**
- * The software the recording was encoded with.
- *
- * @return String: the encoded tag
- *
- * @related AudioMetaData
- */
- public String encoded()
- {
- return "";
- }
-}
diff --git a/src/ddf/minim/AudioOutput.java b/src/ddf/minim/AudioOutput.java
deleted file mode 100644
index cbf1a73..0000000
--- a/src/ddf/minim/AudioOutput.java
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede
- * AudioOutput getLineOut()
- *
- * // specifiy either Minim.MONO or Minim.STEREO for type
- * AudioOutput getLineOut(int type)
- *
- * // bufferSize is the size of the left, right,
- * // and mix buffers of the output you get back
- * AudioOutput getLineOut(int type, int bufferSize)
- *
- * // sampleRate is a request for an output of a certain sample rate
- * AudioOutput getLineOut(int type, int bufferSize, float sampleRate)
- *
- * // bitDepth is a request for an output with a certain bit depth
- * AudioInput getLineOut(int type, int bufferSize, float sampleRate, int bitDepth)
- *
- * AudioOutput
that will use out
- * to generate sound.
- *
- * @param out
- * the AudioOut
that does most of our work
- *
- * @invisible
- */
- public AudioOutput(AudioOut out)
- {
- super( out );
- synth = out;
- signals = new SignalChain();
- noteManager = new NoteManager( getFormat().getSampleRate() );
- bus = new Summer();
- // configure it
- bus.setSampleRate( getFormat().getSampleRate() );
- bus.setChannelCount( getFormat().getChannels() );
-
- synth.setAudioSignal( new SampleGenerator() );
- }
-
- /** @deprecated */
- public void addSignal(AudioSignal signal)
- {
- signals.add( signal );
- }
-
- /** @deprecated */
- public AudioSignal getSignal(int i)
- {
- // get i+1 because the bus is signal 0.
- return signals.get( i );
- }
-
- /** @deprecated */
- public void removeSignal(AudioSignal signal)
- {
- signals.remove( signal );
- }
-
- /** @deprecated */
- public AudioSignal removeSignal(int i)
- {
- // remove i+1 because the bus is 1
- return signals.remove( i );
- }
-
- /** @deprecated */
- public void clearSignals()
- {
- signals.clear();
- }
-
- /** @deprecated */
- public void disableSignal(int i)
- {
- // disable i+1 because the bus is 0
- signals.disable( i );
- }
-
- /** @deprecated */
- public void disableSignal(AudioSignal signal)
- {
- signals.disable( signal );
- }
-
- /** @deprecated */
- public void enableSignal(int i)
- {
- signals.enable( i );
- }
-
- /** @deprecated */
- public void enableSignal(AudioSignal signal)
- {
- signals.enable( signal );
- }
-
- /** @deprecated */
- public boolean isEnabled(AudioSignal signal)
- {
- return signals.isEnabled( signal );
- }
-
- /** @deprecated */
- public boolean isSounding()
- {
- for ( int i = 1; i < signals.size(); i++ )
- {
- if ( signals.isEnabled( signals.get( i ) ) )
- {
- return true;
- }
- }
- return false;
- }
-
- /** @deprecated */
- public void noSound()
- {
- for ( int i = 1; i < signals.size(); i++ )
- {
- signals.disable( i );
- }
- }
-
- /** @deprecated */
- public int signalCount()
- {
- return signals.size();
- }
-
- /** @deprecated */
- public void sound()
- {
- for ( int i = 1; i < signals.size(); i++ )
- {
- signals.enable( i );
- }
- }
-
- /** @deprecated */
- public boolean hasSignal(AudioSignal signal)
- {
- return signals.contains( signal );
- }
-
- /**
- * playNote is a method of scheduling a "note" to be played at
- * some time in the future (or immediately), where a "note" is
- * an instance of a class that implements the Instrument interface.
- * The Instrument interface requires you to implement a noteOn method
- * that accepts a float duration value and is called when that
- * Instrument should begin making sound, and a noteOff method
- * that is called when that Instrument should stop making sound.
- * pauseNotes
method before queuing
- * them. If you don't, the timing will be slightly off because the "now" that
- * the start time of each note is an offset from will change from note to note.
- * Once all of your notes have been added, you call resumeNotes
to allow
- * the AudioOutput to process notes again.
- *
- * @shortdesc pause note processing
- *
- * @example Basics/SequenceSound
- *
- * @related resumeNotes ( )
- */
- public void pauseNotes()
- {
- noteManager.pause();
- }
-
- /**
- * Resume note processing.
- *
- * @example Basics/SequenceSound
- *
- * @see #pauseNotes()
- * @related pauseNotes ( )
- */
- public void resumeNotes()
- {
- noteManager.resume();
- }
-
-}
diff --git a/src/ddf/minim/AudioPlayer.java b/src/ddf/minim/AudioPlayer.java
deleted file mode 100644
index 72f5a32..0000000
--- a/src/ddf/minim/AudioPlayer.java
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioPlayer
provides a self-contained way of playing a
- * sound file by streaming it from disk (or the internet). It
- * provides methods for playing and looping the file, as well
- * as methods for setting the position in the file and
- * looping a section of the file. You can obtain an
- * AudioPlayer
by using the loadFile method of the Minim
- * class.
- *
- * @example Basics/PlayAFile
- *
- * @related Minim
- *
- * @author Damien Di Fede
- */
-
-public class AudioPlayer extends AudioSource implements Playable
-{
- // the rec that this plays
- private AudioRecordingStream recording;
- private AudioOut output;
- // only set to true is pause is called
- private boolean isPaused;
-
- /**
- * Constructs an AudioPlayer
that plays recording
using
- * the AudioOut
provided. Generally you will not call this directly
- * and will instead use the Minim.loadFile
method.
- *
- * @see Minim#loadFile(String)
- *
- * @param recording
- * the AudioRecordingStream
to play
- *
- * @param out the AudioOut
to play the recording on
- *
- * @invisible
- */
- public AudioPlayer(AudioRecordingStream recording, AudioOut out)
- {
- super(out);
- this.recording = recording;
- output = out;
- // output.setAudioSignal( new StreamSignal(recording, output.bufferSize()) );
- output.setAudioStream(recording);
- }
-
- /**
- * Starts playback from the current position.
- * If this was previously set to loop, looping will be disabled.
- *
- * @shortdesc Starts playback from the current position.
- *
- * @example Basics/PlayAFile
- *
- * @related AudioPlayer
- */
- public void play()
- {
- recording.play();
- isPaused = false;
- }
-
- /**
- * Starts playback some number of milliseconds into the file.
- * If this was previously set to loop, looping will be disabled.
- *
- * @shortdesc Starts playback some number of milliseconds into the file.
- *
- * @param millis
- * int: how many milliseconds from the beginning of the file to begin playback from
- *
- * @related AudioPlayer
- */
- public void play(int millis)
- {
- cue(millis);
- play();
- }
-
- /**
- * Pauses playback.
- *
- * @example AudioPlayer/pause
- *
- * @related AudioPlayer
- */
- public void pause()
- {
- recording.pause();
- isPaused = true;
- }
-
- /**
- * Rewinds to the beginning. This does not stop playback.
- *
- * @example AudioPlayer/rewind
- *
- * @related AudioPlayer
- */
- public void rewind()
- {
- cue(0);
- }
-
- /**
- * Set the AudioPlayer
to loop some number of times.
- * If it is already playing, the position
- * will not be reset to the beginning.
- * If it is not playing, it will start playing.
- * If you previously called this method and then paused the
- * AudioPlayer
, you can resume looping
- * by using the result of getLoopCount()
as
- * the argument for this method.
- * To loop indefinitely, use loop()
.
- *
- * @shortdesc Set the AudioPlayer
to loop some number of times.
- *
- * @param num
- * int: the number of times to loop
- *
- * @example AudioPlayer/loopNum
- *
- * @related AudioPlayer
- */
- public void loop(int num)
- {
- // if we were paused, we need to grab the current state
- // because calling loop totally resets it
- if ( isPaused )
- {
- int pos = recording.getMillisecondPosition();
- recording.loop( num );
- recording.setMillisecondPosition(pos);
- }
- else
- {
- recording.loop(num);
- }
-
- isPaused = false;
- }
-
- /**
- * Sets the AudioPlayer
to loop indefinitely.
- * If it is already playing, the position
- * will not be reset to the beginning.
- * If it is not playing, it will start playing.
- *
- * @shortdesc Sets the AudioPlayer
to loop indefinitely.
- *
- * @example AudioPlayer/loop
- *
- * @related AudioPlayer
- */
- public void loop()
- {
- loop(Minim.LOOP_CONTINUOUSLY);
- }
-
- /**
- * Returns the number of loops left to do.
- *
- * @return int: the number of loops left
- *
- * @example AudioPlayer/loopNum
- *
- * @related AudioPlayer
- */
- public int loopCount()
- {
- return recording.getLoopCount();
- }
-
- /**
- * Returns the length of the sound in milliseconds. If for any reason the
- * length could not be determined, this will return -1. However, an unknown
- * length should not impact playback.
- *
- * @shortdesc Returns the length of the sound in milliseconds.
- *
- * @return int: the length of the sound in milliseconds
- *
- * @example Advanced/CueAnAudioPlayer
- *
- * @related AudioPlayer
- */
- public int length()
- {
- return recording.getMillisecondLength();
- }
-
- /**
- * Returns the current position of the "playhead" in milliseconds
- * (ie how much of the sound has already been played).
- *
- * @example Advanced/CueAnAudioPlayer
- *
- * @return int: the current position of the "playhead" in milliseconds
- *
- * @related AudioPlayer
- */
- public int position()
- {
- return recording.getMillisecondPosition();
- }
-
- /**
- * Sets the position to millis
milliseconds from
- * the beginning. This will not change the play state. If an error
- * occurs while trying to cue, the position will not change.
- * If you try to cue to a negative position or to a position
- * that is greater than length()
, the amount will be clamped
- * to zero or length()
.
- *
- * @shortdesc Sets the position to millis
milliseconds from
- * the beginning.
- *
- * @example Advanced/CueAnAudioPlayer
- *
- * @param millis
- * int: the millisecond position to place the "playhead"
- *
- * @related length ( )
- * @related AudioPlayer
- */
- public void cue(int millis)
- {
- if (millis < 0)
- {
- millis = 0;
- }
- else if (millis > length())
- {
- millis = length();
- }
- recording.setMillisecondPosition(millis);
- }
-
- /**
- * Skips millis
milliseconds from the current position.
- * millis
can be negative, which will make this skip backwards.
- * If the skip amount would result in a negative position or a position that is greater than
- * length()
, the new position will be clamped to zero or
- * length()
.
- *
- * @shortdesc Skips millis
milliseconds from the current position.
- *
- * @param millis
- * int: how many milliseconds to skip, sign indicates direction
- *
- * @example AudioPlayer/skip
- *
- * @related AudioPlayer
- */
- public void skip(int millis)
- {
- int pos = position() + millis;
- if (pos < 0)
- {
- pos = 0;
- }
- else if (pos > length())
- {
- pos = length();
- }
- Minim.debug("AudioPlayer.skip: skipping " + millis + " milliseconds, new position is " + pos);
- recording.setMillisecondPosition(pos);
- }
-
- /**
- * Returns true if the AudioPlayer
is currently playing
- * and has more than one loop left to play.
- *
- * @return true if this is looping, false if not
- *
- * @example AudioPlayer/loopNum
- *
- * @related AudioPlayer
- */
- public boolean isLooping()
- {
- return recording.getLoopCount() != 0;
- }
-
- /**
- * Indicates if the AudioPlayer
is currently playing.
- *
- * @return true if this is currently playing, false if not
- *
- * @example AudioPlayer/loopNum
- *
- * @related AudioPlayer
- */
- public boolean isPlaying()
- {
- return recording.isPlaying();
- }
-
- /**
- * Returns the meta data for the recording being played by this player.
- *
- * @return AudioMetaData: the meta data for this player's recording
- *
- * @example Basics/GetMetaData
- *
- * @related AudioPlayer
- * @related AudioMetaData
- */
- public AudioMetaData getMetaData()
- {
- return recording.getMetaData();
- }
-
- /**
- * Sets the loop points used when looping.
- *
- * @param start
- * int: the start of the loop in milliseconds
- * @param stop
- * int: the end of the loop in milliseconds
- *
- * @example AudioPlayer/setLoopPoints
- *
- * @related AudioPlayer
- */
- public void setLoopPoints(int start, int stop)
- {
- recording.setLoopPoints(start, stop);
-
- }
-
- /**
- * Release the resources associated with playing this file.
- * All AudioPlayers returned by Minim's loadFile method
- * will be closed by Minim when it's stop method is called.
- * If you are using Processing, Minim's stop method will be
- * called automatically when your application exits.
- *
- * @shortdesc Release the resources associated with playing this file.
- *
- * @related AudioPlayer
- *
- * @invisible
- */
- public void close()
- {
- recording.close();
- super.close();
- }
-}
diff --git a/src/ddf/minim/AudioRecorder.java b/src/ddf/minim/AudioRecorder.java
deleted file mode 100644
index ecdf550..0000000
--- a/src/ddf/minim/AudioRecorder.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioRecorder
can be used to record audio that is being
- * played by a Recordable
object such as an AudioOutput
,
- * AudioInput
, or AudioPlayer
. An AudioRecorder
- * need not necessarily record to disk, but the recorders you receive from
- * Minim's createRecorder method will do so.
- *
- * @example Advanced/RecordAndPlayback
- *
- * @author Damien Di Fede
- *
- */
-
-public class AudioRecorder
-{
- private Recordable source;
- private SampleRecorder recorder;
-
- /** @invisible
- *
- * Constructs an AudioRecorder
that will use
- * recorder
to record recordSource
.
- * You might use this if you want to implement your own SampleRecorder
- * that can encode to file types not available in Minim.
- *
- * @param recordSource
- * the Recordable
object to record
- * @param recorder
- * the SampleRecorder
to use to record it
- */
- public AudioRecorder(Recordable recordSource, SampleRecorder recorder)
- {
- source = recordSource;
- this.recorder = recorder;
- source.addListener(recorder);
- }
-
- /**
- * Begins recording audio from the current record source. If recording was
- * previously halted, and the save method was not called, samples will be
- * appended to the end of the material recorded so far.
- *
- * @shortdesc Begins recording audio from the current record source.
- *
- * @example Advanced/RecordAndPlayback
- *
- * @related AudioRecorder
- */
- public void beginRecord()
- {
- recorder.beginRecord();
- }
-
- /**
- * Halts the recording of audio from the current record source.
- *
- * @example Advanced/RecordAndPlayback
- *
- * @related AudioRecorder
- */
- public void endRecord()
- {
- recorder.endRecord();
- }
-
- /**
- * Returns the current record state.
- *
- * @return true if this is currently recording
- *
- * @example Advanced/RecordAndPlayback
- *
- * @related AudioRecorder
- */
- public boolean isRecording()
- {
- return recorder.isRecording();
- }
-
- /**
- * Requests that the recorder saves. This will only
- * work if you have called the endRecord method. If this was created with a
- * buffered recorder, then calling the beginRecord method after saving will
- * not overwrite the file on the disk, unless this method is subsequently
- * called. However, if this was created with an unbuffered recorder, it is
- * likely that a call to the beginRecord method will create the file again,
- * overwriting the file that had previously been saved. An
- * AudioRecordingStream
will be returned if the
- * SampleRecorder
used to record the audio saved to a file
- * (this will always be the case if you use createRecorder
or
- * the first constructor for AudioRecorder
).
- *
- * @shortdesc Requests that the recorder saves.
- *
- * @return the audio that was recorded as an AudioRecordingStream
- *
- * @example Advanced/RecordAndPlayback
- *
- * @related AudioRecorder
- */
- // TODO: this should return whatever our "file handle" interface winds up being.
- public AudioRecordingStream save()
- {
- return recorder.save();
- }
-
- /**
- * Sets the record source for this recorder. The record source can be set at
- * any time, but if you are in the middle of recording it is a good idea to mute the old
- * record source, then add the new record source, also muted, and then unmute
- * the new record source. Otherwise, you'll probably wind up with a pop in the
- * recording.
- *
- * @shortdesc Sets the record source for this recorder.
- *
- * @param recordSource
- * an AudioSample, AudioPlayer, AudioInput, or AudioOutput
- *
- * @related AudioRecorder
- */
- public void setRecordSource(Recordable recordSource)
- {
- source.removeListener(recorder);
- source = recordSource;
- source.addListener(recorder);
- }
-
- /** @invisible
- * Sets the SampleRecorder
for this recorder. Similar caveats
- * apply as with {@link #setRecordSource(Recordable)}. This calls
- * endRecord
and save
on the current
- * SampleRecorder
before setting the new one.
- *
- * @param recorder
- * the new SampleRecorder
to use
- */
- public void setSampleRecorder(SampleRecorder recorder)
- {
- this.recorder.endRecord();
- this.recorder.save();
- source.removeListener(this.recorder);
- source.addListener(recorder);
- this.recorder = recorder;
- }
-}
diff --git a/src/ddf/minim/AudioSample.java b/src/ddf/minim/AudioSample.java
deleted file mode 100644
index a348a3f..0000000
--- a/src/ddf/minim/AudioSample.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioSample
keeps the entire file in an internal buffer and
- * all you can do is trigger()
the sound. However, you can trigger
- * the sound even if it is still playing back. It is not advised that you use
- * this class for long sounds (like entire songs, for example) because the
- * entire file is kept in memory.
- * Sampler
class from the ugens package because it is more
- * full-featured than AudioSample
.
- *
- * @example Basics/TriggerASample
- *
- * @related Minim
- *
- * @author Damien Di Fede
- *
- */
-
-// TODO: some kind of event for when a sample finishes playing?
-
-public abstract class AudioSample extends AudioSource
-{
- /**
- * int used to request the left channel of audio from the getChannel method.
- *
- * @related getChannel ( )
- * @related AudioSample
- */
- static public final int LEFT = 1;
-
- /**
- * int used to request the right channel of audio from the getChannel method.
- *
- * @related getChannel ( )
- * @related AudioSample
- */
- static public final int RIGHT = 2;
-
- protected AudioSample(AudioOut output)
- {
- super( output );
- }
-
- /**
- * Get the AudioMetaData for this sample. This will mostly be useful if you
- * have created an AudioSample from an mp3 file and want to get at some of
- * the most common ID3 tags.
- *
- * @shortdesc Get the AudioMetaData for this sample.
- *
- * @example Basics/GetMetaData
- *
- * @return the AudioMetaData for the sample.
- *
- * @related AudioMetaData
- * @related AudioSample
- */
- public abstract AudioMetaData getMetaData();
-
- /**
- * Gets the samples for the requested channel number as a float array.
- * Use either AudioSample.LEFT or AudioSample.RIGHT.
- *
- * @example Advanced/AudioSampleGetChannel
- *
- * @param channelNumber
- * int: the channel you want the samples for
- *
- * @return float[]: the samples in the specified channel
- *
- * @related AudioSample
- */
- public abstract float[] getChannel(int channelNumber);
-
- /**
- * Gets the length in milliseconds of this AudioSample.
- *
- * @return int: the length in milliseconds
- *
- * @related AudioSample
- */
- public abstract int length();
-
- /**
- * Triggers the sound to play once. Can be called again before the sound
- * finishes playing.
- *
- * @example Basics/TriggerASample
- *
- * @related AudioSample
- */
- public abstract void trigger();
-
- /**
- * Stops all sound being produced by this AudioSample.
- *
- * @related AudioSample
- */
- public abstract void stop();
-}
diff --git a/src/ddf/minim/AudioSignal.java b/src/ddf/minim/AudioSignal.java
deleted file mode 100644
index b90f16c..0000000
--- a/src/ddf/minim/AudioSignal.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioSignal
interface. Your only responsibility
- * is to fill either a single float buffer or two float buffers with values in
- * the range of [-1, 1]. The AudioOutput
to which you add your
- * signal will handle the mixing of multiple signals. There may be values in the
- * arrays when you receive them, left over from the previous signal in a
- * SignalChain
, but you can disregard them (or use them if
- * you're feeling crazy like that).
- *
- * @author Damien Di Fede
- * @invisible
- */
-@Deprecated
-public interface AudioSignal
-{
- /**
- * Fills signal
with values in the range of [-1, 1].
- * signal
represents a mono audio signal.
- *
- * @param signal
- * the float array to fill
- */
- void generate(float[] signal);
-
- /**
- * Fills left
and right
with values in the range
- * of [-1, 1]. left
represents the left channel of a stereo
- * signal, right
represents the right channel of that same
- * stereo signal.
- *
- * @param left
- * the left channel
- * @param right
- * the right channel
- */
- void generate(float[] left, float[] right);
-}
diff --git a/src/ddf/minim/AudioSnippet.java b/src/ddf/minim/AudioSnippet.java
deleted file mode 100644
index 5965f2c..0000000
--- a/src/ddf/minim/AudioSnippet.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioSnippet
is a simple wrapper around a JavaSound
- * Clip
(It isn't called AudioClip because that's an interface
- * defined in the package java.applet). It provides almost the exact same
- * functionality, the main difference being that length, position, and cue are
- * expressed in milliseconds instead of microseconds. You can obtain an
- * AudioSnippet
by using {@link Minim#loadSnippet(String)}. One
- * of the limitations of AudioSnippet
is that you do not have
- * access to the audio samples as they are played. However, you are spared all
- * of the overhead associated with making samples available. An
- * AudioSnippet
is a good choice if all you need to do is play a
- * short sound at some point. If your aim is to repeatedly trigger a sound, you
- * should use an {@link AudioSample} instead.
- *
- * @author Damien Di Fede
- */
-
-/** @deprecated */
-public class AudioSnippet extends Controller implements Playable
-{
- private AudioRecording recording;
-
- public AudioSnippet(AudioRecording rec)
- {
- super(rec.getControls());
- rec.open();
- recording = rec;
- }
-
- public void play()
- {
- recording.play();
- }
-
- public void play(int millis)
- {
- cue(millis);
- play();
- }
-
- public void pause()
- {
- recording.pause();
- }
-
- public void rewind()
- {
- cue(0);
- }
-
- public void loop()
- {
- recording.loop(Minim.LOOP_CONTINUOUSLY);
- }
-
- public void loop(int n)
- {
- recording.loop(n);
- }
-
- public int loopCount()
- {
- return recording.getLoopCount();
- }
-
- public int length()
- {
- return recording.getMillisecondLength();
- }
-
- public int position()
- {
- return recording.getMillisecondPosition();
- }
-
- public void cue(int millis)
- {
- if (millis < 0)
- millis = 0;
- if (millis > length())
- millis = length();
- recording.setMillisecondPosition(millis);
- }
-
- public void skip(int millis)
- {
- int pos = position() + millis;
- if (pos < 0)
- pos = 0;
- else if (pos > length())
- pos = length();
- recording.setMillisecondPosition(pos);
- }
-
- public boolean isLooping()
- {
- return recording.getLoopCount() != 0;
- }
-
- public boolean isPlaying()
- {
- return recording.isPlaying();
- }
-
- /**
- * Closes the snippet so that any resources it is using can be released. This
- * should be called when you are finished using this snippet.
- *
- */
- public void close()
- {
- recording.close();
- }
-
- public AudioMetaData getMetaData()
- {
- return recording.getMetaData();
- }
-
- public void setLoopPoints(int start, int stop)
- {
- recording.setLoopPoints(start, stop);
- }
-}
diff --git a/src/ddf/minim/AudioSource.java b/src/ddf/minim/AudioSource.java
deleted file mode 100644
index 611a0a3..0000000
--- a/src/ddf/minim/AudioSource.java
+++ /dev/null
@@ -1,309 +0,0 @@
-package ddf.minim;
-
-import javax.sound.sampled.AudioFormat;
-
-import ddf.minim.spi.AudioOut;
-
-/**
- * An AudioSource
is a kind of wrapper around an
- * AudioStream
. An AudioSource
will add its
- * AudioBuffer
s as listeners on the stream so that you can access
- * the stream's samples without having to implement AudioListener
- * yourself. It also provides the Effectable
and
- * Recordable
interface. Because an AudioStream
must
- * be closed when you are finished with it, you must remember to call
- * {@link #close()} on any AudioSource
s you obtain from Minim, such
- * as AudioInput
s, AudioOutput
s, and
- * AudioPlayer
s.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-public class AudioSource extends Controller implements Effectable, Recordable
-{
- // the instance of Minim that created us, if one did.
- Minim parent;
-
- private AudioOut stream;
- // the signal splitter used to manage listeners to the source
- // our stereobuffer will be the first in the list
- private SignalSplitter splitter;
- // the StereoBuffer that will subscribe to synth
- private StereoBuffer buffer;
- // the effects chain used for effecting
- private EffectsChain effects;
-
- /**
- * The AudioBuffer containing the left channel samples. If this is a mono
- * sound, it contains the single channel of audio.
- *
- * @example Basics/PlayAFile
- *
- * @related AudioBuffer
- */
- public final AudioBuffer left;
-
- /**
- * The AudioBuffer containing the right channel samples. If this is a mono
- * sound, right
contains the same samples as
- * left
.
- *
- * @example Basics/PlayAFile
- *
- * @related AudioBuffer
- */
- public final AudioBuffer right;
-
- /**
- * The AudioBuffer containing the mix of the left and right channels. If this is
- * a mono sound, mix
contains the same
- * samples as left
.
- *
- * @example Basics/PlayAFile
- *
- * @related AudioBuffer
- */
- public final AudioBuffer mix;
-
- /**
- * Constructs an AudioSource
that will subscribe to the samples
- * in stream
. It is expected that the stream is using a
- * DataLine
for playback. If it is not, calls to
- * Controller
's methods will result in a
- * NullPointerException
.
- *
- * @param istream
- * the AudioStream
to subscribe to and wrap
- *
- * @invisible
- */
- public AudioSource(AudioOut istream)
- {
- super( istream.getControls() );
- stream = istream;
-
- // we gots a buffer for users to poll
- buffer = new StereoBuffer( stream.getFormat().getChannels(),
- stream.bufferSize(), this );
- left = buffer.left;
- right = buffer.right;
- mix = buffer.mix;
-
- // we gots a signal splitter that we'll add any listeners the user wants
- splitter = new SignalSplitter( stream.getFormat(), stream.bufferSize() );
- // we stick our buffer in the signal splitter because we can only set
- // one
- // listener on the stream
- splitter.addListener( buffer );
- // and there it goes.
- stream.setAudioListener( splitter );
-
- // we got an effects chain that we'll add user effects to
- effects = new EffectsChain();
- // we set it as the effect on the stream
- stream.setAudioEffect( effects );
-
- stream.open();
- }
-
- /**
- * Closes this source, making it unavailable.
- *
- * @invisible
- */
- public void close()
- {
- Minim.debug( "Closing " + this.toString() );
-
- stream.close();
-
- // if we have a parent, tell them to stop tracking us
- // so that we can get garbage collected
- if ( parent != null )
- {
- parent.removeSource( this );
- }
- }
-
- /** @deprecated */
- public void addEffect(AudioEffect effect)
- {
- effects.add( effect );
- }
-
- /** @deprecated */
- public void clearEffects()
- {
- effects.clear();
- }
-
- /** @deprecated */
- public void disableEffect(int i)
- {
- effects.disable( i );
- }
-
- /** @deprecated */
- public void disableEffect(AudioEffect effect)
- {
- effects.disable( effect );
- }
-
- /** @deprecated */
- public int effectCount()
- {
- return effects.size();
- }
-
- /** @deprecated */
- public void effects()
- {
- effects.enableAll();
- }
-
- /** @deprecated */
- public boolean hasEffect(AudioEffect e)
- {
- return effects.contains( e );
- }
-
- /** @deprecated */
- public void enableEffect(int i)
- {
- effects.enable( i );
- }
-
- /** @deprecated */
- public void enableEffect(AudioEffect effect)
- {
- effects.enable( effect );
- }
-
- /** @deprecated */
- public AudioEffect getEffect(int i)
- {
- return effects.get( i );
- }
-
- /** @deprecated */
- public boolean isEffected()
- {
- return effects.hasEnabled();
- }
-
- /** @deprecated */
- public boolean isEnabled(AudioEffect effect)
- {
- return effects.isEnabled( effect );
- }
-
- /** @deprecated */
- public void noEffects()
- {
- effects.disableAll();
- }
-
- /** @deprecated */
- public void removeEffect(AudioEffect effect)
- {
- effects.remove( effect );
- }
-
- /** @deprecated */
- public AudioEffect removeEffect(int i)
- {
- return effects.remove( i );
- }
-
- /**
- * Add an AudioListener to this sound generating object,
- * which will have its samples method called every time
- * this object generates a new buffer of samples.
- *
- * @shortdesc Add an AudioListener to this sound generating object.
- *
- * @example Advanced/AddAndRemoveAudioListener
- *
- * @param listener
- * the AudioListener that will listen to this
- *
- * @related AudioListener
- */
- public void addListener( AudioListener listener )
- {
- splitter.addListener( listener );
- }
-
- /**
- * The internal buffer size of this sound object.
- * The left, right, and mix AudioBuffers of this object
- * will be this large, and sample buffers passed to
- * AudioListeners added to this object will be this large.
- *
- * @shortdesc The internal buffer size of this sound object.
- *
- * @example Basics/PlayAFile
- *
- * @return int: the internal buffer size of this sound object, in sample frames.
- */
- public int bufferSize()
- {
- return stream.bufferSize();
- }
-
- /**
- * Returns an AudioFormat object that describes the audio properties
- * of this sound generating object. This is often useful information
- * when doing sound analysis or some synthesis, but typically you
- * will not need to know about the specific format.
- *
- * @shortdesc Returns AudioFormat object that describes the audio properties
- * of this sound generating object.
- *
- * @example Advanced/GetAudioFormat
- *
- * @return an AudioFormat describing this sound object.
- */
- public AudioFormat getFormat()
- {
- return stream.getFormat();
- }
-
- /**
- * Removes an AudioListener that was previously
- * added to this sound object.
- *
- * @example Advanced/AddAndRemoveAudioListener
- *
- * @param listener
- * the AudioListener that should stop listening to this
- *
- * @related AudioListener
- */
- public void removeListener( AudioListener listener )
- {
- splitter.removeListener( listener );
- }
-
- /**
- * The type is an int describing the number of channels
- * this sound object has.
- *
- * @return Minim.MONO if this is mono, Minim.STEREO if this is stereo
- */
- public int type()
- {
- return stream.getFormat().getChannels();
- }
-
- /**
- * Returns the sample rate of this sound object.
- *
- * @return the sample rate of this sound object.
- */
- public float sampleRate()
- {
- return stream.getFormat().getSampleRate();
- }
-}
diff --git a/src/ddf/minim/BasicAudioOut.java b/src/ddf/minim/BasicAudioOut.java
deleted file mode 100644
index 9437acd..0000000
--- a/src/ddf/minim/BasicAudioOut.java
+++ /dev/null
@@ -1,117 +0,0 @@
-package ddf.minim;
-
-import javax.sound.sampled.AudioFormat;
-import javax.sound.sampled.Control;
-
-import ddf.minim.spi.AudioOut;
-import ddf.minim.spi.AudioStream;
-
-// ddf (9/5/15): very very basic audio out implementation
-// : that is used when creating an AudioInput
-// : in the event that getLineOut does not return
-// : a usable audio out.
-class BasicAudioOut extends Thread
-implements AudioOut
-{
- private AudioFormat format;
- private MultiChannelBuffer buffer;
- private AudioListener listener;
- private AudioStream stream;
- private boolean running;
-
- public BasicAudioOut(AudioFormat format, int bufferSize)
- {
- this.format = format;
- buffer = new MultiChannelBuffer(bufferSize, format.getChannels());
- }
-
- public void run()
- {
- running = true;
- while (running)
- {
- // this should block until we get a full buffer
- int samplesRead = stream.read(buffer);
-
- // but with JavaSound, at least, it might return without
- // a full buffer if the TargetDataLine the stream is reading from
- // is closed during a read, so in that case we simply
- // fill the rest of the buffer with silence
- if ( samplesRead != buffer.getBufferSize() )
- {
- for(int i = samplesRead; i < buffer.getBufferSize(); ++i)
- {
- for(int c = 0; c < buffer.getChannelCount(); ++c)
- {
- buffer.setSample( c, i, 0 );
- buffer.setSample( c, i, 0 );
- }
- }
- }
-
- if (buffer.getChannelCount()==1)
- {
- listener.samples(buffer.getChannel(0));
- }
- else
- {
- listener.samples(buffer.getChannel(0), buffer.getChannel(1));
- }
-
- try
- {
- Thread.sleep(1);
- }
- catch (InterruptedException e)
- {
- }
- }
- }
-
- public void open()
- {
- start();
- }
-
- public void close()
- {
- running = false;
- }
-
- public Control[] getControls()
- {
- return new Control[0];
- }
-
- public AudioFormat getFormat()
- {
- return format;
- }
-
- public int bufferSize()
- {
- return buffer.getBufferSize();
- }
-
-
- public void setAudioSignal(AudioSignal signal)
- {
- //Minim.error( "BasicAudioOut does not support setting an AudioSignal." );
- }
-
- public void setAudioStream(AudioStream stream)
- {
- this.stream = stream;
- }
-
- public void setAudioEffect(AudioEffect effect)
- {
- //Minim.error( "BasicAudiOut does not support setting an AudioEffect." );
- }
-
- public void setAudioListener(AudioListener listen)
- {
- this.listener = listen;
- }
-
-}
diff --git a/src/ddf/minim/Controller.java b/src/ddf/minim/Controller.java
deleted file mode 100644
index 88e6a52..0000000
--- a/src/ddf/minim/Controller.java
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede Controller
is the base class of all Minim classes that deal
- * with audio I/O. It provides control over the underlying DataLine
,
- * which is a low-level JavaSound class that talks directly to the audio
- * hardware of the computer. This means that you can make changes to the audio
- * without having to manipulate the samples directly. The downside to this is
- * that when outputting sound to the system (such as with an
- * AudioOutput
), these changes will not be present in the
- * samples made available to your program.
- * FloatControl
,
- * which is a class defined by the JavaSound API. A FloatControl
- * represents a control of a line that holds a float
value. This
- * value has an associated maximum and minimum value (such as between -1 and 1
- * for pan), and also a unit type (such as dB for gain). You should refer to the
- * FloatControl
- * Javadoc for the full description of the methods available.
- * get
and
- * set
methods, which will simply do nothing if the control you
- * are trying to manipulate is not available.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-public class Controller
-{
- /** @invisible
- * The volume control type.
- */
- @Deprecated
- public static FloatControl.Type VOLUME = FloatControl.Type.VOLUME;
-
- /** @invisible
- * The gain control type.
- */
- @Deprecated
- public static FloatControl.Type GAIN = FloatControl.Type.MASTER_GAIN;
-
- /** @invisible
- * The balance control type.
- */
- @Deprecated
- public static FloatControl.Type BALANCE = FloatControl.Type.BALANCE;
-
- /** @invisible
- * The pan control type.
- */
- @Deprecated
- public static FloatControl.Type PAN = FloatControl.Type.PAN;
-
- /** @invisible
- * The sample rate control type.
- */
- @Deprecated
- public static FloatControl.Type SAMPLE_RATE = FloatControl.Type.SAMPLE_RATE;
-
- /** @invisible
- * The mute control type.
- */
- @Deprecated
- public static BooleanControl.Type MUTE = BooleanControl.Type.MUTE;
-
- private Control[] controls;
- // the starting value for shifting
- private ValueShifter vshifter, gshifter, bshifter, pshifter;
- private boolean vshift, gshift, bshift, pshift;
-
- /**
- * Constructs a Controller
for the given Line
.
- *
- * @param cntrls
- * an array of Controls that this Controller will manipulate
- *
- * @invisible
- */
- public Controller(Control[] cntrls)
- {
- controls = cntrls;
- vshift = gshift = bshift = pshift = false;
- }
-
- // for line reading/writing classes to alert the controller
- // that a new buffer has been read/written
- void update()
- {
- if ( vshift )
- {
- setVolume( vshifter.value() );
- if ( vshifter.done() ) vshift = false;
- }
-
- if ( gshift )
- {
- setGain( gshifter.value() );
- if ( gshifter.done() ) gshift = false;
- }
-
- if ( bshift )
- {
- setBalance( bshifter.value() );
- if ( bshifter.done() ) bshift = false;
- }
-
- if ( pshift )
- {
- setPan( pshifter.value() );
- if ( pshifter.done() ) pshift = false;
- }
- }
-
- // a small class to interpolate a value over time
- class ValueShifter
- {
- private float tstart, tend, vstart, vend;
-
- public ValueShifter(float vs, float ve, int t)
- {
- tstart = (int)System.currentTimeMillis();
- tend = tstart + t;
- vstart = vs;
- vend = ve;
- }
-
- public float value()
- {
- int millis = (int)System.currentTimeMillis();
- float norm = (float)(millis-tstart) / (tend-tstart);
- float range = (float)(vend-vstart);
- return vstart + range*norm;
- }
-
- public boolean done()
- {
- return (int)System.currentTimeMillis() > tend;
- }
- }
-
- /** @invisible
- *
- * Prints the available controls and their ranges to the console. Not all
- * Controllers have all of the controls available on them so this is a way to find
- * out what is available.
- *
- */
- public void printControls()
- {
- if (controls.length > 0)
- {
- System.out.println("Available controls are:");
- for (int i = 0; i < controls.length; i++)
- {
- Control.Type type = controls[i].getType();
- System.out.print(" " + type.toString());
- if (type == VOLUME || type == GAIN || type == BALANCE || type == PAN)
- {
- FloatControl fc = (FloatControl) controls[i];
- String shiftSupported = "does";
- if (fc.getUpdatePeriod() == -1)
- {
- shiftSupported = "doesn't";
- }
- System.out.println(", which has a range of " + fc.getMaximum() + " to "
- + fc.getMinimum() + " and " + shiftSupported
- + " support shifting.");
- }
- else
- {
- System.out.println("");
- }
- }
- }
- else
- {
- System.out.println("There are no controls available.");
- }
- }
-
- /** @invisible
- *
- * Returns whether or not the particular control type is supported by this Controller
- *
- * @param type
- * the Control.Type to query for
- *
- * @see #VOLUME
- * @see #GAIN
- * @see #BALANCE
- * @see #PAN
- * @see #SAMPLE_RATE
- * @see #MUTE
- *
- * @return true if the control is available
- */
- @Deprecated
- public boolean hasControl(Control.Type type)
- {
- for(int i = 0; i < controls.length; i++)
- {
- if ( controls[i].getType().equals(type) )
- {
- return true;
- }
- }
- return false;
- }
-
- /** @invisible
- *
- * Returns an array of all the available Control
s for the
- * DataLine
being controlled. You can use this if you want to
- * access the controls directly, rather than using the convenience methods
- * provided by this class.
- *
- * @return an array of all available controls
- */
- @Deprecated
- public Control[] getControls()
- {
- return controls;
- }
-
- @Deprecated
- public Control getControl(Control.Type type)
- {
- for(int i = 0; i < controls.length; i++)
- {
- if ( controls[i].getType().equals(type) )
- {
- return controls[i];
- }
- }
- return null;
- }
-
- /** @invisible
- * Gets the volume control for the Line
, if it exists. You
- * should check for the availability of a volume control by using
- * {@link #hasControl(javax.sound.sampled.Control.Type)} before calling this
- * method.
- *
- * @return the volume control
- */
- @Deprecated
- public FloatControl volume()
- {
- return (FloatControl)getControl(VOLUME);
- }
-
- /** @invisible
- * Gets the gain control for the Line
, if it exists. You
- * should check for the availability of a gain control by using
- * {@link #hasControl(javax.sound.sampled.Control.Type)} before calling this
- * method.
- *
- * @return the gain control
- */
- @Deprecated
- public FloatControl gain()
- {
- return (FloatControl) getControl(GAIN);
- }
-
- /** @invisible
- * Gets the balance control for the Line
, if it exists. You
- * should check for the availability of a balance control by using
- * {@link #hasControl(javax.sound.sampled.Control.Type)} before calling this
- * method.
- *
- * @return the balance control
- */
- @Deprecated
- public FloatControl balance()
- {
- return (FloatControl) getControl(BALANCE);
- }
-
- /** @invisible
- * Gets the pan control for the Line
, if it exists. You should
- * check for the availability of a pan control by using
- * {@link #hasControl(javax.sound.sampled.Control.Type)} before calling this
- * method.
- *
- * @return the pan control
- */
- @Deprecated
- public FloatControl pan()
- {
- return (FloatControl) getControl(PAN);
- }
-
- /**
- * Mutes the sound.
- *
- * @related unmute ( )
- * @related isMuted ( )
- */
- public void mute()
- {
- setValue(MUTE, true);
- }
-
- /**
- * Unmutes the sound.
- *
- * @related mute ( )
- * @related isMuted ( )
- */
- public void unmute()
- {
- setValue(MUTE, false);
- }
-
- /**
- * Returns true if the sound is muted.
- *
- * @return the current mute state
- *
- * @related mute ( )
- * @related unmute ( )
- */
- public boolean isMuted()
- {
- return getValue(MUTE);
- }
-
- private boolean getValue(BooleanControl.Type type)
- {
- boolean v = false;
- if (hasControl(type))
- {
- BooleanControl c = (BooleanControl) getControl(type);
- v = c.getValue();
- }
- else
- {
- Minim.error(type.toString() + " is not supported.");
- }
- return v;
- }
-
- private void setValue(BooleanControl.Type type, boolean v)
- {
- if (hasControl(type))
- {
- BooleanControl c = (BooleanControl) getControl(type);
- c.setValue(v);
- }
- else
- {
- Minim.error(type.toString() + " is not supported.");
- }
- }
-
- private float getValue(FloatControl.Type type)
- {
- float v = 0;
- if (hasControl(type))
- {
- FloatControl c = (FloatControl) getControl(type);
- v = c.getValue();
- }
- else
- {
- Minim.error(type.toString() + " is not supported.");
- }
- return v;
- }
-
- private void setValue(FloatControl.Type type, float v)
- {
- if (hasControl(type))
- {
- FloatControl c = (FloatControl) getControl(type);
- if (v > c.getMaximum())
- v = c.getMaximum();
- else if (v < c.getMinimum()) v = c.getMinimum();
- c.setValue(v);
- }
- else
- {
- Minim.error(type.toString() + " is not supported.");
- }
- }
-
- /**
- * Returns the current volume. If a volume control is not available, this
- * returns 0. Note that the volume is not the same thing as the
- * level()
of an AudioBuffer!
- *
- * @shortdesc Returns the current volume.
- *
- * @return the current volume or zero if a volume control is unavailable
- *
- * @related setVolume ( )
- * @related shiftVolume ( )
- */
- public float getVolume()
- {
- return getValue(VOLUME);
- }
-
- /**
- * Sets the volume. If a volume control is not available,
- * this does nothing.
- *
- * @shortdesc Sets the volume.
- *
- * @param value
- * float: the new value for the volume, usually in the range [0,1].
- *
- * @related getVolume ( )
- * @related shiftVolume ( )
- * @related isShiftingVolume ( )
- */
- public void setVolume(float value)
- {
- setValue(VOLUME, value);
- }
-
- /**
- * Transitions the volume from one value to another.
- *
- * @param from
- * float: the starting volume
- * @param to
- * float: the ending volume
- * @param millis
- * int: the length of the transition in milliseconds
- *
- * @related getVolume ( )
- * @related setVolume ( )
- * @related isShiftingVolume ( )
- */
- public void shiftVolume(float from, float to, int millis)
- {
- if ( hasControl(VOLUME) )
- {
- setVolume(from);
- vshifter = new ValueShifter(from, to, millis);
- vshift = true;
- }
- }
-
- /**
- * Returns true if the volume is currently shifting.
- * If no volume control is available this method returns false.
- *
- * @return true if shifting, false otherwise
- *
- * @related getVolume ( )
- * @related setVolume ( )
- * @related shiftVolume ( )
- */
- public boolean isShiftingVolume() {
- return vshift;
- }
-
- /**
- * Returns the current gain. If a gain control is not available, this returns
- * 0. Note that the gain is not the same thing as the level()
- * of an AudioBuffer! Gain describes the current volume of the sound in
- * decibels, which is a logarithmic, rather than linear, scale. A gain
- * of 0dB means the sound is not being amplified or attenuated. Negative
- * gain values will reduce the volume of the sound, and positive values
- * will increase it.
- * Effectable
object is simply one that can have
- * AudioEffect
s attached to it. As with an audio track in a
- * typical DAW, you can enable and disable the effects on an
- * Effectable
without having to remove them from the object.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-public interface Effectable
-{
- /**
- * Enables all effects currently attached to this. If you want to enable only
- * a single effect, use {@link #enableEffect(int)}.
- *
- */
- void effects();
-
- /**
- * Disables all effects currently attached to this. If you want to disable
- * only a single effect, use {@link #disableEffect(int)}.
- *
- */
- void noEffects();
-
- /**
- * Returns true if at least one effect in the chain is enabled.
- *
- * @return true if at least one effect in the effects chain is enabled
- */
- boolean isEffected();
-
- /**
- * Returns true if effect
is in the chain and is also enabled.
- *
- * @param effect
- * the AudioEffect
to check the status of
- * @return true if effect
is in the chain and is enabled
- */
- boolean isEnabled(AudioEffect effect);
-
- /**
- * Adds an effect to the effects chain.
- *
- * @param effect
- * the AudioEffect to add
- */
- void addEffect(AudioEffect effect);
-
- /**
- * Returns the ith
effect in the effect chain.
- * This method is not required to do bounds checking and may throw an
- * ArrayOutOfBoundsException if i
is larger than
- * {@link #effectCount()}.
- *
- * @param i
- * which effect to return
- *
- * @return the requested effect
- */
- AudioEffect getEffect(int i);
-
- /**
- * Returns the number of effects in the chain.
- *
- * @return the number of effects in the chain
- */
- int effectCount();
-
- /**
- * Returns true if effect
is in the chain.
- *
- * @param effect the effec to check for
- * @return true if effect
is attached to this
- */
- boolean hasEffect(AudioEffect effect);
-
- /**
- * Enables the i
th effect in the effect chain.
- *
- * @param i
- * the index of the effect to enable
- */
- void enableEffect(int i);
-
- /**
- * Enables effect
if it is in the chain.
- *
- * @param effect
- * the AudioEffect
to enable
- */
- void enableEffect(AudioEffect effect);
-
- /**
- * disables the i
th effect in the effect chain.
- *
- * @param i
- * the index of the effect to disable
- */
- void disableEffect(int i);
-
- /**
- * Disables effect
if it is in the chain.
- *
- * @param effect
- * the AudioEffect
to disable
- */
- void disableEffect(AudioEffect effect);
-
- /**
- * Removes effect
from the effects chain.
- *
- * @param effect
- * the AudioEffect to remove
- */
- void removeEffect(AudioEffect effect);
-
- /**
- * Removes and returns the ith
effect in the
- * effect chain.
- *
- * @param i
- * which effect to remove
- * @return the removed AudioEffect
- */
- AudioEffect removeEffect(int i);
-
- /**
- * Removes all effects from the effect chain.
- *
- */
- void clearEffects();
-}
diff --git a/src/ddf/minim/EffectsChain.java b/src/ddf/minim/EffectsChain.java
deleted file mode 100644
index 17abaa0..0000000
--- a/src/ddf/minim/EffectsChain.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede EffectsChain
is a list of {@link AudioEffect AudioEffects} that
- * gives you the ability to enable and disable effects, as you would in a typical
- * DAW. When you add an effect, it is added to the end of the chain and is enabled.
- * When you remove an effect, effects further down the chain are moved up a slot.
- * EffectsChain
is itself an AudioEffect
, so you can
- * easily create groups of effects that can be enabled/disabled together by
- * putting them in an EffectsChain
and then adding that chain to
- * an Effectable
as a single effect. EffectsChain
is
- * fully synchronized
so that it is not possible to add and remove
- * effects while processing is taking place.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-@Deprecated
-public class EffectsChain implements AudioEffect
-{
- // the effects in the order they were added
- private VectorEffectsChain
.
- *
- */
- public EffectsChain()
- {
- effects = new Vectore
to the end of the chain.
- *
- * @param e the AudioEffect
to add
- */
- public synchronized void add(AudioEffect e)
- {
- effects.add(e);
- enabled.add(e);
- }
-
- /**
- * Removes e
from the chain.
- *
- * @param e the AudioEffect
to remove
- */
- public synchronized void remove(AudioEffect e)
- {
- effects.remove(e);
- enabled.remove(e);
- }
-
- /**
- * Removes and returns the i
th effect from the chain.
- *
- * @param i the index of the AudioEffect
to remove
- * @return the AudioEffect
that was removed
- */
- public synchronized AudioEffect remove(int i)
- {
- AudioEffect e = effects.remove(i);
- enabled.remove(e);
- return e;
- }
-
- /**
- * Gets the ith
effect in the chain.
- *
- * @param i the index of the AudioEffect
to get
- *
- * @return the ith
effect in the chain.
- */
- public synchronized AudioEffect get(int i)
- {
- return effects.get(i);
- }
-
- /**
- * Returns true if e
is in this chain
- *
- * @param e the AudioEffect
to check for
- * @return true if e
is in this chain
- */
- public synchronized boolean contains(AudioEffect e)
- {
- return effects.contains(e);
- }
-
- /**
- * Enables the i
th effect in the chain.
- *
- * @param i the index of the effect to enable
- */
- public synchronized void enable(int i)
- {
- enabled.add(get(i));
- }
-
- /**
- * Enables e
if it is in the chain.
- *
- * @param e the AudioEffect
to enable
- */
- public synchronized void enable(AudioEffect e)
- {
- if ( effects.contains(e) )
- {
- enabled.add(e);
- }
- }
-
- /**
- * Enables all effects in the chain.
- *
- */
- public synchronized void enableAll()
- {
- enabled.addAll(effects);
- }
-
- /**
- * Returns true if at least one effect in the chain is enabled.
- *
- * @return true if at least one effect in the chain is enabled
- */
- public synchronized boolean hasEnabled()
- {
- return enabled.size() > 0;
- }
-
- /**
- * Returns true if e
is in the chain and is enabled.
- *
- * @param e the AudioEffect
to return the status of
- * @return true if e
is enabled and in the chain
- */
- public synchronized boolean isEnabled(AudioEffect e)
- {
- return enabled.contains(e);
- }
-
- /**
- * Disables the i
th effect in the chain.
- *
- * @param i the index of the effect to disable
- */
- public synchronized void disable(int i)
- {
- enabled.remove(get(i));
- }
-
- /**
- * Disables e
if it is in the chain.
- *
- * @param e the AudioEffect
to disable
- */
- public synchronized void disable(AudioEffect e)
- {
- enabled.remove(e);
- }
-
- /**
- * Disables all effects in the chain.
- *
- */
- public synchronized void disableAll()
- {
- enabled.clear();
- }
-
- /**
- * Returns the number of effects in the chain.
- *
- * @return the number of effects in the chain
- */
- public synchronized int size()
- {
- return effects.size();
- }
-
- /**
- * Removes all effects from the effect chain.
- *
- */
- public synchronized void clear()
- {
- effects.clear();
- enabled.clear();
- }
-
- /**
- * Sends samp
to each effect in the chain, in order.
- *
- * @param samp the samples to process
- */
- public synchronized void process(float[] samp)
- {
- for (int i = 0; i < effects.size(); i++)
- {
- AudioEffect e = effects.get(i);
- if ( enabled.contains(e) )
- {
- e.process(samp);
- }
- }
- }
-
- /**
- * Sends sampL
and sampR
to each effect
- * in the chain, in order. The two float arrays should correspond to
- * the left and right channels of a stereo signal.
- *
- * @param sampL the left channel of the signal to process
- * @param sampR the right channel of the signal to process
- */
- public synchronized void process(float[] sampL, float[] sampR)
- {
- for (int i = 0; i < effects.size(); i++)
- {
- AudioEffect e = effects.get(i);
- if ( enabled.contains(e) )
- {
- e.process(sampL, sampR);
- }
- }
- }
-}
diff --git a/src/ddf/minim/MAudioBuffer.java b/src/ddf/minim/MAudioBuffer.java
deleted file mode 100644
index 96c5970..0000000
--- a/src/ddf/minim/MAudioBuffer.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede MAudioBuffer
encapsulates a sample buffer of floats. All Minim
- * classes that give you access to audio samples do so with an
- * MAudioBuffer
. The underlying array is not immutable and this
- * class has a number of methods for reading and writing to that array. It is
- * even possible to be given a direct handle on the array to process it as you
- * wish.
- *
- * @author Damien Di Fede
- *
- */
-
-final class MAudioBuffer implements AudioBuffer
-{
- private float[] samples;
-
- /**
- * Constructs and MAudioBuffer that is bufferSize
samples long.
- *
- * @param bufferSize
- * the size of the buffer
- */
- MAudioBuffer(int bufferSize)
- {
- samples = new float[bufferSize];
- }
-
- public synchronized int size()
- {
- return samples.length;
- }
-
- public synchronized float get(int i)
- {
- return samples[i];
- }
-
- public synchronized float get(float i)
- {
- int lowSamp = (int)i;
- int hiSamp = lowSamp + 1;
- if ( hiSamp == samples.length )
- {
- return samples[lowSamp];
- }
- float lerp = i - lowSamp;
- return samples[lowSamp] + lerp*(samples[hiSamp] - samples[lowSamp]);
- }
-
- public synchronized void set(float[] buffer)
- {
- if (buffer.length != samples.length)
- Minim
- .error("MAudioBuffer.set: passed array (" + buffer.length + ") " +
- "must be the same length (" + samples.length + ") as this MAudioBuffer.");
- else
- samples = buffer;
- }
-
- /**
- * Mixes the two float arrays and puts the result in this buffer. The
- * passed arrays must be the same length as this buffer. If they are not, an
- * error will be reported and nothing will be done. The mixing function is:
- * samples[i] = (b1[i] + b2[i]) / 2
- *
- * @param b1
- * the first buffer
- * @param b2
- * the second buffer
- */
- public synchronized void mix(float[] b1, float[] b2)
- {
- if ((b1.length != b2.length)
- || (b1.length != samples.length || b2.length != samples.length))
- {
- Minim.error("MAudioBuffer.mix: The two passed buffers must be the same size as this MAudioBuffer.");
- }
- else
- {
- for (int i = 0; i < samples.length; i++)
- {
- samples[i] = (b1[i] + b2[i]) / 2;
- }
- }
- }
-
- /**
- * Sets all of the values in this buffer to zero.
- */
- public synchronized void clear()
- {
- samples = new float[samples.length];
- }
-
- public synchronized float level()
- {
- float level = 0;
- for (int i = 0; i < samples.length; i++)
- {
- level += (samples[i] * samples[i]);
- }
- level /= samples.length;
- level = (float) Math.sqrt(level);
- return level;
- }
-
- public synchronized float[] toArray()
- {
- float[] ret = new float[samples.length];
- System.arraycopy(samples, 0, ret, 0, samples.length);
- return ret;
- }
-}
diff --git a/src/ddf/minim/Minim.java b/src/ddf/minim/Minim.java
deleted file mode 100644
index be1244c..0000000
--- a/src/ddf/minim/Minim.java
+++ /dev/null
@@ -1,1033 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede Minim
class is the starting point for most everything
- * you will do with this library. There are methods for obtaining objects for playing audio files:
- * AudioSample and AudioPlayer. There are methods for obtaining an AudioRecorder,
- * which is how you record audio to disk. There are methods for obtaining an AudioInput,
- * which is how you can monitor the computer's line-in or microphone, depending on what the
- * user has set as the record source. Finally there are methods for obtaining an AudioOutput,
- * which is how you can play audio generated by your program, typically by connecting classes
- * found in the ugens package.
- *
- * String sketchPath( String fileName )
- * InputStream createInput( String fileName )
- *
- * sketchPath
method is
- * expected to transform a filename into an absolute path and is used when
- * attempting to create an AudioRecorder. The createInput
method
- * is used when loading files and is expected to take a filename, which is
- * not necessarily an absolute path, and return an InputStream
- * that can be used to read the file. For example, in Processing, the createInput
- * method will search in the data folder, the sketch folder, handle URLs, and absolute paths.
- * If you are using Minim outside of Processing, you can handle whatever cases are
- * appropriate for your project.
- *
- * String sketchPath( String fileName )
- * InputStream createInput( String fileName )
- *
- * sketchPath
method is
- * expected to transform a filename into an absolute path and is used when
- * attempting to create an AudioRecorder. The createInput
method
- * is used when loading files and is expected to take a filename, which is
- * not necessarily an absolute path, and return an InputStream
- * that can be used to read the file. For example, in Processing, the createInput
- * method will search in the data folder, the sketch folder, handle URLs, and absolute paths.
- * If you are using Minim outside of Processing, you can handle whatever cases are
- * appropriate for your project.
- * AudioSnippet
of the requested file or URL
- */
- @Deprecated
- public AudioSnippet loadSnippet(String filename)
- {
- AudioRecording c = mimp.getAudioRecording( filename );
- if ( c != null )
- {
- return new AudioSnippet( c );
- }
- else
- {
- Minim.error( "Couldn't load the file " + filename );
- }
- return null;
- }
-
- /**
- * Loads the requested file into an AudioPlayer.
- * The default buffer size is 1024 samples and the
- * buffer size determines the size of the left, right,
- * and mix AudioBuffer fields on the returned AudioPlayer.
- *
- * @shortdesc Loads the requested file into an AudioPlayer.
- *
- * @example Basics/PlayAFile
- *
- * @param filename
- * the file or URL you want to load
- * @return an AudioPlayer
that plays the file
- *
- * @related AudioPlayer
- *
- * @see #loadFile(String, int)
- */
- public AudioPlayer loadFile(String filename)
- {
- return loadFile( filename, 1024 );
- }
-
- /**
- * Loads the requested file into an {@link AudioPlayer} with the request
- * buffer size.
- *
- * @param filename
- * the file or URL you want to load
- * @param bufferSize
- * int: the sample buffer size you want, which determines the
- * size of the left, right, and mix AudioBuffer fields of the
- * returned AudioPlayer.
- *
- * @return an AudioPlayer
with a sample buffer of the requested
- * size, or null if we were unable to load the file
- */
- public AudioPlayer loadFile(String filename, int bufferSize)
- {
- AudioPlayer player = null;
- AudioRecordingStream rec = mimp.getAudioRecordingStream( filename, bufferSize, false );
- if ( rec != null )
- {
- AudioFormat format = rec.getFormat();
- AudioOut out = mimp.getAudioOutput( format.getChannels(),
- bufferSize,
- format.getSampleRate(),
- format.getSampleSizeInBits() );
-
- if ( out != null )
- {
- player = new AudioPlayer( rec, out );
- }
- else
- {
- rec.close();
- }
- }
-
- if ( player != null )
- {
- addSource( player );
- }
- else
- {
- error( "Couldn't load the file " + filename );
- }
-
- return player;
- }
-
- /**
- * Loads the file into an AudioRecordingStream, which allows you to stream
- * audio data from the file yourself. Note that doing this will not
- * result in any sound coming out of your speakers, unless of course you
- * send it there. You would primarily use this to perform offline-analysis
- * of a file or for very custom sound streaming schemes.
- *
- * @shortdesc Loads the file into an AudioRecordingStream.
- *
- * @example Analysis/offlineAnalysis
- *
- * @param filename
- * the file to load
- * @param bufferSize
- * int: the bufferSize to use, which controls how much
- * of the streamed file is stored in memory at a time.
- * @param inMemory
- * boolean: whether or not the file should be cached in memory as it is read
- *
- * @return an AudioRecordingStream that you can use to read from the file.
- *
- *
- */
- public AudioRecordingStream loadFileStream(String filename, int bufferSize, boolean inMemory)
- {
- AudioRecordingStream stream = mimp.getAudioRecordingStream( filename, bufferSize, inMemory );
- streams.add( stream );
- return stream;
- }
-
- /**
- * Load the file into an AudioRecordingStream with a buffer size of 1024
- * samples.
- *
- * @param filename
- * the file to load
- * @return an AudioRecordingStream that you can use to read from the file
- */
- public AudioRecordingStream loadFileStream(String filename)
- {
- return loadFileStream(filename,1024,false);
- }
-
- /**
- * Load the metadata for the file without keeping a stream open.
- * Use this to get access to ID3 tags or similar.
- *
- * @example Basics/GetMetaData
- *
- * @param filename
- * String: the name of the file to load
- * @return
- * AudioMetaData: the metadata for the file
- */
- public AudioMetaData loadMetaData(String filename)
- {
- AudioRecordingStream stream = mimp.getAudioRecordingStream( filename, 0, false );
- AudioMetaData data = stream.getMetaData();
- stream.close();
- return data;
- }
-
- /**
- * Loads the requested file into a MultiChannelBuffer. The buffer's channel count
- * and buffer size will be adjusted to match the file.
- *
- * @shortdesc Loads the requested file into a MultiChannelBuffer.
- *
- * @example Advanced/loadFileIntoBuffer
- *
- * @param filename
- * the file to load
- * @param outBuffer
- * the MultiChannelBuffer to fill with the file's audio samples
- *
- * @return the sample rate of audio samples in outBuffer, or 0 if the load failed.
- *
- * @related MultiChannelBuffer
- */
- public float loadFileIntoBuffer( String filename, MultiChannelBuffer outBuffer )
- {
- final int readBufferSize = 4096;
- float sampleRate = 0;
- AudioRecordingStream stream = mimp.getAudioRecordingStream( filename, readBufferSize, false );
- if ( stream != null )
- {
- //stream.open();
- stream.play();
- sampleRate = stream.getFormat().getSampleRate();
- final int channelCount = stream.getFormat().getChannels();
- // for reading the file in, in chunks.
- MultiChannelBuffer readBuffer = new MultiChannelBuffer( channelCount, readBufferSize );
- // make sure the out buffer is the correct size and type.
- outBuffer.setChannelCount( channelCount );
- // how many samples to read total
- long totalSampleCount = stream.getSampleFrameLength();
- if ( totalSampleCount == -1 )
- {
- totalSampleCount = AudioUtils.millis2Frames( stream.getMillisecondLength(), stream.getFormat() );
- }
- debug( "Total sample count for " + filename + " is " + totalSampleCount );
- outBuffer.setBufferSize( (int)totalSampleCount );
-
- // now read in chunks.
- long totalSamplesRead = 0;
- while( totalSamplesRead < totalSampleCount )
- {
- // is the remainder smaller than our buffer?
- if ( totalSampleCount - totalSamplesRead < readBufferSize )
- {
- readBuffer.setBufferSize( (int)(totalSampleCount - totalSamplesRead) );
- }
-
- int samplesRead = stream.read( readBuffer );
-
- if ( samplesRead == 0 )
- {
- debug( "loadSampleIntoBuffer: got 0 samples read" );
- break;
- }
-
- // copy data from one buffer to the other.
- for(int i = 0; i < channelCount; ++i)
- {
- // a faster way to do this would be nice.
- for(int s = 0; s < samplesRead; ++s)
- {
- outBuffer.setSample( i, (int)totalSamplesRead+s, readBuffer.getSample( i, s ) );
- }
- }
-
- totalSamplesRead += samplesRead;
- }
-
- if ( totalSamplesRead != totalSampleCount )
- {
- outBuffer.setBufferSize( (int)totalSamplesRead );
- }
-
- debug("loadSampleIntoBuffer: final output buffer size is " + outBuffer.getBufferSize() );
-
- stream.close();
- }
- else
- {
- debug("Unable to load an AudioRecordingStream for " + filename);
- }
-
- return sampleRate;
- }
-
- /**
- * Creates an AudioRecorder that will use the provided Recordable object as its
- * record source and that will save to the file name specified. Recordable
- * classes in Minim include AudioOutput, AudioInput, AudioPlayer, AudioSample, and SignalSplitter
- * The format of the file will be inferred from the extension in the file name.
- * If the extension is not a recognized file type, this will return null.
- *
- * @shortdesc Creates an AudioRecorder.
- *
- * @example Basics/RecordAudioOutput
- *
- * @param source
- * the Recordable
object you want to use as a record source
- * @param fileName
- * the name of the file to record to
- *
- * @return an AudioRecorder
for the record source
- *
- * @related AudioRecorder
- */
-
- public AudioRecorder createRecorder( Recordable source, String fileName )
- {
- return createRecorder( source, fileName, false );
- }
-
- /**
- * Creates an AudioRecorder that will use the provided Recordable object as its
- * record source and that will save to the file name specified. Recordable
- * classes in Minim include AudioOutput, AudioInput, AudioPlayer, AudioSample, and SignalSplitter
- * The format of the file will be inferred from the extension in the file name.
- * If the extension is not a recognized file type, this will return null. Be aware
- * that if you choose buffered recording the call to AudioRecorder's save method
- * will block until the entire buffer is written to disk.
- * In the event that the buffer is very large, your app will noticeably hang.
- *
- * @shortdesc Creates an AudioRecorder.
- *
- * @example Basics/RecordAudioOutput
- *
- * @param source
- * the Recordable
object you want to use as a record source
- * @param fileName
- * the name of the file to record to
- * @param buffered
- * boolean: whether or not to use buffered recording
- *
- * @return an AudioRecorder
for the record source
- *
- * @related AudioRecorder
- * @invisible
- */
- public AudioRecorder createRecorder(Recordable source, String fileName, boolean buffered)
- {
- SampleRecorder rec = mimp.getSampleRecorder( source, fileName, buffered );
- if ( rec != null )
- {
- return new AudioRecorder( source, rec );
- }
- else
- {
- error( "Couldn't create an AudioRecorder for " + fileName + "." );
- }
- return null;
- }
-
- /**
- * An AudioInput is used when you want to monitor the active audio input
- * of the computer. On a laptop, for instance, this will typically be
- * the built-in microphone. On a desktop it might be the line-in
- * port on the soundcard. The default values are for a stereo input
- * with a 1024 sample buffer (ie the size of left, right, and mix
- * buffers), sample rate of 44100 and bit depth of 16. Generally
- * speaking, you will not want to specify these things, but it's
- * there if you need it.
- *
- * @shortdesc get an AudioInput that reads from the active audio input of the soundcard
- *
- * @return an AudioInput that reads from the active audio input of the soundcard
- *
- * @see #getLineIn(int, int, float, int)
- * @related AudioInput
- * @example Basics/MonitorInput
- */
- public AudioInput getLineIn()
- {
- return getLineIn( STEREO );
- }
-
- /**
- * Gets either a MONO or STEREO {@link AudioInput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @return an AudioInput
with the requested type, a 1024 sample
- * buffer, a sample rate of 44100 and a bit depth of 16
- * @see #getLineIn(int, int, float, int)
- */
- public AudioInput getLineIn(int type)
- {
- return getLineIn( type, 1024, 44100, 16 );
- }
-
- /**
- * Gets an {@link AudioInput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioInput
's sample buffer
- * to be (ie the size of left, right, and mix buffers)
- * @return an AudioInput
with the requested attributes, a
- * sample rate of 44100 and a bit depth of 16
- * @see #getLineIn(int, int, float, int)
- */
- public AudioInput getLineIn(int type, int bufferSize)
- {
- return getLineIn( type, bufferSize, 44100, 16 );
- }
-
- /**
- * Gets an {@link AudioInput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioInput
's sample buffer
- * to be (ie the size of left, right, and mix buffers)
- * @param sampleRate
- * float: the desired sample rate in Hertz (typically 44100)
- * @return an AudioInput
with the requested attributes and a
- * bit depth of 16
- * @see #getLineIn(int, int, float, int)
- */
- public AudioInput getLineIn(int type, int bufferSize, float sampleRate)
- {
- return getLineIn( type, bufferSize, sampleRate, 16 );
- }
-
- /**
- * Gets an {@link AudioInput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioInput
's sample buffer
- * to be (ie the size of left, right, and mix buffers)
- * @param sampleRate
- * float: the desired sample rate in Hertz (typically 44100)
- * @param bitDepth
- * int: the desired bit depth (typically 16)
- * @return an AudioInput
with the requested attributes
- */
- public AudioInput getLineIn(int type, int bufferSize, float sampleRate, int bitDepth)
- {
- AudioInput input = null;
- AudioStream stream = mimp.getAudioInput( type, bufferSize, sampleRate, bitDepth );
- if ( stream != null )
- {
- AudioOut out = mimp.getAudioOutput( type, bufferSize, sampleRate, bitDepth );
- // couldn't get an output, the system might not have one available
- // so in that case we provide a basic audio out to the input
- // that will pull samples from it and so forth
- if ( out == null )
- {
- out = new BasicAudioOut(stream.getFormat(), bufferSize);
- }
-
- input = new AudioInput( stream, out );
- }
-
- if ( input != null )
- {
- addSource( input );
- }
- else
- {
- error( "Minim.getLineIn: attempt failed, could not secure an AudioInput." );
- }
-
- return input;
- }
-
- /**
- * Get the input as an AudioStream that you can read from yourself, rather
- * than wrapped in an AudioInput that does that work for you.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioStream's interal
- * buffer to be.
- * @param sampleRate
- * float: the desired sample rate in Hertz (typically 44100)
- * @param bitDepth
- * int: the desired bit depth (typically 16)
- * @return an AudioStream that reads from the input source of the soundcard.
- */
- public AudioStream getInputStream(int type, int bufferSize, float sampleRate, int bitDepth)
- {
- AudioStream stream = mimp.getAudioInput( type, bufferSize, sampleRate, bitDepth );
- streams.add( stream );
- return stream;
- }
-
- /**
- * An AudioOutput is used to generate sound in real-time and output it to
- * the soundcard. Usually, the sound generated by an AudioOutput will be
- * heard through the speakers or headphones attached to a computer. The
- * default parameters for an AudioOutput are STEREO sound, a 1024 sample
- * buffer (ie the size of the left, right, and mix buffers), a sample
- * rate of 44100, and a bit depth of 16. To actually generate sound
- * with an AudioOutput you need to patch at least one sound generating
- * UGen to it, such as an Oscil.
- * AudioOutput
with the requested type, a 1024
- * sample buffer, a sample rate of 44100 and a bit depth of 16
- * @see #getLineOut(int, int, float, int)
- */
- public AudioOutput getLineOut(int type)
- {
- return getLineOut( type, 1024, 44100, 16 );
- }
-
- /**
- * Gets an {@link AudioOutput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioOutput's sample buffer
- * to be (ie the size of the left, right, and mix buffers)
- * @return an AudioOutput
with the requested attributes, a
- * sample rate of 44100 and a bit depth of 16
- * @see #getLineOut(int, int, float, int)
- */
- public AudioOutput getLineOut(int type, int bufferSize)
- {
- return getLineOut( type, bufferSize, 44100, 16 );
- }
-
- /**
- * Gets an {@link AudioOutput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioOutput's sample buffer
- * to be (ie the size of the left, right, and mix buffers)
- * @param sampleRate
- * float: the desired sample rate in Hertz (typically 44100)
- * @return an AudioOutput
with the requested attributes and a
- * bit depth of 16
- * @see #getLineOut(int, int, float, int)
- */
- public AudioOutput getLineOut(int type, int bufferSize, float sampleRate)
- {
- return getLineOut( type, bufferSize, sampleRate, 16 );
- }
-
- /**
- * Gets an {@link AudioOutput}.
- *
- * @param type
- * Minim.MONO or Minim.STEREO
- * @param bufferSize
- * int: how long you want the AudioOutput's sample buffer
- * to be (ie the size of the left, right, and mix buffers)
- * @param sampleRate
- * float: the desired sample rate in Hertz (typically 44100)
- * @param bitDepth
- * int: the desired bit depth (typically 16)
- * @return an AudioOutput
with the requested attributes
- */
- public AudioOutput getLineOut(int type, int bufferSize, float sampleRate, int bitDepth)
- {
- AudioOut out = mimp.getAudioOutput( type, bufferSize, sampleRate, bitDepth );
- if ( out != null )
- {
- AudioOutput output = new AudioOutput( out );
- addSource( output );
- return output;
- }
-
- error( "Minim.getLineOut: attempt failed, could not secure a LineOut." );
- return null;
- }
-}
diff --git a/src/ddf/minim/MultiChannelBuffer.java b/src/ddf/minim/MultiChannelBuffer.java
deleted file mode 100644
index f6e799e..0000000
--- a/src/ddf/minim/MultiChannelBuffer.java
+++ /dev/null
@@ -1,252 +0,0 @@
-package ddf.minim;
-
-/**
- * MultiChannelBuffer represents a chunk of multichannel (or mono) audio data.
- * It is primarily used internally when passing buffers of audio around, but
- * you will need to create one to use things like the loadFileIntoBuffer method of Minim
- * and the setSample method of Sampler. When thinking about a buffer of audio
- * we usually consider how many sample frames long that buffer is. This
- * is not the same as the actual number of values stored in the buffer. Mono, or
- * single channel audio, contains one sample per sample frame, but stereo is
- * two, quadraphonic is four, and so forth. The buffer size of a MultiChannelBuffer
- * is how many sample frames it stores, so when retrieving and setting values
- * it is required to indicate which channel should be operated upon.
- *
- * @example Advanced/loadFileIntoBuffer
- *
- * @related Minim
- *
- * @author Damien Di Fede
- *
- */
-
-public class MultiChannelBuffer
-{
- // TODO: consider just wrapping a FloatSampleBuffer
- private float[][] channels;
- private int bufferSize;
-
- /**
- * Construct a MultiChannelBuffer, providing a size and number of channels.
- *
- * @param bufferSize
- * int: The length of the buffer in sample frames.
- * @param numChannels
- * int: The number of channels the buffer should contain.
- */
- public MultiChannelBuffer(int bufferSize, int numChannels)
- {
- channels = new float[numChannels][bufferSize];
- this.bufferSize = bufferSize;
- }
-
- /**
- * Copy the data in the provided MultiChannelBuffer to this MultiChannelBuffer.
- * Doing so will change both the buffer size and channel count of this
- * MultiChannelBuffer to be the same as the copied buffer.
- *
- * @shortdesc Copy the data in the provided MultiChannelBuffer to this MultiChannelBuffer.
- *
- * @param otherBuffer
- * the MultiChannelBuffer to copy
- */
- public void set( MultiChannelBuffer otherBuffer )
- {
- bufferSize = otherBuffer.bufferSize;
- channels = otherBuffer.channels.clone();
- }
-
- /**
- * Returns the length of this buffer in samples.
- *
- * @return the length of this buffer in samples
- */
- public int getBufferSize()
- {
- return bufferSize;
- }
-
- /**
- * Returns the number of channels in this buffer.
- *
- * @return the number of channels in this buffer
- */
- public int getChannelCount()
- {
- return channels.length;
- }
-
- /**
- * Returns the value of a sample in the given channel,
- * at the given offset from the beginning of the buffer.
- * When sampleIndex is a float, this returns an interpolated
- * sample value. For instance, getSample( 0, 30.5f ) will
- * return an interpolated sample value in channel 0 that is
- * between the value at 30 and the value at 31.
- *
- * @shortdesc Returns the value of a sample in the given channel,
- * at the given offset from the beginning of the buffer.
- *
- * @param channelNumber
- * int: the channel to get the sample value from
- * @param sampleIndex
- * int: the offset from the beginning of the buffer, in samples.
- * @return
- * float: the value of the sample
- */
- public float getSample( int channelNumber, int sampleIndex )
- {
- return channels[channelNumber][sampleIndex];
- }
-
- /**
- * Returns the interpolated value of a sample in the given channel,
- * at the given offset from the beginning of the buffer,
- * For instance, getSample( 0, 30.5f ) will
- * return an interpolated sample value in channel 0 that is
- * between the value at 30 and the value at 31.
- *
- * @param channelNumber
- * int: the channel to get the sample value from
- * @param sampleIndex
- * float: the offset from the beginning of the buffer, in samples.
- * @return
- * float: the value of the sample
- */
- public float getSample( int channelNumber, float sampleIndex )
- {
- int lowSamp = (int)sampleIndex;
- int hiSamp = lowSamp + 1;
- if ( hiSamp == bufferSize )
- {
- return channels[channelNumber][lowSamp];
- }
- float lerp = sampleIndex - lowSamp;
- return channels[channelNumber][lowSamp] + lerp*(channels[channelNumber][hiSamp] - channels[channelNumber][lowSamp]);
- }
-
- /**
- * Sets the value of a sample in the given channel at the given
- * offset from the beginning of the buffer.
- *
- * @param channelNumber
- * int: the channel of the buffer
- * @param sampleIndex
- * int: the sample offset from the beginning of the buffer
- * @param value
- * float: the sample value to set
- */
- public void setSample( int channelNumber, int sampleIndex, float value )
- {
- channels[channelNumber][sampleIndex] = value;
- }
-
- /**
- * Calculates the RMS amplitude of one of the buffer's channels.
- *
- * @example Advanced/OfflineRendering
- *
- * @param channelNumber
- * int: the channel to use
- * @return
- * float: the RMS amplitude of the channel
- */
- public float getLevel( int channelNumber )
- {
- float[] samples = channels[channelNumber];
- float level = 0;
- for (int i = 0; i < samples.length; i++)
- {
- level += (samples[i] * samples[i]);
- }
- level /= samples.length;
- level = (float) Math.sqrt(level);
- return level;
- }
-
- /**
- * Returns the requested channel as a float array.
- * You should not necessarily assume that the
- * modifying the returned array will modify
- * the values in this buffer.
- *
- * @shortdesc Returns the requested channel as a float array.
- *
- * @param channelNumber
- * int: the channel to return
- * @return
- * float[]: the channel represented as a float array
- */
- public float[] getChannel(int channelNumber)
- {
- return channels[channelNumber];
- }
-
- /**
- * Sets all of the values in a particular channel using
- * the values of the provided float array. The array
- * should be at least as long as the current buffer size
- * of this buffer and this will only copy as many samples
- * as fit into its current buffer size.
- *
- * @shortdesc Sets all of the values in a particular channel using
- * the values of the provided float array.
- *
- * @param channelNumber
- * int: the channel to set
- * @param samples
- * float[]: the array of values to copy into the channel
- */
- public void setChannel(int channelNumber, float[] samples)
- {
- System.arraycopy( samples, 0, channels[channelNumber], 0, bufferSize );
- }
-
- /**
- * Set the number of channels this buffer contains.
- * Doing this will retain any existing channels
- * under the new channel count.
- *
- * @shortdesc Set the number of channels this buffer contains.
- *
- * @param numChannels
- * int: the number of channels this buffer should contain
- */
- public void setChannelCount(int numChannels)
- {
- if ( channels.length != numChannels )
- {
- float[][] newChannels = new float[numChannels][bufferSize];
- for( int c = 0; c < channels.length && c < numChannels; ++c )
- {
- newChannels[c] = channels[c];
- }
- channels = newChannels;
- }
- }
-
- /**
- * Set the length of this buffer in sample frames.
- * Doing this will retain all of the sample data
- * that can fit into the new buffer size.
- *
- * @shortdesc Set the length of this buffer in sample frames.
- *
- * @param bufferSize
- * int: the new length of this buffer in sample frames
- */
- public void setBufferSize(int bufferSize)
- {
- if ( this.bufferSize != bufferSize )
- {
- this.bufferSize = bufferSize;
- for( int i = 0; i < channels.length; ++i )
- {
- float[] newChannel = new float[bufferSize];
- // copy existing data into the new channel array
- System.arraycopy( channels[i], 0, newChannel, 0, (bufferSize < channels[i].length ? bufferSize : channels[i].length) );
- channels[i] = newChannel;
- }
- }
- }
-}
diff --git a/src/ddf/minim/NoteManager.java b/src/ddf/minim/NoteManager.java
deleted file mode 100644
index c227989..0000000
--- a/src/ddf/minim/NoteManager.java
+++ /dev/null
@@ -1,183 +0,0 @@
-package ddf.minim;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-
-import ddf.minim.ugens.Instrument;
-
-/**
- *
- * @author ddf
- * @invisible
- */
-
-public class NoteManager
-{
- // we use this do our timing, basically
- private float sampleRate;
- private float tempo;
- private float noteOffset;
- private float durationFactor;
- private int now;
- // our events are stored in a map.
- // the keys in this map are the "now" that the events should
- // occur at and the values are a list of events that occur
- // at that time.
- private HashMapPlayable
defines functionality that you would expect from a tapedeck
- * or CD player. Implementing classes are usually playing an audio file.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-public interface Playable
-{
- /**
- * Starts playback from the current position.
- * If this was previously set to loop, looping will be disabled.
- *
- */
- void play();
-
- /**
- * Starts playback millis
from the beginning.
- * If this was previously set to loop, looping will be disabled.
- *
- * @param millis the position to start playing from
- */
- void play(int millis);
-
- /**
- * Returns true if this currently playing.
- *
- * @return true if this is currently playing
- */
- boolean isPlaying();
-
- /**
- * Sets looping to continuous. If this is already playing, the position
- * will not be reset to the beginning. If this is not playing,
- * it will start playing.
- *
- */
- void loop();
-
- /**
- * Sets this to loop num
times. If this is already playing,
- * the position will not be reset to the beginning.
- * If this is not playing, it will start playing.
- *
- * @param num
- * the number of times to loop
- */
- void loop(int num);
-
- /**
- * Returns true if this is currently playing and has more than one loop
- * left to play.
- *
- * @return true if this is looping
- */
- boolean isLooping();
-
- /**
- * Returns the number of loops left to do.
- *
- * @return the number of loops left
- */
- int loopCount();
-
- /**
- * Sets the loop points used when looping.
- *
- * @param start the start of the loop in milliseconds
- * @param stop the end of the loop in milliseconds
- */
- void setLoopPoints(int start, int stop);
-
- /**
- * Pauses playback.
- *
- */
- void pause();
-
- /**
- * Sets the position to millis
milliseconds from
- * the beginning. This will not change the playstate. If an error
- * occurs while trying to cue, the position will not change.
- * If you try to cue to a negative position or try to a position
- * that is greater than length()
, the amount will be clamped
- * to zero or length()
.
- *
- * @param millis the position to place the "playhead"
- */
- void cue(int millis);
-
- /**
- * Skips millis
from the current position. millis
- * can be negative, which will make this skip backwards. If the skip amount
- * would result in a negative position or a position that is greater than
- * length()
, the new position will be clamped to zero or
- * length()
.
- *
- * @param millis how many milliseconds to skip, sign indicates direction
- */
- void skip(int millis);
-
- /**
- * Rewinds to the beginning. This does not stop playback.
- *
- */
- void rewind();
-
- /**
- * Returns the current position of the "playhead" (ie how much of
- * the sound has already been played)
- *
- * @return the current position of the "playhead"
- */
- int position();
-
- /**
- * Returns the length of the sound in milliseconds. If for any reason the
- * length could not be determined, this will return -1. However, an unknown
- * length should not impact playback.
- *
- * @return the length of the sound in milliseconds
- */
- int length();
-
- /**
- * Returns and AudioMetaData
object that describes this audio.
- *
- * @see AudioMetaData
- *
- * @return the AudioMetaData
for this
- */
- AudioMetaData getMetaData();
-}
diff --git a/src/ddf/minim/Polyphonic.java b/src/ddf/minim/Polyphonic.java
deleted file mode 100644
index c4b6208..0000000
--- a/src/ddf/minim/Polyphonic.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede Polyphonic
describes an object that can have multiple
- * AudioSignal
s attached to it. It is implemented by
- * {@link AudioOutput}.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-
-public interface Polyphonic
-{
- /**
- * Enables all signals currently attached to this. If you want to enable only
- * a single signal, use {@link #enableSignal(int)}.
- *
- */
- void sound();
-
- /**
- * Disables all signals currently attached to this. If you want to disable
- * only a single signal, use {@link #disableSignal(int)}.
- *
- */
- void noSound();
-
- /**
- * Returns true if at least one signal in the chain is enabled.
- *
- * @return true if at least one signal in the signal chain is enabled
- */
- boolean isSounding();
-
- /**
- * Returns true if signal
is in the chain and is also enabled.
- *
- * @param signal
- * the AudioSignal
to check the status of
- * @return true if signal
is in the chain and is enabled
- */
- boolean isEnabled(AudioSignal signal);
-
- /**
- * Adds an signal to the signals chain.
- *
- * @param signal
- * the AudioSignal
to add
- */
- void addSignal(AudioSignal signal);
-
- /**
- * Returns the ith
signal in the signal chain.
- * This method is not required to do bounds checking and may throw an
- * ArrayOutOfBoundsException if i
is larger than
- * {@link #signalCount()}.
- *
- * @param i
- * which signal to return
- *
- * @return the requested signal
- */
- AudioSignal getSignal(int i);
-
- boolean hasSignal(AudioSignal signal);
-
- /**
- * Returns the number of signals in the chain.
- *
- * @return the number of signals in the chain
- */
- int signalCount();
-
- /**
- * Enables the i
th signal in the signal chain.
- *
- * @param i
- * the index of the signal to enable
- */
- void enableSignal(int i);
-
- /**
- * Enables signal
if it is in the chain.
- *
- * @param signal
- * the AudioSignal
to enable
- */
- void enableSignal(AudioSignal signal);
-
- /**
- * disables the i
th signal in the signal chain.
- *
- * @param i
- * the index of the signal to disable
- */
- void disableSignal(int i);
-
- /**
- * Disables signal
if it is in the chain.
- *
- * @param signal
- * the AudioSignal
to disable
- */
- void disableSignal(AudioSignal signal);
-
- /**
- * Removes signal
from the signals chain.
- *
- * @param signal
- * the AudioSignal to remove
- */
- void removeSignal(AudioSignal signal);
-
- /**
- * Removes and returns the ith
signal in the
- * signal chain.
- *
- * @param i
- * which signal to remove
- * @return the removed AudioSignal
- */
- AudioSignal removeSignal(int i);
-
- /**
- * Removes all signals from the signal chain.
- *
- */
- void clearSignals();
-}
diff --git a/src/ddf/minim/Recordable.java b/src/ddf/minim/Recordable.java
deleted file mode 100644
index ea4c13d..0000000
--- a/src/ddf/minim/Recordable.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede Recordable
object is one that can provide a program with
- * floating point samples of the audio passing through it. It does this using
- * AudioListener
s. You add listeners to the Recordable
and
- * then the Recordable
will call the appropriate samples
- * method of all its listeners when it has a new buffer of samples. It is also
- * possible to query a Recordable
object for its buffer size, type
- * (mono or stereo), and audio format.
- *
- * @author Damien Di Fede
- *
- */
-public interface Recordable
-{
- /**
- * Adds a listener who will be notified each time this receives
- * or creates a new buffer of samples. If the listener has already
- * been added, it will not be added again.
- *
- * @example Advanced/AddAndRemoveAudioListener
- *
- * @param listener the listener to add
- */
- void addListener(AudioListener listener);
-
- /**
- * Removes the listener from the list of listeners.
- *
- * @example Advanced/AddAndRemoveAudioListener
- *
- * @param listener the listener to remove
- */
- void removeListener(AudioListener listener);
-
- /**
- * Returns the format of this recordable audio.
- *
- * @return the format of the audio
- */
- AudioFormat getFormat();
-
- /**
- * Returns either Minim.MONO or Minim.STEREO
- *
- * @return Minim.MONO if this is mono, Minim.STEREO if this is stereo
- */
- int type();
-
- /**
- * Returns the buffer size being used by this.
- *
- * @return the buffer size
- */
- int bufferSize();
-
- /**
- * Returns the sample rate of the audio.
- *
- * @return the sample rate of the audio
- */
- float sampleRate();
-}
diff --git a/src/ddf/minim/SignalChain.java b/src/ddf/minim/SignalChain.java
deleted file mode 100644
index b812591..0000000
--- a/src/ddf/minim/SignalChain.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede SignalChain
is a list of {@link AudioSignal AudioSignals}
- * that gives you the ability to enable (unmute) and disable (mute) signals.
- * When you add a signal, it is added to the end of the chain and is enabled.
- * When you remove a signal, signals further down the chain are moved up a slot.
- * SignalChain
is itself an AudioSignal
, so you
- * can easily create groups of signals that can be enabled/disabled together by
- * putting them in an SignalChain
and then adding that chain to a
- * Polyphonic
object as a single signal. When the signal chain is
- * asked to generate a signal, it asks each of its signals to generate audio and
- * then mixes all of the signals together. SignalChain
is fully
- * synchronized
so that signals cannot be added and removed from
- * the chain during signal generation.
- *
- * @author Damien Di Fede
- * @invisible
- *
- */
-@Deprecated
-public class SignalChain implements AudioSignal
-{
- // the signals in the order they were added
- private VectorSignalChain
.
- *
- */
- public SignalChain()
- {
- signals = new Vectorsignal
to the end of the chain.
- *
- * @param signal
- * the AudioEffect
to add
- */
- public synchronized void add(AudioSignal signal)
- {
- signals.add(signal);
- enabled.add(signal);
- }
-
- /**
- * Removes signal
from the chain.
- *
- * @param signal
- * the AudioSignal
to remove
- */
- public synchronized void remove(AudioSignal signal)
- {
- //Minim.debug("Marking " + signal.toString() + " for removal.");
- signalsToRemove.add(signal);
- }
-
- /**
- * Removes and returns the i
th signal from the
- * chain.
- *
- * @param i
- * the index of the AudioSignal
to remove
- * @return the AudioSignal
that was removed
- */
- public synchronized AudioSignal remove(int i)
- {
- AudioSignal s = signals.remove(i);
- enabled.remove(s);
- return s;
- }
-
- /**
- * Gets the ith
signal in the chain.
- *
- * @param i
- * the index of the AudioSignal
to get
- *
- * @return the ith
signal in the chain.
- */
- public synchronized AudioSignal get(int i)
- {
- return signals.get(i);
- }
-
- /**
- * Returns true if s
is in the chain.
- *
- * @param s the AudioSignal
to check for
- * @return true if s
is in the chain
- */
- public synchronized boolean contains(AudioSignal s)
- {
- return signals.contains(s);
- }
-
- /**
- * Enables the i
th effect in the chain.
- *
- * @param i
- * the index of the effect to enable
- */
- public synchronized void enable(int i)
- {
- enabled.add(get(i));
- }
-
- /**
- * Enables signal
if it is in the chain.
- *
- * @param signal
- * the AudioSignal
to enable
- */
- public synchronized void enable(AudioSignal signal)
- {
- if (signals.contains(signal))
- {
- enabled.add(signal);
- }
- }
-
- /**
- * Enables all signals in the chain.
- *
- */
- public synchronized void enableAll()
- {
- enabled.addAll(signals);
- }
-
- /**
- * Returns true if at least one effect in the chain is enabled.
- *
- * @return true if at least one effect in the chain is enabled
- */
- public synchronized boolean hasEnabled()
- {
- return enabled.size() > 0;
- }
-
- /**
- * Returns true if e
is in the chain and is enabled.
- *
- * @param signal
- * the AudioSignal
to return the status of
- * @return true if signal
is enabled and in the chain
- */
- public synchronized boolean isEnabled(AudioSignal signal)
- {
- return enabled.contains(signal);
- }
-
- /**
- * Disables the i
th effect in the chain.
- *
- * @param i
- * the index of the effect to disable
- */
- public synchronized void disable(int i)
- {
- enabled.remove(get(i));
- }
-
- /**
- * Disables signal
if it is in the chain.
- *
- * @param signal
- * the AudioSignal
to disable
- */
- public synchronized void disable(AudioSignal signal)
- {
- enabled.remove(signal);
- }
-
- /**
- * Disables all signals in the chain.
- *
- */
- public synchronized void disableAll()
- {
- enabled.clear();
- }
-
- /**
- * Returns the number of signals in the chain.
- *
- * @return the number of signals in the chain
- */
- public synchronized int size()
- {
- return signals.size();
- }
-
- /**
- * Removes all signals from the effect chain.
- *
- */
- public synchronized void clear()
- {
- signals.clear();
- enabled.clear();
- }
-
- /**
- * Asks all the enabled signals in the chain to generate a new buffer of
- * samples, adds the buffers together and puts the result in
- * signal
.
- *
- */
- public synchronized void generate(float[] signal)
- {
- if ( tmpL == null )
- {
- tmpL = new float[signal.length];
- }
- for (int i = 0; i < signals.size(); i++)
- {
- AudioSignal s = signals.get(i);
- if ( enabled.contains(s) )
- {
- for(int it = 0; it < tmpL.length; it++)
- {
- tmpL[it] = 0;
- }
- s.generate(tmpL);
- for (int is = 0; is < signal.length; is++)
- {
- signal[is] += tmpL[is];
- }
- }
- }
- // now remove signals we have marked for removal
- signals.removeAll(signalsToRemove);
- signalsToRemove.removeAllElements();
- }
-
- /**
- * Asks all the enabled signals in the chain to generate a left and right
- * buffer of samples, adds the signals together and puts the result in
- * left
and right
.
- */
- public synchronized void generate(float[] left, float[] right)
- {
- if ( tmpL == null )
- {
- tmpL = new float[left.length];
- }
- if ( tmpR == null )
- {
- tmpR = new float[right.length];
- }
- for (int i = 0; i < signals.size(); i++)
- {
- AudioSignal s = signals.get(i);
- if ( enabled.contains(s) )
- {
- s.generate(tmpL, tmpR);
- for (int j = 0; j < left.length; j++)
- {
- left[j] += tmpL[j];
- right[j] += tmpR[j];
- }
- }
- }
- // now remove signals we have marked for removal
- signals.removeAll(signalsToRemove);
- signalsToRemove.removeAllElements();
- }
-}
diff --git a/src/ddf/minim/SignalSplitter.java b/src/ddf/minim/SignalSplitter.java
deleted file mode 100644
index 2553d8d..0000000
--- a/src/ddf/minim/SignalSplitter.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede SignalSplitter
acts exactly like a headphone splitter.
- * When you pass it audio with the samples
method, it echoes that
- * audio out to all of its listeners, giving each their own copy of the audio.
- * In other words, changes that the listeners make to the float arrays
- * they receive from a SignalSplitter
will not be reflected in
- * the arrays you pass to samples
. SignalSplitter
is
- * fully synchronized
so that listeners cannot be added and
- * removed while it is in the midst transmitting.
- * SignalSplitter
that will receive
- * audio in the given format and in buffers the size of
- * bufferSize
. Strictly speaking, a SignalSplitter
- * doesn't care about either of these things because it does nothing with
- * the samples it receives other than pass them on. But both things are
- * required to fulfill the Recordable
contract.
- *
- * @param format the AudioFormat
of samples that this will receive
- * @param bufferSize the size of the float arrays this will receive
- */
- public SignalSplitter(AudioFormat format, int bufferSize)
- {
- f = format;
- bs = bufferSize;
- listeners = new Vectorosc.patch( filter ).patch( adsr ).patch( output );
- *
- * You can read this code left to right. It says that the output of an Oscil
- * should be sent through a filter (perhaps a LowPass) and the output of the
- * filter should be sent through an ADSR envelope, which should then be sent to
- * an AudioOutput. It's incredibly clear what the signal path is and it can
- * be stated concisely.
- * frequency
. UGenInputs can be patched to, just like UGens, which
- * means you might have a line of code like this:
- *
- * line.patch( osc.frequency );
- *
- * This says that a Line UGen should control the value of the Oscil's frequency.
- * You may have created a Line that changes it's value from 440 to 880 over 2
- * seconds. The audible result, when you call activate()
on the Line,
- * is that the Oscil will sweep upwards in frequency and then hold there until you activate the
- * Line again. All of this control happens on a sample-by-sample basis, which
- * means (hopefully) no clicks and pops.
- *
- * For a list of all UGens included with Minim, see the UGens package doc.
- *
- * @example Basics/SynthesizeSound
- *
- * @author Damien Di Fede, Anderson Mills
- */
-public abstract class UGen
-{
- /**
- * This enum is used to specify the InputType of the UGenInput.
- * An AUDIO UGenInput will have a last values array that conforms
- * to the channel count of the UGen that owns it, whereas a CONTROL
- * UGenInput will always have only one channel.
- *
- * @author Anderson Mills
- * @nosuperclasses
- */
- // jam3: enum is automatically static so it can't be in the nested class
- public enum InputType
- {
- CONTROL, AUDIO
- };
-
- // ddf: UGen class members are before the UGenInput definition because the
- // UGenInput class
- // refers to some of these. I think it's clearer to see these before reading
- // the
- // UGenInput code.
-
- // list of UGenInputs connected to this UGen
- private ArrayList
- * ugen.anInput.getLastValues()[0] = 1.f;
- * ugen.anInput.getLastValues()[1] = 0.f;
- *
- *
- * @shortdesc Sets all values in the last values array to the provided value.
- *
- * @param value
- * float: the value to set all last values to
- */
- public void setLastValue(float value)
- {
- for ( int i = 0; i < m_lastValues.length; ++i )
- {
- m_lastValues[i] = value;
- }
- }
-
- // this will be called by the owning UGen *only* when something is
- // patched to this input.
- void tick()
- {
- if ( m_incoming != null )
- {
- m_incoming.tick( m_lastValues );
- }
- }
-
- /**
- * @return the InputType as a string (for debugging)
- */
- public String getInputTypeAsString()
- {
- String typeLabel = null;
- switch ( m_inputType )
- {
- case AUDIO:
- typeLabel = "AUDIO";
- break;
- case CONTROL:
- typeLabel = "CONTROL";
- break;
- }
- return typeLabel;
- }
-
- /**
- * Print information about this UGenInput (for debugging)
- */
- public void printInput()
- {
- Minim.debug( "UGenInput: " + " signal = " + getInputTypeAsString() + " " + ( m_incoming != null ) );
- }
- } // ends the UGenInput inner class
-
- /**
- * Constructor for a UGen.
- */
- protected UGen()
- {
- m_allInputs = new ArrayList
- * sine.patch( gain ).patch( out );
- *
- */
- // ddf: this is final because we never want people to override it.
- public final UGen patch(UGen connectToUGen)
- {
- setSampleRate( connectToUGen.m_sampleRate );
- // jam3: connecting to a UGen is the same as connecting to it's first
- // input
- connectToUGen.addInput( this );
- // TODO jam3: m_nOutputs should only increase when this chain will be
- // ticked!
- m_nOutputs += 1;
- Minim.debug( "m_nOutputs = " + m_nOutputs );
- return connectToUGen;
- }
-
- /**
- * Connect the output of this UGen to a specific UGenInput of a UGen.
- *
- * @param connectToInput
- * The UGenInput to patch to.
- * @return the UGen that owns connectToInput
- */
- public final UGen patch(UGenInput connectToInput)
- {
- setSampleRate( connectToInput.getOuterUGen().m_sampleRate );
- connectToInput.setIncomingUGen( this );
- // TODO jam3: m_nOutputs should only increase when this chain will be
- // ticked!
- m_nOutputs += 1;
- Minim.debug( "m_nOutputs = " + m_nOutputs );
-
- return connectToInput.getOuterUGen();
- }
-
- /**
- * Patch the output of this UGen to the provided AudioOuput. Doing so will
- * immediately result in this UGen and all UGens patched into it to begin
- * generating audio.
- *
- * @param audioOutput
- * The AudioOutput you want to connect this UGen to.
- */
- public final void patch(AudioOutput audioOutput)
- {
- Minim.debug( "Patching " + this + " to the output " + audioOutput + "." );
- setSampleRate( audioOutput.sampleRate() );
- setChannelCount( audioOutput.getFormat().getChannels() );
- patch( audioOutput.bus );
- }
-
- /**
- * If you want to do something other than the default behavior when your
- * UGen is patched to, you can override this method in your derived class.
- * Summer, for instance, keeps a list of all the UGens that have been
- * patched to it, so that it can tick them and sum the results when it
- * uGenerates.
- *
- * @param input the UGen to add as an input
- */
- // ddf: Protected because users of UGens should never call this directly.
- // Sub-classes can override this to control what happens when something
- // is patched to them. See the Summer class.
- protected void addInput(UGen input)
- {
- // jam3: This default behavior is that the incoming signal will be added
- // to the first input in the m_allInputs list.
- Minim.debug( "UGen addInput called." );
- // TODO change input checking to an Exception?
- if ( m_allInputs.size() > 0 )
- {
- Minim.debug( "Initializing default input on something" );
- this.m_allInputs.get( 0 ).setIncomingUGen( input );
- }
- else
- {
- System.err.println( "Trying to connect to UGen with no default input." );
- }
- }
-
- /**
- * Unpatch this UGen from an AudioOutput or other UGen.
- * This causes this UGen and all UGens patched into it to stop generating audio
- * if they are not patched to an AudioOuput somewhere else in the chain.
- *
- * @shortdesc Unpatch this UGen from an AudioOutput or other UGen.
- *
- * @param audioOutput
- * The AudioOutput this UGen should be disconnected from.
- */
- public final void unpatch( AudioOutput audioOutput )
- {
- Minim.debug( "Unpatching " + this + " from the output " + audioOutput + "." );
- unpatch( audioOutput.bus );
- }
-
- /**
- * Remove this UGen as an input of fromUGen.
- *
- * @param fromUGen
- * The UGen to unpatch from.
- *
- */
- public final void unpatch( UGen fromUGen )
- {
- fromUGen.removeInput( this );
- // TODO m_nOutputs needs to be updated as the converse of patch above.
- m_nOutputs -= 1;
- Minim.debug( "m_nOutputs = " + m_nOutputs );
- }
-
- /**
- * When a UGen is unpatched from this UGen, removeInput is called.
- * If you've written an UGen subclass that needs to know when this
- * happens or has special handling of input removal, you can override
- * this method. See the implementation of Summer for an example
- * of why you might need to do this.
- *
- * @param input
- * the UGen to remove as an input to this UGen
- */
- // This currently does nothing, but is overridden in Summer.
- protected void removeInput(UGen input)
- {
- Minim.debug( "UGen removeInput called." );
- // see if any of our ugen inputs currently have input as the incoming ugen
- // set their incoming ugen to null if that's the case
- for ( int i = 0; i < m_allInputs.size(); i++ )
- {
- if ( m_allInputs.get( i ).getIncomingUGen() == input )
- {
- this.m_allInputs.get( i ).setIncomingUGen( null );
- }
- }
- }
-
- /**
- * Generates one sample frame for this UGen.
- *
- * @param channels
- * An array that represents one sample frame. To generate a mono
- * signal, pass an array of length 1, if stereo an array of
- * length 2, and so on. How a UGen deals with multi-channel sound
- * will be implementation dependent.
- */
- public final void tick(float[] channels)
- {
- if ( m_nOutputs > 0 )
- {
- // only tick once per sampleframe when multiple outputs
- m_currentTick = ( m_currentTick + 1 ) % ( m_nOutputs );
- }
-
- if ( 0 == m_currentTick )
- {
- for ( int i = 0; i < m_allInputs.size(); ++i )
- {
- m_allInputs.get( i ).tick();
- }
-
- // and then uGenerate for this UGen
- uGenerate( channels );
-
- for( int i = 0; i < channels.length && i < m_lastValues.length; ++i )
- {
- m_lastValues[i] = channels[i];
- }
- }
- else
- {
- for( int i = 0; i < channels.length && i < m_lastValues.length; ++i )
- {
- channels[i] = m_lastValues[i];
- }
- }
- }
-
- /**
- * Implement this method when you extend UGen. It will be called when your
- * UGen needs to generate one sample frame of audio. It is expected that you
- * will assign values to the array and not simply modify the
- * existing values. In the case where you write a UGen that takes audio
- * input and modifies it, the pattern to follow is to have the first
- * UGenInput you create be your audio input and then in uGenerate you will
- * use the getLastValues
method of your audio UGenInput to
- * retrieve the audio you want to modify, which you will then modify however
- * you need to, assigning the result to the values in channels
.
- *
- * @param channels
- * an array representing one sample frame.
- */
- protected abstract void uGenerate(float[] channels);
-
- /**
- * Return the last values generated by this UGen. This will most often be
- * used by sub-classes when pulling data from their inputs.
- *
- * @return float[]: array containing the most recent sample frame this UGen generated
- */
- public final float[] getLastValues()
- {
- return m_lastValues;
- }
-
- /**
- * Returns the sample rate of this UGen.
- *
- * @return float: the current sample rate of this UGen
- */
- public final float sampleRate()
- {
- return m_sampleRate;
- }
-
- /**
- * Override this method in your derived class to receive a notification when
- * the sample rate of your UGen has changed. You might need to do this to
- * recalculate sample rate dependent values, such as the step size for an
- * oscillator.
- *
- */
- protected void sampleRateChanged()
- {
- // default implementation does nothing.
- }
-
- /**
- * Set the sample rate for this UGen.
- *
- * @param newSampleRate
- * float, the sample rate this UGen should generate at.
- */
- // ddf: changed this to public because Summer needs to be able to call it
- // on all of its UGens when it has its sample rate set by being connected
- // to an AudioOuput. Realized it's not actually a big deal for people to
- // set the sample rate of any UGen they create whenever they want. In fact,
- // could actually make total sense to want to do this with something playing
- // back a chunk of audio loaded from disk. Made this final because it should
- // never be overridden. If sub-classes need to know about sample rate
- // changes
- // the should override sampleRateChanged()
- public final void setSampleRate(float newSampleRate)
- {
- if ( m_sampleRate != newSampleRate )
- {
- m_sampleRate = newSampleRate;
- sampleRateChanged();
-
- // these are guaranteed to have an incoming UGen
- // if one doesn't it's probably a bug!
- for ( int i = 0; i < m_allInputs.size(); ++i )
- {
- UGen inputIncoming = m_allInputs.get( i ).getIncomingUGen();
- if ( inputIncoming != null )
- {
- inputIncoming.setSampleRate( newSampleRate );
- }
- }
- }
- }
-
- /**
- * Let this UGen know how many channels of audio you will be asking it for.
- * This will be called automatically when a UGen is patched to an AudioOuput
- * and propagated to all UGenInputs of type AUDIO.
- *
- * @shortdesc Let this UGen know how many channels of audio you will be asking it for.
- *
- * @param numberOfChannels
- * how many channels of audio you will be generating with this UGen
- */
- public void setChannelCount(int numberOfChannels)
- {
- for ( int i = 0; i < m_allInputs.size(); ++i )
- {
- UGenInput input = m_allInputs.get( i );
- if ( input.getInputType() == InputType.AUDIO )
- {
- input.setChannelCount( numberOfChannels );
- }
- }
-
- if ( m_lastValues.length != numberOfChannels )
- {
- m_lastValues = new float[numberOfChannels];
- channelCountChanged();
- }
- }
-
- /**
- * Returns the number of channels this UGen has been configured to generate.
- *
- * @return int: how many channels of audio this UGen will generate
- */
- public int channelCount() { return m_lastValues.length; }
-
- /**
- * This method is only called when setChannelCount results in the channel count
- * of this UGen actually changing. Override this function in
- * sub-classes of UGen if you need to reconfigure things
- * when the channel count changes.
- */
- protected void channelCountChanged() {}
-
- /**
- * Prints all inputs connected to this UGen (for debugging)
- */
- public void printInputs()
- {
- for ( int i = 0; i < m_allInputs.size(); i++ )
- {
- Minim.debug( "m_allInputs " + i + " " );
- if ( m_allInputs.get( i ) == null )
- {
- Minim.debug( "null" );
- }
- else
- {
- m_allInputs.get( i ).printInput();
- }
- }
- }
-
- protected UGenInput addControl()
- {
- return new UGenInput( InputType.CONTROL );
- }
-
- protected UGenInput addControl( float initialValue )
- {
- return new UGenInput( InputType.CONTROL, initialValue );
- }
-
- protected UGenInput addAudio()
- {
- return new UGenInput( InputType.AUDIO );
- }
-}
diff --git a/src/ddf/minim/UGenSignal.java b/src/ddf/minim/UGenSignal.java
deleted file mode 100644
index 8255b5f..0000000
--- a/src/ddf/minim/UGenSignal.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package ddf.minim;
-
-
-/** @invisible */
-@Deprecated
-public class UGenSignal implements AudioSignal
-{
- private UGen generator;
-
- UGenSignal(UGen ugen)
- {
- generator = ugen;
- }
-
- /**
- * Sets the UGen that this UGenSignal wraps.
- *
- * @param ugen the UGen that is used to generate audio
- */
- public void setUGen(UGen ugen)
- {
- generator = ugen;
- }
-
- /**
- * Returns the UGen that is being wrapped by this UGenSignal.
- *
- * @return the wrapped UGen
- */
- public UGen getUGen()
- {
- return generator;
- }
-
- /**
- * Generates a buffer of samples by ticking the wrapped UGen mono.length times.
- */
- public void generate(float[] mono)
- {
- float[] sample = new float[1];
- for(int i = 0; i < mono.length; i++)
- {
- sample[0] = 0;
- generator.tick(sample);
- mono[i] = sample[0];
- }
- }
-
- /**
- * Generates a buffer of samples by ticking the wrapped UGen left.length times.
- */
- public void generate(float[] left, float[] right)
- {
- float[] sample = new float[2];
- for(int i = 0; i < left.length; i++)
- {
- sample[0] = 0;
- sample[1] = 0;
- generator.tick(sample);
- left[i] = sample[0];
- right[i] = sample[1];
- }
- }
-}
diff --git a/src/ddf/minim/analysis/BartlettHannWindow.java b/src/ddf/minim/analysis/BartlettHannWindow.java
deleted file mode 100644
index e9918af..0000000
--- a/src/ddf/minim/analysis/BartlettHannWindow.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede timeSize
and will throw and IllegalArgumentException if this
- * is not the case.
- *
- * @author Damien Di Fede
- *
- * @see FourierTransform
- * @see FFT
- * @see The Discrete Fourier Transform
- *
- * @invisible
- *
- */
-public class DFT extends FourierTransform
-{
- /**
- * Constructs a DFT that expects audio buffers of length timeSize
that
- * have been recorded with a sample rate of sampleRate
. Will throw an
- * IllegalArgumentException if timeSize
is not even.
- *
- * @param timeSize the length of the audio buffers you plan to analyze
- * @param sampleRate the sample rate of the audio samples you plan to analyze
- */
- public DFT(int timeSize, float sampleRate)
- {
- super(timeSize, sampleRate);
- if (timeSize % 2 != 0)
- throw new IllegalArgumentException("DFT: timeSize must be even.");
- buildTrigTables();
- }
-
- protected void allocateArrays()
- {
- spectrum = new float[timeSize / 2 + 1];
- real = new float[timeSize / 2 + 1];
- imag = new float[timeSize / 2 + 1];
- }
-
- /**
- * Not currently implemented.
- */
- public void scaleBand(int i, float s)
- {
- }
-
- /**
- * Not currently implemented.
- */
- public void setBand(int i, float a)
- {
- }
-
- public void forward(float[] samples)
- {
- if (samples.length != timeSize)
- {
- Minim
- .error("DFT.forward: The length of the passed sample buffer must be equal to DFT.timeSize().");
- return;
- }
- doWindow(samples);
- int N = samples.length;
- for (int f = 0; f <= N / 2; f++)
- {
- real[f] = 0.0f;
- imag[f] = 0.0f;
- for (int t = 0; t < N; t++)
- {
- real[f] += samples[t] * cos(t * f);
- imag[f] += samples[t] * -sin(t * f);
- }
- }
- fillSpectrum();
- }
-
- public void inverse(float[] buffer)
- {
- int N = buffer.length;
- real[0] /= N;
- imag[0] = -imag[0] / (N / 2);
- real[N / 2] /= N;
- imag[N / 2] = -imag[0] / (N / 2);
- for (int i = 0; i < N / 2; i++)
- {
- real[i] /= (N / 2);
- imag[i] = -imag[i] / (N / 2);
- }
- for (int t = 0; t < N; t++)
- {
- buffer[t] = 0.0f;
- for (int f = 0; f < N / 2; f++)
- {
- buffer[t] += real[f] * cos(t * f) + imag[f] * sin(t * f);
- }
- }
- }
-
- // lookup table data and functions
-
- private float[] sinlookup;
- private float[] coslookup;
-
- private void buildTrigTables()
- {
- int N = spectrum.length * timeSize;
- sinlookup = new float[N];
- coslookup = new float[N];
- for (int i = 0; i < N; i++)
- {
- sinlookup[i] = (float) Math.sin(i * TWO_PI / timeSize);
- coslookup[i] = (float) Math.cos(i * TWO_PI / timeSize);
- }
- }
-
- private float sin(int i)
- {
- return sinlookup[i];
- }
-
- private float cos(int i)
- {
- return coslookup[i];
- }
-}
diff --git a/src/ddf/minim/analysis/FFT.java b/src/ddf/minim/analysis/FFT.java
deleted file mode 100644
index a01401c..0000000
--- a/src/ddf/minim/analysis/FFT.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede timeSize
that is not a power of two, an IllegalArgumentException will be
- * thrown.
- * N
, there will be
- * N/2
frequency bands in the spectrum.
- * timeSize
of 1024 and and a sampleRate
of 44100
- * Hz, then the spectrum will contain values for frequencies below 22010 Hz,
- * which is the Nyquist frequency (half the sample rate). If you ask for the
- * value of band number 5, this will correspond to a frequency band centered on
- * 5/1024 * 44100 = 0.0048828125 * 44100 = 215 Hz
. The width of
- * that frequency band is equal to 2/1024
, expressed as a
- * fraction of the total bandwidth of the spectrum. The total bandwith of the
- * spectrum is equal to the Nyquist frequency, which in this case is 22050, so
- * the bandwidth is equal to about 50 Hz. It is not necessary for you to
- * remember all of these relationships, though it is good to be aware of them.
- * The function getFreq()
allows you to query the spectrum with a
- * frequency in Hz and the function getBandWidth()
will return
- * the bandwidth in Hz of each frequency band in the spectrum.
- * audio
is an AudioSource and fft
is an FFT.
- *
- *
- * fft.forward(audio.left);
- * for (int i = 0; i < fft.specSize(); i++)
- * {
- * // draw the line for frequency band i, scaling it by 4 so we can see it a bit better
- * line(i, height, i, height - fft.getBand(i) * 4);
- * }
- *
- *
- * Windowing
- * window()
- * function with an appropriate WindowFunction, such as HammingWindow()
,
- * the sample buffers passed to the object for analysis will be shaped by the current
- * window before being transformed. The result of using a window is to reduce
- * the leakage in the spectrum somewhat.
- * linAverages()
allows you to specify the number of averages
- * that you want and will group frequency bands into groups of equal number. So
- * if you have a spectrum with 512 frequency bands and you ask for 64 averages,
- * each average will span 8 bands of the full spectrum.
- * logAverages()
will group frequency bands by octave and allows
- * you to specify the size of the smallest octave to use (in Hz) and also how
- * many bands to split each octave into. So you might ask for the smallest
- * octave to be 60 Hz and to split each octave into two bands. The result is
- * that the bandwidth of each average is different. One frequency is an octave
- * above another when it's frequency is twice that of the lower frequency. So,
- * 120 Hz is an octave above 60 Hz, 240 Hz is an octave above 120 Hz, and so on.
- * When octaves are split, they are split based on Hz, so if you split the
- * octave 60-120 Hz in half, you will get 60-90Hz and 90-120Hz. You can see how
- * these bandwidths increase as your octave sizes grow. For instance, the last
- * octave will always span sampleRate/4 - sampleRate/2
, which in
- * the case of audio sampled at 44100 Hz is 11025-22010 Hz. These
- * logarithmically spaced averages are usually much more useful than the full
- * spectrum or the linearly spaced averages because they map more directly to
- * how humans perceive sound.
- * calcAvg()
allows you to specify the frequency band you want an
- * average calculated for. You might ask for 60-500Hz and this function will
- * group together the bands from the full spectrum that fall into that range and
- * average their amplitudes for you.
- * noAverages()
. This will not impact your ability to use
- * calcAvg()
, it will merely prevent the object from calculating
- * an average array every time you use forward()
.
- * timeSize()
long. The set
and
- * scale
functions allow you the ability to shape the spectrum
- * already stored in the object before taking the inverse transform. You might
- * use these to filter frequencies in a spectrum or modify it in some other way.
- *
- * @example Basics/AnalyzeSound
- *
- * @see FourierTransform
- * @see The Fast Fourier Transform
- *
- * @author Damien Di Fede
- *
- */
-public class FFT extends FourierTransform
-{
- /**
- * Constructs an FFT that will accept sample buffers that are
- * timeSize
long and have been recorded with a sample rate of
- * sampleRate
. timeSize
must be a
- * power of two. This will throw an exception if it is not.
- *
- * @param timeSize
- * int: the length of the sample buffers you will be analyzing
- * @param sampleRate
- * float: the sample rate of the audio you will be analyzing
- */
- public FFT(int timeSize, float sampleRate)
- {
- super(timeSize, sampleRate);
- if ((timeSize & (timeSize - 1)) != 0)
- {
- throw new IllegalArgumentException("FFT: timeSize must be a power of two.");
- }
- buildReverseTable();
- buildTrigTables();
- }
-
- protected void allocateArrays()
- {
- spectrum = new float[timeSize / 2 + 1];
- real = new float[timeSize];
- imag = new float[timeSize];
- }
-
- public void scaleBand(int i, float s)
- {
- if (s < 0)
- {
- Minim.error("Can't scale a frequency band by a negative value.");
- return;
- }
-
- real[i] *= s;
- imag[i] *= s;
- spectrum[i] *= s;
-
- if (i != 0 && i != timeSize / 2)
- {
- real[timeSize - i] = real[i];
- imag[timeSize - i] = -imag[i];
- }
- }
-
- public void setBand(int i, float a)
- {
- if (a < 0)
- {
- Minim.error("Can't set a frequency band to a negative value.");
- return;
- }
- if (real[i] == 0 && imag[i] == 0)
- {
- real[i] = a;
- spectrum[i] = a;
- }
- else
- {
- real[i] /= spectrum[i];
- imag[i] /= spectrum[i];
- spectrum[i] = a;
- real[i] *= spectrum[i];
- imag[i] *= spectrum[i];
- }
- if (i != 0 && i != timeSize / 2)
- {
- real[timeSize - i] = real[i];
- imag[timeSize - i] = -imag[i];
- }
- }
-
- // performs an in-place fft on the data in the real and imag arrays
- // bit reversing is not necessary as the data will already be bit reversed
- private void fft()
- {
- for (int halfSize = 1; halfSize < real.length; halfSize *= 2)
- {
- // float k = -(float)Math.PI/halfSize;
- // phase shift step
- // float phaseShiftStepR = (float)Math.cos(k);
- // float phaseShiftStepI = (float)Math.sin(k);
- // using lookup table
- float phaseShiftStepR = cos(halfSize);
- float phaseShiftStepI = sin(halfSize);
- // current phase shift
- float currentPhaseShiftR = 1.0f;
- float currentPhaseShiftI = 0.0f;
- for (int fftStep = 0; fftStep < halfSize; fftStep++)
- {
- for (int i = fftStep; i < real.length; i += 2 * halfSize)
- {
- int off = i + halfSize;
- float tr = (currentPhaseShiftR * real[off]) - (currentPhaseShiftI * imag[off]);
- float ti = (currentPhaseShiftR * imag[off]) + (currentPhaseShiftI * real[off]);
- real[off] = real[i] - tr;
- imag[off] = imag[i] - ti;
- real[i] += tr;
- imag[i] += ti;
- }
- float tmpR = currentPhaseShiftR;
- currentPhaseShiftR = (tmpR * phaseShiftStepR) - (currentPhaseShiftI * phaseShiftStepI);
- currentPhaseShiftI = (tmpR * phaseShiftStepI) + (currentPhaseShiftI * phaseShiftStepR);
- }
- }
- }
-
- public void forward(float[] buffer)
- {
- if (buffer.length != timeSize)
- {
- Minim
- .error("FFT.forward: The length of the passed sample buffer must be equal to timeSize().");
- return;
- }
- doWindow(buffer);
- // copy samples to real/imag in bit-reversed order
- bitReverseSamples(buffer, 0);
- // perform the fft
- fft();
- // fill the spectrum buffer with amplitudes
- fillSpectrum();
- }
-
- @Override
- public void forward(float[] buffer, int startAt)
- {
- if ( buffer.length - startAt < timeSize )
- {
- Minim.error( "FourierTransform.forward: not enough samples in the buffer between " +
- startAt + " and " + buffer.length + " to perform a transform."
- );
- return;
- }
-
- currentWindow.apply( buffer, startAt, timeSize );
- bitReverseSamples(buffer, startAt);
- fft();
- fillSpectrum();
- }
-
- /**
- * Performs a forward transform on the passed buffers.
- *
- * @param buffReal the real part of the time domain signal to transform
- * @param buffImag the imaginary part of the time domain signal to transform
- */
- public void forward(float[] buffReal, float[] buffImag)
- {
- if (buffReal.length != timeSize || buffImag.length != timeSize)
- {
- Minim
- .error("FFT.forward: The length of the passed buffers must be equal to timeSize().");
- return;
- }
- setComplex(buffReal, buffImag);
- bitReverseComplex();
- fft();
- fillSpectrum();
- }
-
- public void inverse(float[] buffer)
- {
- if (buffer.length > real.length)
- {
- Minim
- .error("FFT.inverse: the passed array's length must equal FFT.timeSize().");
- return;
- }
- // conjugate
- for (int i = 0; i < timeSize; i++)
- {
- imag[i] *= -1;
- }
- bitReverseComplex();
- fft();
- // copy the result in real into buffer, scaling as we do
- for (int i = 0; i < buffer.length; i++)
- {
- buffer[i] = real[i] / real.length;
- }
- }
-
- private int[] reverse;
-
- private void buildReverseTable()
- {
- int N = timeSize;
- reverse = new int[N];
-
- // set up the bit reversing table
- reverse[0] = 0;
- for (int limit = 1, bit = N / 2; limit < N; limit <<= 1, bit >>= 1)
- for (int i = 0; i < limit; i++)
- reverse[i + limit] = reverse[i] + bit;
- }
-
- // copies the values in the samples array into the real array
- // in bit reversed order. the imag array is filled with zeros.
- private void bitReverseSamples(float[] samples, int startAt)
- {
- for (int i = 0; i < timeSize; ++i)
- {
- real[i] = samples[ startAt + reverse[i] ];
- imag[i] = 0.0f;
- }
- }
-
- // bit reverse real[] and imag[]
- private void bitReverseComplex()
- {
- float[] revReal = new float[real.length];
- float[] revImag = new float[imag.length];
- for (int i = 0; i < real.length; i++)
- {
- revReal[i] = real[reverse[i]];
- revImag[i] = imag[reverse[i]];
- }
- real = revReal;
- imag = revImag;
- }
-
- // lookup tables
-
- private float[] sinlookup;
- private float[] coslookup;
-
- private float sin(int i)
- {
- return sinlookup[i];
- }
-
- private float cos(int i)
- {
- return coslookup[i];
- }
-
- private void buildTrigTables()
- {
- int N = timeSize;
- sinlookup = new float[N];
- coslookup = new float[N];
- for (int i = 0; i < N; i++)
- {
- sinlookup[i] = (float) Math.sin(-(float) Math.PI / i);
- coslookup[i] = (float) Math.cos(-(float) Math.PI / i);
- }
- }
-}
diff --git a/src/ddf/minim/analysis/FourierTransform.java b/src/ddf/minim/analysis/FourierTransform.java
deleted file mode 100644
index d7cc075..0000000
--- a/src/ddf/minim/analysis/FourierTransform.java
+++ /dev/null
@@ -1,940 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede N
, there will be
- * N/2
frequency bands in the spectrum.
- * timeSize
of 1024 and and a sampleRate
of 44100
- * Hz, then the spectrum will contain values for frequencies below 22010 Hz,
- * which is the Nyquist frequency (half the sample rate). If you ask for the
- * value of band number 5, this will correspond to a frequency band centered on
- * 5/1024 * 44100 = 0.0048828125 * 44100 = 215 Hz
. The width of
- * that frequency band is equal to 2/1024
, expressed as a
- * fraction of the total bandwidth of the spectrum. The total bandwith of the
- * spectrum is equal to the Nyquist frequency, which in this case is 22050, so
- * the bandwidth is equal to about 50 Hz. It is not necessary for you to
- * remember all of these relationships, though it is good to be aware of them.
- * The function getFreq()
allows you to query the spectrum with a
- * frequency in Hz and the function getBandWidth()
will return
- * the bandwidth in Hz of each frequency band in the spectrum.
- * audio
is an AudioSource and fft
is an FFT (one
- * of the derived classes of FourierTransform).
- *
- *
- * fft.forward(audio.left);
- * for (int i = 0; i < fft.specSize(); i++)
- * {
- * // draw the line for frequency band i, scaling it by 4 so we can see it a bit better
- * line(i, height, i, height - fft.getBand(i) * 4);
- * }
- *
- *
- * Windowing
- * window()
- * function with an appropriate WindowFunction, such as HammingWindow()
,
- * the sample buffers passed to the object for analysis will be shaped by the current
- * window before being transformed. The result of using a window is to reduce
- * the leakage in the spectrum somewhat.
- * linAverages()
allows you to specify the number of averages
- * that you want and will group frequency bands into groups of equal number. So
- * if you have a spectrum with 512 frequency bands and you ask for 64 averages,
- * each average will span 8 bands of the full spectrum.
- * logAverages()
will group frequency bands by octave and allows
- * you to specify the size of the smallest octave to use (in Hz) and also how
- * many bands to split each octave into. So you might ask for the smallest
- * octave to be 60 Hz and to split each octave into two bands. The result is
- * that the bandwidth of each average is different. One frequency is an octave
- * above another when it's frequency is twice that of the lower frequency. So,
- * 120 Hz is an octave above 60 Hz, 240 Hz is an octave above 120 Hz, and so on.
- * When octaves are split, they are split based on Hz, so if you split the
- * octave 60-120 Hz in half, you will get 60-90Hz and 90-120Hz. You can see how
- * these bandwidths increase as your octave sizes grow. For instance, the last
- * octave will always span sampleRate/4 - sampleRate/2
, which in
- * the case of audio sampled at 44100 Hz is 11025-22010 Hz. These
- * logarithmically spaced averages are usually much more useful than the full
- * spectrum or the linearly spaced averages because they map more directly to
- * how humans perceive sound.
- * calcAvg()
allows you to specify the frequency band you want an
- * average calculated for. You might ask for 60-500Hz and this function will
- * group together the bands from the full spectrum that fall into that range and
- * average their amplitudes for you.
- * noAverages()
. This will not impact your ability to use
- * calcAvg()
, it will merely prevent the object from calculating
- * an average array every time you use forward()
.
- * timeSize()
long. The set
and
- * scale
functions allow you the ability to shape the spectrum
- * already stored in the object before taking the inverse transform. You might
- * use these to filter frequencies in a spectrum or modify it in some other way.
- *
- * @author Damien Di Fede
- * @see The Discrete Fourier Transform
- *
- * @invisible
- */
-public abstract class FourierTransform
-{
- /** A constant indicating no window should be used on sample buffers.
- * Also referred as a Rectangular window.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Rectangular window
- * @related WindowFunction
- */
- public static final WindowFunction NONE = new RectangularWindow();
-
- /** A constant indicating a Hamming window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Hamming window
- * @related WindowFunction
- */
- public static final WindowFunction HAMMING = new HammingWindow();
-
- /** A constant indicating a Hann window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Hann window
- * @related WindowFunction
- */
- public static final WindowFunction HANN = new HannWindow();
-
- /** A constant indicating a Cosine window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Cosine window
- * @related WindowFunction
- */
- public static final WindowFunction COSINE = new CosineWindow();
-
- /** A constant indicating a Triangular window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Triangular window
- * @related WindowFunction
- */
- public static final WindowFunction TRIANGULAR = new TriangularWindow();
-
- /** A constant indicating a Bartlett window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Bartlett window
- * @related WindowFunction
- */
- public static final WindowFunction BARTLETT = new BartlettWindow();
-
- /** A constant indicating a Bartlett-Hann window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Bartlett-Hann window
- * @related WindowFunction
- */
- public static final WindowFunction BARTLETTHANN = new BartlettHannWindow();
-
- /** A constant indicating a Lanczos window should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Lanczos window
- * @related WindowFunction
- */
- public static final WindowFunction LANCZOS = new LanczosWindow();
-
- /** A constant indicating a Blackman window with a default value should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Blackman window
- * @related WindowFunction
- */
- public static final WindowFunction BLACKMAN = new BlackmanWindow();
-
- /** A constant indicating a Gauss with a default value should be used on sample buffers.
- *
- * @example Analysis/FFT/Windows
- *
- * @related Gauss window
- * @related WindowFunction
- */
- public static final WindowFunction GAUSS = new GaussWindow();
-
- protected static final int LINAVG = 1;
- protected static final int LOGAVG = 2;
- protected static final int NOAVG = 3;
-
- protected static final float TWO_PI = (float) (2 * Math.PI);
- protected int timeSize;
- protected int sampleRate;
- protected float bandWidth;
- protected WindowFunction currentWindow;
- protected float[] real;
- protected float[] imag;
- protected float[] spectrum;
- protected float[] averages;
- protected int whichAverage;
- protected int octaves;
- protected int avgPerOctave;
-
- /**
- * Construct a FourierTransform that will analyze sample buffers that are
- * ts
samples long and contain samples with a sr
- * sample rate.
- *
- * @param ts
- * the length of the buffers that will be analyzed
- * @param sr
- * the sample rate of the samples that will be analyzed
- */
- FourierTransform(int ts, float sr)
- {
- timeSize = ts;
- sampleRate = (int)sr;
- bandWidth = (2f / timeSize) * ((float)sampleRate / 2f);
- noAverages();
- allocateArrays();
- currentWindow = new RectangularWindow(); // a Rectangular window is analogous to using no window.
- }
-
- // allocating real, imag, and spectrum are the responsibility of derived
- // classes
- // because the size of the arrays will depend on the implementation being used
- // this enforces that responsibility
- protected abstract void allocateArrays();
-
- protected void setComplex(float[] r, float[] i)
- {
- if (real.length != r.length && imag.length != i.length)
- {
- Minim
- .error("FourierTransform.setComplex: the two arrays must be the same length as their member counterparts.");
- }
- else
- {
- System.arraycopy(r, 0, real, 0, r.length);
- System.arraycopy(i, 0, imag, 0, i.length);
- }
- }
-
- // fill the spectrum array with the amps of the data in real and imag
- // used so that this class can handle creating the average array
- // and also do spectrum shaping if necessary
- protected void fillSpectrum()
- {
- for (int i = 0; i < spectrum.length; i++)
- {
- spectrum[i] = (float) Math.sqrt(real[i] * real[i] + imag[i] * imag[i]);
- }
-
- if (whichAverage == LINAVG)
- {
- int avgWidth = (int) spectrum.length / averages.length;
- for (int i = 0; i < averages.length; i++)
- {
- float avg = 0;
- int j;
- for (j = 0; j < avgWidth; j++)
- {
- int offset = j + i * avgWidth;
- if (offset < spectrum.length)
- {
- avg += spectrum[offset];
- }
- else
- {
- break;
- }
- }
- avg /= j + 1;
- averages[i] = avg;
- }
- }
- else if (whichAverage == LOGAVG)
- {
- for (int i = 0; i < octaves; i++)
- {
- float lowFreq, hiFreq, freqStep;
- if (i == 0)
- {
- lowFreq = 0;
- }
- else
- {
- lowFreq = (sampleRate / 2) / (float) Math.pow(2, octaves - i);
- }
- hiFreq = (sampleRate / 2) / (float) Math.pow(2, octaves - i - 1);
- freqStep = (hiFreq - lowFreq) / avgPerOctave;
- float f = lowFreq;
- for (int j = 0; j < avgPerOctave; j++)
- {
- int offset = j + i * avgPerOctave;
- averages[offset] = calcAvg(f, f + freqStep);
- f += freqStep;
- }
- }
- }
- }
-
- /**
- * Sets the object to not compute averages.
- *
- * @related FFT
- */
- public void noAverages()
- {
- averages = new float[0];
- whichAverage = NOAVG;
- }
-
- /**
- * Sets the number of averages used when computing the spectrum and spaces the
- * averages in a linear manner. In other words, each average band will be
- * specSize() / numAvg
bands wide.
- *
- * @param numAvg
- * int: how many averages to compute
- *
- * @example Analysis/SoundSpectrum
- *
- * @related FFT
- */
- public void linAverages(int numAvg)
- {
- if (numAvg > spectrum.length / 2)
- {
- Minim.error("The number of averages for this transform can be at most "
- + spectrum.length / 2 + ".");
- return;
- }
- else
- {
- averages = new float[numAvg];
- }
- whichAverage = LINAVG;
- }
-
- /**
- * Sets the number of averages used when computing the spectrum based on the
- * minimum bandwidth for an octave and the number of bands per octave. For
- * example, with audio that has a sample rate of 44100 Hz,
- * logAverages(11, 1)
will result in 12 averages, each
- * corresponding to an octave, the first spanning 0 to 11 Hz. To ensure that
- * each octave band is a full octave, the number of octaves is computed by
- * dividing the Nyquist frequency by two, and then the result of that by two,
- * and so on. This means that the actual bandwidth of the lowest octave may
- * not be exactly the value specified.
- *
- * @param minBandwidth
- * int: the minimum bandwidth used for an octave, in Hertz.
- * @param bandsPerOctave
- * int: how many bands to split each octave into
- *
- * @example Analysis/SoundSpectrum
- *
- * @related FFT
- */
- public void logAverages(int minBandwidth, int bandsPerOctave)
- {
- float nyq = (float) sampleRate / 2f;
- octaves = 1;
- while ((nyq /= 2) > minBandwidth)
- {
- octaves++;
- }
- Minim.debug("Number of octaves = " + octaves);
- avgPerOctave = bandsPerOctave;
- averages = new float[octaves * bandsPerOctave];
- whichAverage = LOGAVG;
- }
-
- /**
- * Sets the window to use on the samples before taking the forward transform.
- * If an invalid window is asked for, an error will be reported and the
- * current window will not be changed.
- *
- * @param windowFunction
- * the new WindowFunction to use, typically one of the statically defined
- * windows like HAMMING or BLACKMAN
- *
- * @related FFT
- * @related WindowFunction
- *
- * @example Analysis/FFT/Windows
- */
- public void window(WindowFunction windowFunction)
- {
- this.currentWindow = windowFunction;
- }
-
- protected void doWindow(float[] samples)
- {
- currentWindow.apply(samples);
- }
-
- /**
- * Returns the length of the time domain signal expected by this transform.
- *
- * @return int: the length of the time domain signal expected by this transform
- *
- * @related FFT
- */
- public int timeSize()
- {
- return timeSize;
- }
-
- /**
- * Returns the size of the spectrum created by this transform. In other words,
- * the number of frequency bands produced by this transform. This is typically
- * equal to timeSize()/2 + 1
, see above for an explanation.
- *
- * @return int: the size of the spectrum
- *
- * @example Basics/AnalyzeSound
- *
- * @related FFT
- */
- public int specSize()
- {
- return spectrum.length;
- }
-
- /**
- * Returns the amplitude of the requested frequency band.
- *
- * @param i
- * int: the index of a frequency band
- *
- * @return float: the amplitude of the requested frequency band
- *
- * @example Basics/AnalyzeSound
- *
- * @related FFT
- */
- public float getBand(int i)
- {
- if (i < 0) i = 0;
- if (i > spectrum.length - 1) i = spectrum.length - 1;
- return spectrum[i];
- }
-
- /**
- * Returns the width of each frequency band in the spectrum (in Hz). It should
- * be noted that the bandwidth of the first and last frequency bands is half
- * as large as the value returned by this function.
- *
- * @return float: the width of each frequency band in Hz.
- *
- * @related FFT
- */
- public float getBandWidth()
- {
- return bandWidth;
- }
-
- /**
- * Returns the bandwidth of the requested average band. Using this information
- * and the return value of getAverageCenterFrequency you can determine the
- * lower and upper frequency of any average band.
- *
- * @param averageIndex
- * int: the index of the average you want the bandwidth of
- *
- * @return float: the bandwidth of the request average band, in Hertz.
- *
- * @example Analysis/SoundSpectrum
- *
- * @see #getAverageCenterFrequency(int)
- *
- * @related getAverageCenterFrequency ( )
- * @related FFT
- *
- */
- public float getAverageBandWidth( int averageIndex )
- {
- if ( whichAverage == LINAVG )
- {
- // an average represents a certain number of bands in the spectrum
- int avgWidth = (int) spectrum.length / averages.length;
- return avgWidth * getBandWidth();
-
- }
- else if ( whichAverage == LOGAVG )
- {
- // which "octave" is this index in?
- int octave = averageIndex / avgPerOctave;
- float lowFreq, hiFreq, freqStep;
- // figure out the low frequency for this octave
- if (octave == 0)
- {
- lowFreq = 0;
- }
- else
- {
- lowFreq = (sampleRate / 2) / (float) Math.pow(2, octaves - octave);
- }
- // and the high frequency for this octave
- hiFreq = (sampleRate / 2) / (float) Math.pow(2, octaves - octave - 1);
- // each average band within the octave will be this big
- freqStep = (hiFreq - lowFreq) / avgPerOctave;
-
- return freqStep;
- }
-
- return 0;
- }
-
- /**
- * Sets the amplitude of the ith
frequency band to
- * a
. You can use this to shape the spectrum before using
- * inverse()
.
- *
- * @param i
- * int: the frequency band to modify
- * @param a
- * float: the new amplitude
- *
- * @example Analysis/FFT/SetBand
- *
- * @related FFT
- */
- public abstract void setBand(int i, float a);
-
- /**
- * Scales the amplitude of the ith
frequency band
- * by s
. You can use this to shape the spectrum before using
- * inverse()
.
- *
- * @param i
- * int: the frequency band to modify
- * @param s
- * float: the scaling factor
- *
- * @example Analysis/FFT/ScaleBand
- *
- * @related FFT
- */
- public abstract void scaleBand(int i, float s);
-
- /**
- * Returns the index of the frequency band that contains the requested
- * frequency.
- *
- * @param freq
- * float: the frequency you want the index for (in Hz)
- *
- * @return int: the index of the frequency band that contains freq
- *
- * @related FFT
- *
- * @example Analysis/SoundSpectrum
- */
- public int freqToIndex(float freq)
- {
- // special case: freq is lower than the bandwidth of spectrum[0]
- if (freq < getBandWidth() / 2) return 0;
- // special case: freq is within the bandwidth of spectrum[spectrum.length - 1]
- if (freq > sampleRate / 2 - getBandWidth() / 2) return spectrum.length - 1;
- // all other cases
- float fraction = freq / (float) sampleRate;
- int i = Math.round(timeSize * fraction);
- return i;
- }
-
- /**
- * Returns the middle frequency of the ith band.
- *
- * @param i
- * int: the index of the band you want to middle frequency of
- *
- * @return float: the middle frequency, in Hertz, of the requested band of the spectrum
- *
- * @related FFT
- */
- public float indexToFreq(int i)
- {
- float bw = getBandWidth();
- // special case: the width of the first bin is half that of the others.
- // so the center frequency is a quarter of the way.
- if ( i == 0 ) return bw * 0.25f;
- // special case: the width of the last bin is half that of the others.
- if ( i == spectrum.length - 1 )
- {
- float lastBinBeginFreq = (sampleRate / 2) - (bw / 2);
- float binHalfWidth = bw * 0.25f;
- return lastBinBeginFreq + binHalfWidth;
- }
- // the center frequency of the ith band is simply i*bw
- // because the first band is half the width of all others.
- // treating it as if it wasn't offsets us to the middle
- // of the band.
- return i*bw;
- }
-
- /**
- * Returns the center frequency of the ith average band.
- *
- * @param i
- * int: which average band you want the center frequency of.
- *
- * @return float: the center frequency of the ith average band.
- *
- * @related FFT
- *
- * @example Analysis/SoundSpectrum
- */
- public float getAverageCenterFrequency(int i)
- {
- if ( whichAverage == LINAVG )
- {
- // an average represents a certain number of bands in the spectrum
- int avgWidth = (int) spectrum.length / averages.length;
- // the "center" bin of the average, this is fudgy.
- int centerBinIndex = i*avgWidth + avgWidth/2;
- return indexToFreq(centerBinIndex);
-
- }
- else if ( whichAverage == LOGAVG )
- {
- // which "octave" is this index in?
- int octave = i / avgPerOctave;
- // which band within that octave is this?
- int offset = i % avgPerOctave;
- float lowFreq, hiFreq, freqStep;
- // figure out the low frequency for this octave
- if (octave == 0)
- {
- lowFreq = 0;
- }
- else
- {
- lowFreq = (sampleRate / 2) / (float) Math.pow(2, octaves - octave);
- }
- // and the high frequency for this octave
- hiFreq = (sampleRate / 2) / (float) Math.pow(2, octaves - octave - 1);
- // each average band within the octave will be this big
- freqStep = (hiFreq - lowFreq) / avgPerOctave;
- // figure out the low frequency of the band we care about
- float f = lowFreq + offset*freqStep;
- // the center of the band will be the low plus half the width
- return f + freqStep/2;
- }
-
- return 0;
- }
-
-
- /**
- * Gets the amplitude of the requested frequency in the spectrum.
- *
- * @param freq
- * float: the frequency in Hz
- *
- * @return float: the amplitude of the frequency in the spectrum
- *
- * @related FFT
- */
- public float getFreq(float freq)
- {
- return getBand(freqToIndex(freq));
- }
-
- /**
- * Sets the amplitude of the requested frequency in the spectrum to
- * a
.
- *
- * @param freq
- * float: the frequency in Hz
- * @param a
- * float: the new amplitude
- *
- * @example Analysis/FFT/SetFreq
- *
- * @related FFT
- */
- public void setFreq(float freq, float a)
- {
- setBand(freqToIndex(freq), a);
- }
-
- /**
- * Scales the amplitude of the requested frequency by a
.
- *
- * @param freq
- * float: the frequency in Hz
- * @param s
- * float: the scaling factor
- *
- * @example Analysis/FFT/ScaleFreq
- *
- * @related FFT
- */
- public void scaleFreq(float freq, float s)
- {
- scaleBand(freqToIndex(freq), s);
- }
-
- /**
- * Returns the number of averages currently being calculated.
- *
- * @return int: the length of the averages array
- *
- * @related FFT
- */
- public int avgSize()
- {
- return averages.length;
- }
-
- /**
- * Gets the value of the ith
average.
- *
- * @param i
- * int: the average you want the value of
- * @return float: the value of the requested average band
- *
- * @related FFT
- */
- public float getAvg(int i)
- {
- float ret;
- if (averages.length > 0)
- ret = averages[i];
- else
- ret = 0;
- return ret;
- }
-
- /**
- * Calculate the average amplitude of the frequency band bounded by
- * lowFreq
and hiFreq
, inclusive.
- *
- * @param lowFreq
- * float: the lower bound of the band, in Hertz
- * @param hiFreq
- * float: the upper bound of the band, in Hertz
- *
- * @return float: the average of all spectrum values within the bounds
- *
- * @related FFT
- */
- public float calcAvg(float lowFreq, float hiFreq)
- {
- int lowBound = freqToIndex(lowFreq);
- int hiBound = freqToIndex(hiFreq);
- float avg = 0;
- for (int i = lowBound; i <= hiBound; i++)
- {
- avg += spectrum[i];
- }
- avg /= (hiBound - lowBound + 1);
- return avg;
- }
-
- /**
- * Get the Real part of the Complex representation of the spectrum.
- *
- * @return float[]: an array containing the values for the Real part of the spectrum.
- *
- * @related FFT
- */
- public float[] getSpectrumReal()
- {
- return real;
- }
-
- /**
- * Get the Imaginary part of the Complex representation of the spectrum.
- *
- * @return float[]: an array containing the values for the Imaginary part of the spectrum.
- *
- * @related FFT
- */
- public float[] getSpectrumImaginary()
- {
- return imag;
- }
-
-
- /**
- * Performs a forward transform on buffer
.
- *
- * @param buffer
- * float[]: the buffer to analyze, must be the same length as timeSize()
- *
- * @example Basics/AnalyzeSound
- *
- * @related FFT
- */
- public abstract void forward(float[] buffer);
-
- /**
- * Performs a forward transform on values in buffer
.
- *
- * @param buffer
- * float[]: the buffer to analyze, must be the same length as timeSize()
- * @param startAt
- * int: the index to start at in the buffer. there must be at least timeSize() samples
- * between the starting index and the end of the buffer. If there aren't, an
- * error will be issued and the operation will not be performed.
- *
- */
- public void forward(float[] buffer, int startAt)
- {
- if ( buffer.length - startAt < timeSize )
- {
- Minim.error( "FourierTransform.forward: not enough samples in the buffer between " +
- startAt + " and " + buffer.length + " to perform a transform."
- );
- return;
- }
-
- // copy the section of samples we want to analyze
- float[] section = new float[timeSize];
- System.arraycopy(buffer, startAt, section, 0, section.length);
- forward(section);
- }
-
- /**
- * Performs a forward transform on buffer
.
- *
- * @param buffer
- * AudioBuffer: the buffer to analyze
- *
- */
- public void forward(AudioBuffer buffer)
- {
- forward(buffer.toArray());
- }
-
- /**
- * Performs a forward transform on buffer
.
- *
- * @param buffer
- * AudioBuffer: the buffer to analyze
- * @param startAt
- * int: the index to start at in the buffer. there must be at least timeSize() samples
- * between the starting index and the end of the buffer. If there aren't, an
- * error will be issued and the operation will not be performed.
- *
- */
- public void forward(AudioBuffer buffer, int startAt)
- {
- forward(buffer.toArray(), startAt);
- }
-
- /**
- * Performs an inverse transform of the frequency spectrum and places the
- * result in buffer
.
- *
- * @param buffer
- * float[]: the buffer to place the result of the inverse transform in
- *
- *
- * @related FFT
- */
- public abstract void inverse(float[] buffer);
-
- /**
- * Performs an inverse transform of the frequency spectrum and places the
- * result in buffer
.
- *
- * @param buffer
- * AudioBuffer: the buffer to place the result of the inverse transform in
- *
- */
- public void inverse(AudioBuffer buffer)
- {
- inverse(buffer.toArray());
- }
-
- /**
- * Performs an inverse transform of the frequency spectrum represented by
- * freqReal and freqImag and places the result in buffer.
- *
- * @param freqReal
- * float[]: the real part of the frequency spectrum
- * @param freqImag
- * float[]: the imaginary part the frequency spectrum
- * @param buffer
- * float[]: the buffer to place the inverse transform in
- */
- public void inverse(float[] freqReal, float[] freqImag, float[] buffer)
- {
- setComplex(freqReal, freqImag);
- inverse(buffer);
- }
-}
diff --git a/src/ddf/minim/analysis/GaussWindow.java b/src/ddf/minim/analysis/GaussWindow.java
deleted file mode 100644
index 6458b51..0000000
--- a/src/ddf/minim/analysis/GaussWindow.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede window()
- * function with an appropriate WindowFunction, such as HammingWindow()
,
- * the sample buffers passed to the object for analysis will be shaped by the current
- * window before being transformed. The result of using a window is to reduce
- * the leakage in the spectrum somewhat.
- * WindowFunction
handles work associated with various window functions
- * such as the Hamming window. To create your own window function you must extend
- * WindowFunction
and implement the {@link #value(int, int) value}
- * method which defines the shape of the window at a given offset.
- * WindowFunction
will call this method to apply the window to
- * a sample buffer. The number passed to the method is an offset within the length
- * of the window curve.
- *
- * @author Damien Di Fede
- * @author Corban Brook
- *
- * @example Analysis/FFT/Windows
- *
- * @related FFT
- */
-public abstract class WindowFunction
-{
- /** The float value of 2*PI. Provided as a convenience for subclasses. */
- protected static final float TWO_PI = (float) (2 * Math.PI);
- protected int length;
-
- public WindowFunction()
- {
- }
-
- /**
- * Apply the window function to a sample buffer.
- *
- * @param samples a sample buffer
- */
- public void apply(float[] samples)
- {
- this.length = samples.length;
-
- for (int n = 0; n < samples.length; n ++)
- {
- samples[n] *= value(samples.length, n);
- }
- }
-
- /**
- * Apply the window to a portion of this sample buffer,
- * given an offset from the beginning of the buffer
- * and the number of samples to be windowed.
- *
- * @param samples
- * float[]: the array of samples to apply the window to
- * @param offset
- * int: the index in the array to begin windowing
- * @param length
- * int: how many samples to apply the window to
- */
- public void apply(float[] samples, int offset, int length)
- {
- this.length = length;
-
- for(int n = offset; n < offset + length; ++n)
- {
- samples[n] *= value(length, n - offset);
- }
- }
-
- /**
- * Generates the curve of the window function.
- *
- * @param length the length of the window
- * @return the shape of the window function
- */
- public float[] generateCurve(int length)
- {
- float[] samples = new float[length];
- for (int n = 0; n < length; n++)
- {
- samples[n] = 1f * value(length, n);
- }
- return samples;
- }
-
- protected abstract float value(int length, int index);
-}
diff --git a/src/ddf/minim/analysis/package.html b/src/ddf/minim/analysis/package.html
deleted file mode 100644
index 447608a..0000000
--- a/src/ddf/minim/analysis/package.html
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-N
, there will be
- N/2
frequency bands in the spectrum.
-
- The human listening system determines the rhythm of music
- by detecting a pseudo periodical succession of beats. The signal which is
- intercepted by the ear contains a certain energy, this energy is converted
- into an electrical signal which the brain interprets. Obviously, The more
- energy the sound transports, the louder the sound will seem. But a sound will
- be heard as a beat only if his energy is largely superior to the
- sound's energy history, that is to say if the brain detects a
- brutal variation in sound energy. Therefore if the ear intercepts
- a monotonous sound with sometimes big energy peaks it will detect beats,
- however, if you play a continuous loud sound you will not perceive any beats.
- Thus, the beats are big variations of sound energy.
-
- In fact, the two algorithms in this class are based on two algorithms described in
- that paper.
- level()
, is used as the instant energy in each frame. Beats,
- then, are spikes in this value, relative to the previous one second of sound.
- In frequency energy mode, the same process is used but instead of tracking
- the level of the buffer, an FFT is used to obtain a spectrum, which is then
- divided into average bands using logAverages()
, and each of
- these bands is tracked individually. The result is that it is possible to
- track sounds that occur in different parts of the frequency spectrum
- independently (like the kick drum and snare drum).
- isOnset()
to query the algorithm
- and in frequency energy mode you use isOnset(int i)
,
- isKick()
, isSnare()
, and
- isRange()
to query particular frequnecy bands or ranges of
- frequency bands. It should be noted that isKick()
,
- isSnare()
, and isHat()
merely call
- isRange()
with values determined by testing the algorithm
- against music with a heavy beat and they may not be appropriate for all kinds
- of music. If you find they are performing poorly with your music, you should
- use isRange()
directly to locate the bands that provide the
- most meaningful information for you.
-
- @libname Minim Analysis
- @indexname index_analysis
-
-
\ No newline at end of file
diff --git a/src/ddf/minim/effects/BandPass.java b/src/ddf/minim/effects/BandPass.java
deleted file mode 100644
index eafbfd6..0000000
--- a/src/ddf/minim/effects/BandPass.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede
- *
- *
- * @param p -
- * the number of poles
- */
- public void setPoles(int p)
- {
- if (p < 2)
- {
- Minim.error("ChebFilter.setPoles: The number of poles must be at least 2.");
- return;
- }
- if (p % 2 != 0)
- {
- Minim.error("ChebFilter.setPoles: The number of poles must be even.");
- return;
- }
- if (p > 20)
- {
- Minim.error("ChebFilter.setPoles: The maximum number of poles is 20.");
- }
- poles = p;
- calcCoeff();
- }
-
- /**
- * Returns the number of poles in the filter.
- *
- * @return the number of poles
- */
- public int getPoles()
- {
- return poles;
- }
-
- //where the poles will wind up
- float[] ca = new float[23];
- float[] cb = new float[23];
-
- // temporary arrays for working with ca and cb
- float[] ta = new float[23];
- float[] tb = new float[23];
-
- //arrays to hold the two-pole coefficients
- // used during the aggregation process
- float[] pa = new float[3];
- float[] pb = new float[2];
-
- protected synchronized void calcCoeff()
- {
- // System.out.println("ChebFilter is calculating coefficients...");
-
- // initialize our arrays
- for(int i = 0; i < 23; ++i)
- {
- ca[i] = cb[i] = ta[i] = tb[i] = 0.f;
- }
-
- // I don't know why this must be done
- ca[2] = 1.f;
- cb[2] = 1.f;
-
- // calculate two poles at a time
- for (int p = 1; p <= poles / 2; p++)
- {
- // calc pair p, put the results in pa and pb
- calcTwoPole(p, pa, pb);
-
- // copy ca and cb into ta and tb
- System.arraycopy(ca, 0, ta, 0, ta.length);
- System.arraycopy(cb, 0, tb, 0, tb.length);
-
- // add coefficients to the cascade
- for (int i = 2; i < 23; i++)
- {
- ca[i] = pa[0]*ta[i] + pa[1]*ta[i-1] + pa[2]* ta[i-2];
- cb[i] = tb[i] - pb[0]*tb[i-1] - pb[1] * tb[i-2];
- }
- }
-
- // final stage of combining coefficients
- cb[2] = 0;
- for (int i = 0; i < 21; i++)
- {
- ca[i] = ca[i + 2];
- cb[i] = -cb[i + 2];
- }
-
- // normalize the gain
- float sa = 0;
- float sb = 0;
- for (int i = 0; i < 21; i++)
- {
- if (type == LP)
- {
- sa += ca[i];
- sb += cb[i];
- }
- else
- {
- sa += ca[i] * (float) Math.pow(-1, i);
- sb += cb[i] * (float) Math.pow(-1, i);
- }
- }
-
- float gain = sa / (1 - sb);
-
- for (int i = 0; i < 21; i++)
- {
- ca[i] /= gain;
- }
-
- // initialize the coefficient arrays used by process()
- // but only if the number of poles has changed
- if ( a == null || a.length != poles + 1 )
- {
- a = new float[poles + 1];
- }
- if ( b == null || b.length != poles )
- {
- b = new float[poles];
- }
- // copy the values from ca and cb into a and b
- // in this implementation cb[0] = 0 and cb[1] is where
- // the b coefficients begin, so they are numbered the way
- // one normally numbers coefficients when talking about IIR filters
- // however, process() expects b[0] to be the coefficient B1
- // so we copy cb over to b starting at index 1
- System.arraycopy(ca, 0, a, 0, a.length);
- System.arraycopy(cb, 1, b, 0, b.length);
- }
-
- private void calcTwoPole(int p, float[] pa, float[] pb)
- {
- float np = (float) poles;
-
- // precalc
- float angle = PI / (np * 2) + (p - 1) * PI / np;
-
- float rp = -(float) Math.cos(angle);
- float ip = (float) Math.sin(angle);
-
- // warp from a circle to an ellipse
- if (ripple > 0)
- {
- // precalc
- float ratio = 100.f / (100.f - ripple);
- float ratioSquared = ratio * ratio;
-
- float es = 1.f / (float) Math.sqrt( ratioSquared - 1.f );
-
- float oneOverNP = 1.f / np;
- float esSquared = es * es;
-
- float vx = oneOverNP * (float) Math.log( es + Math.sqrt(esSquared + 1.f) );
- float kx = oneOverNP * (float) Math.log( es + Math.sqrt(esSquared - 1.f) );
-
- float expKX = (float)Math.exp(kx);
- float expNKX = (float)Math.exp(-kx);
-
- kx = (expKX + expNKX) * 0.5f;
-
- float expVX = (float)Math.exp(vx);
- float expNVX = (float)Math.exp(-vx);
- float oneOverKX = 1.f / kx;
-
- rp *= ( (expVX - expNVX) * 0.5f ) * oneOverKX;
- ip *= ( (expVX + expNVX) * 0.5f ) * oneOverKX;
- }
-
- // s-domain to z-domain conversion
- float t = 2.f * (float) Math.tan(0.5f);
- float w = TWO_PI * ( frequency() / sampleRate() );
- float m = rp * rp + ip * ip;
-
- // precalc
- float fourTimesRPTimesT = 4.f * rp * t;
- float tSquared = t * t;
- float mTimesTsquared = m * tSquared;
- float tSquaredTimes2 = 2.f * tSquared;
-
- float d = 4.f - fourTimesRPTimesT + mTimesTsquared;
-
- // precalc
- float oneOverD = 1.f / d;
-
- float x0 = tSquared * oneOverD;
- float x1 = tSquaredTimes2 * oneOverD;
- float x2 = x0;
-
- float y1 = ( 8.f - (tSquaredTimes2 * m) ) * oneOverD;
- float y2 = ( -4.f - fourTimesRPTimesT - mTimesTsquared ) * oneOverD;
-
- // LP to LP, or LP to HP transform
- float k;
- float halfW = w*0.5f;
-
- if (type == HP)
- {
- k = -(float)Math.cos( halfW + 0.5f ) / (float)Math.cos( halfW - 0.5f );
- }
- else
- {
- k = (float)Math.sin(0.5f - halfW) / (float)Math.sin(0.5f + halfW);
- }
-
- // precalc
- float kSquared = k * k;
- float x1timesK = x1 * k;
- float kDoubled = 2.f * k;
- float y1timesK = y1 * k;
-
- d = 1.f + y1timesK - y2 * kSquared;
-
- // precalc
- oneOverD = 1.f / d;
-
- pa[0] = ( x0 - x1timesK + (x2 * kSquared) ) * oneOverD;
- pa[1] = ( (-kDoubled * x0) + x1 + (x1 * kSquared) - (kDoubled * x2) ) * oneOverD;
- pa[2] = ( (x0 * kSquared) - x1timesK + x2) * oneOverD;
-
- pb[0] = ( kDoubled + y1 + (y1 * kSquared) - (y2 * kDoubled) ) * oneOverD;
- pb[1] = ( -kSquared - y1timesK + y2 ) * oneOverD;
-
- if (type == HP)
- {
- pa[1] = -pa[1];
- pb[0] = -pb[0];
- }
- }
-}
diff --git a/src/ddf/minim/effects/Convolver.java b/src/ddf/minim/effects/Convolver.java
deleted file mode 100644
index c2c03eb..0000000
--- a/src/ddf/minim/effects/Convolver.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede
- *
- * Cutoff Frequency
- *
- * (expressed as a fraction of the sampling rate)0.02
- * 0.05
- * 0.10
- * 0.25
- * 0.40
- * 0.45
- * 0.48
- *
- *
- * Maximum poles
- * 4
- * 6
- * 10
- * 20
- * 10
- * 6
- * 4
- * Convolver
is an effect that convolves a signal with a kernal.
- * The kernal can be thought of as the impulse response of an audio filter, or
- * simply as a set of weighting coefficients. Convolver
performs
- * brute-force convolution, meaning that it is slow, relatively speaking.
- * However, the algorithm is very straighforward. Each output sample
- * i
is calculated by multiplying each kernal value
- * j
with the input sample i - j
and then summing
- * the resulting values. The output will be
- * kernal.length + signal.length - 1
samples long, so the extra
- * samples are stored in an overlap array. The overlap array from the previous
- * signal convolution is added into the beginning of the output array, which
- * results in a output signal without pops.
- *
- * @author Damien Di Fede
- * @see Convolution
- *
- */
-public class Convolver implements AudioEffect
-{
- protected float[] kernal;
- protected float[] outputL;
- protected float[] overlapL;
- protected float[] outputR;
- protected float[] overlapR;
- protected int sigLen;
-
- /**
- * Constructs a Convolver with the kernal k
that expects buffer
- * of length sigLength
.
- *
- * @param k
- * the kernal of the filter
- * @param sigLength
- * the length of the buffer that will be convolved with the kernal
- */
- public Convolver(float[] k, int sigLength)
- {
- sigLen = sigLength;
- setKernal(k);
- }
-
- /**
- * Sets the kernal to k
. The values in k
are
- * copied so it is not possible to alter the kernal after it has been set
- * except by setting it again.
- *
- * @param k
- * the kernal to use
- */
- public void setKernal(float[] k)
- {
- kernal = new float[k.length];
- System.arraycopy(k, 0, kernal, 0, k.length);
- outputL = new float[sigLen + kernal.length - 1];
- outputR = new float[sigLen + kernal.length - 1];
- overlapL = new float[outputL.length - sigLen];
- overlapR = new float[outputR.length - sigLen];
- }
-
- public void process(float[] signal)
- {
- if (signal.length != sigLen)
- {
- Minim
- .error("Convolver.process: signal.length does not equal sigLen, no processing will occurr.");
- return;
- }
- // store the overlap from the previous convolution
- System.arraycopy(outputL, signal.length, overlapL, 0, overlapL.length);
- // convolve kernal with signal and put the result in outputL
- for (int i = 0; i < outputL.length; i++)
- {
- outputL[i] = 0;
- for (int j = 0; j < kernal.length; j++)
- {
- if (i - j < 0 || i - j > signal.length) continue;
- outputL[i] += kernal[j] * signal[i - j];
- }
- }
- // copy the result into signal
- System.arraycopy(outputL, 0, signal, 0, signal.length);
- // add the overlap from the previous convolution to the beginning of signal
- for (int i = 0; i < overlapL.length; i++)
- {
- signal[i] += overlapL[i];
- }
- }
-
- public void process(float[] sigLeft, float[] sigRight)
- {
- if (sigLeft.length != sigLen || sigRight.length != sigLen)
- {
- Minim
- .error("Convolver.process: signal.length does not equal sigLen, no processing will occurr.");
- return;
- }
- System.arraycopy(outputL, sigLeft.length, overlapL, 0, overlapL.length);
- System.arraycopy(outputR, sigRight.length, overlapR, 0, overlapR.length);
- for (int i = 0; i < outputL.length; i++)
- {
- outputL[i] = 0;
- outputR[i] = 0;
- for (int j = 0; j < kernal.length; j++)
- {
- if (i - j < 0 || i - j >= sigLeft.length) continue;
- outputL[i] += kernal[j] * sigLeft[i - j];
- outputR[i] += kernal[j] * sigRight[i - j];
- }
- }
- System.arraycopy(outputL, 0, sigLeft, 0, sigLeft.length);
- System.arraycopy(outputR, 0, sigRight, 0, sigRight.length);
- for (int i = 0; i < overlapL.length; i++)
- {
- sigLeft[i] += overlapL[i];
- sigRight[i] += overlapR[i];
- }
- }
-}
diff --git a/src/ddf/minim/effects/HighPassSP.java b/src/ddf/minim/effects/HighPassSP.java
deleted file mode 100644
index 9e62015..0000000
--- a/src/ddf/minim/effects/HighPassSP.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede freq
that will be
- * used to filter audio recorded at sampleRate
.
- *
- * @param freq the cutoff frequency
- * @param sampleRate the sample rate of audio that will be filtered
- */
- public HighPassSP(float freq, float sampleRate)
- {
- super(freq, sampleRate);
- }
-
- protected void calcCoeff()
- {
- float fracFreq = frequency()/sampleRate();
- float x = (float)Math.exp(-2 * Math.PI * fracFreq);
- a = new float[] { (1+x)/2, -(1+x)/2 };
- b = new float[] { x };
- }
-}
diff --git a/src/ddf/minim/effects/IIRFilter.java b/src/ddf/minim/effects/IIRFilter.java
deleted file mode 100644
index c491dbe..0000000
--- a/src/ddf/minim/effects/IIRFilter.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede calcCoeff()
function. When filling the
- * coefficient arrays, be aware that b[0]
corresponds to
- * b1
.
- *
- * @author Damien Di Fede
- *
- */
-public abstract class IIRFilter extends UGen implements AudioEffect
-{
- public final UGenInput audio;
- public final UGenInput cutoff;
-
- /** The a coefficients. */
- protected float[] a;
- /** The b coefficients. */
- protected float[] b;
-
- /** The input values to the left of the output value currently being calculated. */
- private float[][] in;
- /** The previous output values. */
- private float[][] out;
-
- private float prevCutoff;
-
- /**
- * Constructs an IIRFilter with the given cutoff frequency that will be used
- * to filter audio recorded at sampleRate
.
- *
- * @param freq
- * the cutoff frequency
- * @param sampleRate
- * the sample rate of audio to be filtered
- */
- public IIRFilter(float freq, float sampleRate)
- {
- super();
- setSampleRate(sampleRate);
-
- audio = new UGenInput(InputType.AUDIO);
- cutoff = new UGenInput(InputType.CONTROL);
-
- // set our center frequency
- cutoff.setLastValue(freq);
-
- // force use to calculate coefficients the first time we generate
- prevCutoff = -1.f;
- }
-
- /**
- * Initializes the in and out arrays based on the number of coefficients being
- * used.
- *
- */
- private final void initArrays(int numChannels)
- {
- int memSize = (a.length >= b.length) ? a.length : b.length;
- in = new float[numChannels][memSize];
- out = new float[numChannels][memSize];
- }
-
- public final synchronized void uGenerate(float[] channels)
- {
- // make sure our coefficients are up-to-date
- if ( cutoff.getLastValue() != prevCutoff )
- {
- calcCoeff();
- prevCutoff = cutoff.getLastValue();
- }
-
- // make sure we have enough filter buffers
- if ( in == null || in.length < channels.length || (in[0].length < a.length && in[0].length < b.length) )
- {
- initArrays(channels.length);
- }
-
- // apply the filter to the sample value in each channel
- for(int i = 0; i < channels.length; i++)
- {
- System.arraycopy(in[i], 0, in[i], 1, in[i].length - 1);
- in[i][0] = audio.getLastValues()[i];
-
- float y = 0;
- for(int ci = 0; ci < a.length; ci++)
- {
- y += a[ci] * in[i][ci];
- }
- for(int ci = 0; ci < b.length; ci++)
- {
- y += b[ci] * out[i][ci];
- }
- System.arraycopy(out[i], 0, out[i], 1, out[i].length - 1);
- out[i][0] = y;
- channels[i] = y;
- }
- }
-
- public final synchronized void process(float[] signal)
- {
- setChannelCount( 1 );
- float[] tmp = new float[1];
- for (int i = 0; i < signal.length; i++)
- {
- audio.setLastValue( signal[i] );
- uGenerate(tmp);
- signal[i] = tmp[0];
- }
- }
-
- public final synchronized void process(float[] sigLeft, float[] sigRight)
- {
- setChannelCount( 2 );
- float[] tmp = new float[2];
- for (int i = 0; i < sigLeft.length; i++)
- {
- audio.getLastValues()[0] = sigLeft[i];
- audio.getLastValues()[1] = sigRight[i];
- uGenerate(tmp);
- sigLeft[i] = tmp[0];
- sigRight[i] = tmp[1];
- }
- }
-
- /**
- * Sets the cutoff/center frequency of the filter.
- * Doing this causes the coefficients to be recalculated.
- *
- * @param f
- * the new cutoff/center frequency (in Hz).
- */
- public final synchronized void setFreq(float f)
- {
- // no need to recalc if the cutoff isn't actually changing
- if ( validFreq(f) && f != cutoff.getLastValue() )
- {
- prevCutoff = f;
- cutoff.setLastValue(f);
- calcCoeff();
- }
- }
-
- /**
- * Returns true if the frequency is valid for this filter. Subclasses can
- * override this method if they want to limit center frequencies to certain
- * ranges to avoid becoming unstable. The default implementation simply
- * makes sure that f
is positive.
- *
- * @param f the frequency (in Hz) to validate
- * @return true if f
is a valid frequency for this filter
- */
- public boolean validFreq(float f)
- {
- return f > 0;
- }
-
- /**
- * Returns the cutoff frequency (in Hz).
- *
- * @return the current cutoff frequency (in Hz).
- */
- public final float frequency()
- {
- return cutoff.getLastValue();
- }
-
- /**
- * Calculates the coefficients of the filter using the current cutoff
- * frequency. To make your own IIRFilters, you must extend IIRFilter and
- * implement this function. The frequency is expressed as a fraction of the
- * sample rate. When filling the coefficient arrays, be aware that
- * b[0]
corresponds to the coefficient b1
.
- *
- */
- protected abstract void calcCoeff();
-
- /**
- * Prints the current values of the coefficients to the console.
- *
- */
- public final void printCoeff()
- {
- System.out.println("Filter coefficients: ");
- if ( a != null )
- {
- for (int i = 0; i < a.length; i++)
- {
- System.out.print(" A" + i + ": " + a[i]);
- }
- }
- System.out.println();
- if ( b != null )
- {
- for (int i = 0; i < b.length; i++)
- {
- System.out.print(" B" + (i + 1) + ": " + b[i]);
- }
- System.out.println();
- }
- }
-}
diff --git a/src/ddf/minim/effects/LowPassFS.java b/src/ddf/minim/effects/LowPassFS.java
deleted file mode 100644
index 9da1567..0000000
--- a/src/ddf/minim/effects/LowPassFS.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede freq
- * that will be used to filter audio recorded at sampleRate
.
- *
- * @param freq
- * the cutoff frequency
- * @param sampleRate
- * the sample rate of the audio that will be filtered
- */
- public LowPassFS(float freq, float sampleRate)
- {
- super(freq, sampleRate);
- }
-
- public boolean validFreq(float f)
- {
- if (f < 60)
- {
- Minim.error("This filter quickly becomes unstable below 60 Hz, setting frequency to 60 Hz.");
- return false;
- }
- return true;
- }
-
- protected void calcCoeff()
- {
- float freqFrac = frequency()/sampleRate();
- float x = (float) Math.exp(-14.445 * freqFrac);
- a = new float[] { (float) Math.pow(1 - x, 4) };
- b = new float[] { 4 * x, -6 * x * x, 4 * x * x * x, -x * x * x * x };
- }
-}
diff --git a/src/ddf/minim/effects/LowPassSP.java b/src/ddf/minim/effects/LowPassSP.java
deleted file mode 100644
index 944799d..0000000
--- a/src/ddf/minim/effects/LowPassSP.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede freq
- * that will be used to filter audio recorded at sampleRate
.
- *
- * @param freq
- * the cutoff frequency
- * @param sampleRate
- * the sample rate of the audio that will be filtered
- */
- public LowPassSP(float freq, float sampleRate)
- {
- super(freq, sampleRate);
- }
-
- protected void calcCoeff()
- {
- float fracFreq = frequency()/sampleRate();
- float x = (float)Math.exp(-2*Math.PI*fracFreq);
- a = new float[] { 1 - x };
- b = new float[] { x };
- }
-}
diff --git a/src/ddf/minim/effects/NotchFilter.java b/src/ddf/minim/effects/NotchFilter.java
deleted file mode 100644
index 5722c82..0000000
--- a/src/ddf/minim/effects/NotchFilter.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede
- *
- *
- * However, backwards compatibility must always be retained, so even when an
- * AudioInputStream implements FloatSampleInput, it must work the same way when
- * any of the byte-based read methods is called.
- * As an example, consider the following set-up:
- *
- *
- * So, what happens when a block of samples is read from pcmAIS2 ?
- *
- *
- * initFromByteArray
method of the float
- * buffer to initialize it with the 8 bit data.
- * convertToByteArray
method of the float buffer to fill the byte
- * buffer with the resulting samples.
- *
- *
- *
- *
- *
- * The lazy mechanism can save many array instantiation (and copy-) operations
- * for the sake of performance. All relevant methods exist in a second version
- * which allows explicitely to disable lazy deletion.
- * sampleCount
is reduced. A subsequent
- * increase of the sample count (which will occur frequently), will check that
- * and eventually reuse the existing array.
- * reset
functions to clear the memory and remove hidden
- * samples and channels.
- * getChannel(int)
may have a greater size than getSampleCount().
- * Consequently, be sure to never rely on the length field of the sample arrays.
- *
- * Having lazy disabled would require for each chunk that is processed
- *
- *
- *
- * By default, this class uses dithering for reduction of sample width (e.g.
- * original data was 16bit, target data is 8bit). As dithering may be needed in
- * other cases (especially when the float samples are processed using DSP
- * algorithms), or it is preferred to switch it off, dithering can be
- * explicitely switched on or off with the method setDitherMode(int).
- * For a discussion about dithering, see here
- * and
- * here.
- *
- * @author Florian Bomers
- */
-
-public class FloatSampleBuffer {
-
- /** Whether the functions without lazy parameter are lazy or not. */
- private static final boolean LAZY_DEFAULT = true;
-
- // one float array for each channel
- private Object[] channels = new Object[2];
- private int sampleCount = 0;
- private int channelCount = 0;
- private float sampleRate = 0;
- private int originalFormatType = 0;
-
- /**
- * Constant for setDitherMode: dithering will be enabled if sample size is
- * decreased
- */
- public static final int DITHER_MODE_AUTOMATIC = 0;
- /** Constant for setDitherMode: dithering will be done */
- public static final int DITHER_MODE_ON = 1;
- /** Constant for setDitherMode: dithering will not be done */
- public static final int DITHER_MODE_OFF = 2;
-
- private float ditherBits = FloatSampleTools.DEFAULT_DITHER_BITS;
-
- // e.g. the sample rate converter may want to force dithering
- private int ditherMode = DITHER_MODE_AUTOMATIC;
-
- // ////////////////////////////// initialization //////////////////////
-
- /**
- * Create an instance with initially no channels.
- */
- public FloatSampleBuffer() {
- this(0, 0, 1);
- }
-
- /**
- * Create an empty FloatSampleBuffer with the specified number of channels,
- * samples, and the specified sample rate.
- */
- public FloatSampleBuffer(int channelCount, int sampleCount, float sampleRate) {
- init(channelCount, sampleCount, sampleRate, LAZY_DEFAULT);
- }
-
- /**
- * Creates a new instance of FloatSampleBuffer and initializes it with audio
- * data given in the interleaved byte array buffer
.
- */
- public FloatSampleBuffer(byte[] buffer, int offset, int byteCount,
- AudioFormat format) {
- this(format.getChannels(), byteCount
- / (format.getSampleSizeInBits() / 8 * format.getChannels()),
- format.getSampleRate());
- initFromByteArray(buffer, offset, byteCount, format);
- }
-
- /**
- * Initialize this sample buffer to have the specified channels, sample
- * count, and sample rate. If LAZY_DEFAULT is true, as much as possible will
- * existing arrays be reused. Otherwise, any hidden channels are freed.
- *
- * @param newChannelCount
- * @param newSampleCount
- * @param newSampleRate
- * @throws IllegalArgumentException if newChannelCount or newSampleCount are
- * negative, or newSampleRate is not positive.
- */
- public void init(int newChannelCount, int newSampleCount,
- float newSampleRate) {
- init(newChannelCount, newSampleCount, newSampleRate, LAZY_DEFAULT);
- }
-
- /**
- * Initialize this sample buffer to have the specified channels, sample
- * count, and sample rate. If lazy is true, as much as possible will
- * existing arrays be reused. Otherwise, any hidden channels are freed.
- *
- * @param newChannelCount
- * @param newSampleCount
- * @param newSampleRate
- * @param lazy
- * @throws IllegalArgumentException if newChannelCount or newSampleCount are
- * negative, or newSampleRate is not positive.
- */
- public void init(int newChannelCount, int newSampleCount,
- float newSampleRate, boolean lazy) {
- if (newChannelCount < 0 || newSampleCount < 0 || newSampleRate <= 0.0f) {
- throw new IllegalArgumentException(
- "invalid parameters in initialization of FloatSampleBuffer.");
- }
- setSampleRate(newSampleRate);
- if (this.sampleCount != newSampleCount
- || this.channelCount != newChannelCount) {
- createChannels(newChannelCount, newSampleCount, lazy);
- }
- }
-
- /**
- * Verify that the specified AudioFormat can be converted to and from. If
- * the format is not supported, an IllegalArgumentException is thrown.
- *
- * @throws IllegalArgumentException if the format is not supported
- */
- public static void checkFormatSupported(AudioFormat format) {
- FloatSampleTools.getFormatType(format);
- }
-
- /**
- * Grow the channels array to allow at least channelCount elements. If
- * !lazy, then channels will be resized to be exactly channelCount elements.
- * The new elements will be null.
- *
- * @param newChannelCount
- * @param lazy
- */
- private final void grow(int newChannelCount, boolean lazy) {
- if (channels.length < newChannelCount || !lazy) {
- Object[] newChannels = new Object[newChannelCount];
- System.arraycopy(channels, 0, newChannels, 0,
- (channelCount < newChannelCount) ? channelCount
- : newChannelCount);
- this.channels = newChannels;
- }
- }
-
- private final void createChannels(int newChannelCount, int newSampleCount,
- boolean lazy) {
- // shortcut
- if (lazy && newChannelCount <= channelCount
- && newSampleCount <= this.sampleCount) {
- setSampleCountImpl(newSampleCount);
- setChannelCountImpl(newChannelCount);
- return;
- }
- setSampleCountImpl(newSampleCount);
- // grow the array, if necessary. Intentionally lazy here!
- grow(newChannelCount, true);
- // lazy delete of all channels. Intentionally lazy !
- setChannelCountImpl(0);
- for (int ch = 0; ch < newChannelCount; ch++) {
- insertChannel(ch, false, lazy);
- }
- // if not lazy, remove hidden channels
- grow(newChannelCount, lazy);
- }
-
- /**
- * Resets this buffer with the audio data specified in the arguments. This
- * FloatSampleBuffer's sample count will be set to
- * byteCount / format.getFrameSize()
. If LAZY_DEFAULT is
- * true, it will use lazy deletion.
- *
- * @throws IllegalArgumentException
- */
- public void initFromByteArray(byte[] buffer, int offset, int byteCount,
- AudioFormat format) {
- initFromByteArray(buffer, offset, byteCount, format, LAZY_DEFAULT);
- }
-
- /**
- * Resets this buffer with the audio data specified in the arguments. This
- * FloatSampleBuffer's sample count will be set to
- * byteCount / format.getFrameSize()
.
- *
- * @param lazy if true, then existing channels will be tried to be re-used
- * to minimize garbage collection.
- * @throws IllegalArgumentException
- */
- public void initFromByteArray(byte[] buffer, int offset, int byteCount,
- AudioFormat format, boolean lazy) {
- if (offset + byteCount > buffer.length) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.initFromByteArray: buffer too small.");
- }
-
- int thisSampleCount = byteCount / format.getFrameSize();
- init(format.getChannels(), thisSampleCount, format.getSampleRate(),
- lazy);
-
- // save format for automatic dithering mode
- originalFormatType = FloatSampleTools.getFormatType(format);
-
- FloatSampleTools.byte2float(buffer, offset, channels, 0, sampleCount,
- format);
- }
-
- /**
- * Resets this sample buffer with the data in source
.
- */
- public void initFromFloatSampleBuffer(FloatSampleBuffer source) {
- init(source.getChannelCount(), source.getSampleCount(),
- source.getSampleRate());
- for (int ch = 0; ch < getChannelCount(); ch++) {
- System.arraycopy(source.getChannel(ch), 0, getChannel(ch), 0,
- sampleCount);
- }
- }
-
- /**
- * Write the contents of the byte array to this buffer, overwriting existing
- * data. If the byte array has fewer channels than this float buffer, only
- * the first channels are written. Vice versa, if the byte buffer has more
- * channels than this float buffer, only the first channels of the byte
- * buffer are written to this buffer.
- * buffer
as an
- * interleaved byte array. buffer
must be large enough to
- * hold all data.
- *
- * @throws IllegalArgumentException when buffer is too small or
- * format
doesn't match
- * @return number of bytes written to buffer
- */
- public int convertToByteArray(byte[] buffer, int offset, AudioFormat format) {
- return convertToByteArray(0, getSampleCount(), buffer, offset, format);
- }
-
- // cache for performance
- private AudioFormat lastConvertToByteArrayFormat = null;
- private int lastConvertToByteArrayFormatCode = 0;
-
- /**
- * Writes this sample buffer's audio data to buffer
as an
- * interleaved byte array. buffer
must be large enough to
- * hold all data.
- *
- * @param readOffset the sample offset from where samples are read from this
- * FloatSampleBuffer
- * @param lenInSamples how many samples are converted
- * @param buffer the byte buffer written to
- * @param writeOffset the byte offset in buffer
- * @throws IllegalArgumentException when buffer is too small or
- * format
doesn't match
- * @return number of bytes written to buffer
- */
- public int convertToByteArray(int readOffset, int lenInSamples,
- byte[] buffer, int writeOffset, AudioFormat format) {
- int byteCount = format.getFrameSize() * lenInSamples;
- if (writeOffset + byteCount > buffer.length) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.convertToByteArray: buffer too small.");
- }
- if (format != lastConvertToByteArrayFormat) {
- if (format.getSampleRate() != getSampleRate()) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.convertToByteArray: different samplerates.");
- }
- if (format.getChannels() != getChannelCount()) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.convertToByteArray: different channel count.");
- }
- lastConvertToByteArrayFormat = format;
- lastConvertToByteArrayFormatCode = FloatSampleTools.getFormatType(format);
- }
- FloatSampleTools.float2byte(channels, readOffset, buffer, writeOffset,
- lenInSamples, lastConvertToByteArrayFormatCode,
- format.getChannels(), format.getFrameSize(),
- getConvertDitherBits(lastConvertToByteArrayFormatCode));
-
- return byteCount;
- }
-
- /**
- * Creates a new byte[] buffer, fills it with the audio data, and returns
- * it.
- *
- * @throws IllegalArgumentException when sample rate or channels do not
- * match
- * @see #convertToByteArray(byte[], int, AudioFormat)
- */
- public byte[] convertToByteArray(AudioFormat format) {
- // throws exception when sampleRate doesn't match
- // creates a new byte[] buffer and returns it
- byte[] res = new byte[getByteArrayBufferSize(format)];
- convertToByteArray(res, 0, format);
- return res;
- }
-
- // ////////////////////////////// actions /////////////////////////////////
-
- /**
- * Resizes this buffer.
- * keepOldSamples
is true, as much as possible samples are
- * retained. If the buffer is enlarged, silence is added at the end. If
- * keepOldSamples
is false, existing samples may get
- * discarded, the buffer may then contain random samples.
- */
- public void changeSampleCount(int newSampleCount, boolean keepOldSamples) {
- int oldSampleCount = getSampleCount();
-
- // shortcut: if we just make this buffer smaller, just set new
- // sampleCount
- if (oldSampleCount >= newSampleCount) {
- setSampleCountImpl(newSampleCount);
- return;
- }
- // shortcut for one or 2 channels
- if (channelCount == 1 || channelCount == 2) {
- float[] ch = getChannel(0);
- if (ch.length < newSampleCount) {
- float[] newCh = new float[newSampleCount];
- if (keepOldSamples && oldSampleCount > 0) {
- // copy old samples
- System.arraycopy(ch, 0, newCh, 0, oldSampleCount);
- }
- channels[0] = newCh;
- } else if (keepOldSamples) {
- // silence out excess samples (according to the specification)
- for (int i = oldSampleCount; i < newSampleCount; i++) {
- ch[i] = 0.0f;
- }
- }
- if (channelCount == 2) {
- ch = getChannel(1);
- if (ch.length < newSampleCount) {
- float[] newCh = new float[newSampleCount];
- if (keepOldSamples && oldSampleCount > 0) {
- // copy old samples
- System.arraycopy(ch, 0, newCh, 0, oldSampleCount);
- }
- channels[1] = newCh;
- } else if (keepOldSamples) {
- // silence out excess samples (according to the
- // specification)
- for (int i = oldSampleCount; i < newSampleCount; i++) {
- ch[i] = 0.0f;
- }
- }
- }
- setSampleCountImpl(newSampleCount);
- return;
- }
-
- Object[] oldChannels = null;
- if (keepOldSamples) {
- oldChannels = getAllChannels();
- }
- init(getChannelCount(), newSampleCount, getSampleRate());
- if (keepOldSamples) {
- // copy old channels and eventually silence out new samples
- int copyCount = newSampleCount < oldSampleCount ? newSampleCount
- : oldSampleCount;
- for (int ch = 0; ch < this.channelCount; ch++) {
- float[] oldSamples = (float[]) oldChannels[ch];
- float[] newSamples = (float[]) channels[ch];
- if (oldSamples != newSamples) {
- // if this sample array was not object of lazy delete
- System.arraycopy(oldSamples, 0, newSamples, 0, copyCount);
- }
- if (oldSampleCount < newSampleCount) {
- // silence out new samples
- for (int i = oldSampleCount; i < newSampleCount; i++) {
- newSamples[i] = 0.0f;
- }
- }
- }
- }
- }
-
- /**
- * Silence the entire audio buffer.
- */
- public void makeSilence() {
- makeSilence(0, getSampleCount());
- }
-
- /**
- * Silence the entire buffer in the specified range on all channels.
- */
- public void makeSilence(int offset, int count) {
- if (offset < 0 || (count + offset) > getSampleCount() || count < 0) {
- throw new IllegalArgumentException(
- "offset and/or sampleCount out of bounds");
- }
- // silence all channels
- int localChannelCount = getChannelCount();
- for (int ch = 0; ch < localChannelCount; ch++) {
- makeSilence(getChannel(ch), offset, count);
- }
- }
-
- /**
- * Silence the specified channel
- */
- public void makeSilence(int channel) {
- makeSilence(channel, 0, getSampleCount());
- }
-
- /**
- * Silence the specified channel in the specified range
- */
- public void makeSilence(int channel, int offset, int count) {
- if (offset < 0 || (count + offset) > getSampleCount() || count < 0) {
- throw new IllegalArgumentException(
- "offset and/or sampleCount out of bounds");
- }
- makeSilence(getChannel(channel), offset, count);
- }
-
- private void makeSilence(float[] samples, int offset, int count) {
- count += offset;
- for (int i = offset; i < count; i++) {
- samples[i] = 0.0f;
- }
- }
-
- /**
- * Fade the volume level of this buffer from the given start volume to the end volume.
- * E.g. to implement a fade in, use startVol=0 and endVol=1.
- *
- * @param startVol the start volume as a linear factor [0..1]
- * @param endVol the end volume as a linear factor [0..1]
- */
- public void linearFade(float startVol, float endVol) {
- linearFade(startVol, endVol, 0, getSampleCount());
- }
-
- /**
- * Fade the volume level of this buffer from the given start volume to the end volume.
- * The fade will start at the offset, and will have reached endVol after count samples.
- * E.g. to implement a fade in, use startVol=0 and endVol=1.
- *
- * @param startVol the start volume as a linear factor [0..1]
- * @param endVol the end volume as a linear factor [0..1]
- * @param offset the offset in this buffer where to start the fade (in samples)
- * @param count the number of samples to fade
- */
- public void linearFade(float startVol, float endVol, int offset, int count) {
- for (int channel = 0; channel < getChannelCount(); channel++) {
- linearFade(channel, startVol, endVol, offset, count);
- }
- }
-
- /**
- * Fade the volume level of the specified channel from the given start volume to
- * the end volume.
- * The fade will start at the offset, and will have reached endVol after count
- * samples.
- * E.g. to implement a fade in, use startVol=0 and endVol=1.
- *
- * @param channel the channel to do the fade
- * @param startVol the start volume as a linear factor [0..1]
- * @param endVol the end volume as a linear factor [0..1]
- * @param offset the offset in this buffer where to start the fade (in samples)
- * @param count the number of samples to fade
- */
- public void linearFade(int channel, float startVol, float endVol, int offset, int count) {
- if (count <= 0) return;
- float end = count+offset;
- float inc = (endVol - startVol) / count;
- float[] samples = getChannel(channel);
- float curr = startVol;
- for (int i = offset; i < end; i++) {
- samples[i] *= curr;
- curr += inc;
- }
- }
-
- /**
- * Add a channel to this buffer, e.g. adding a channel to a mono buffer will make it a stereo buffer.
- *
- * @param silent if true, the channel is explicitly silenced. Otherwise the new channel may contain random data.
- */
- public void addChannel(boolean silent) {
- // creates new, silent channel
- insertChannel(getChannelCount(), silent);
- }
-
- /**
- * Insert a (silent) channel at position index
. If
- * LAZY_DEFAULT is true, this is done lazily.
- */
- public void insertChannel(int index, boolean silent) {
- insertChannel(index, silent, LAZY_DEFAULT);
- }
-
- /**
- * Inserts a channel at position index
.
- * silent
is true, the new channel will be silent.
- * Otherwise it will contain random data.
- * lazy
is true, hidden channels which have at least
- * getSampleCount() elements will be examined for reusage as inserted
- * channel.
- * If lazy
is false, still hidden channels are reused, but it
- * is assured that the inserted channel has exactly getSampleCount()
- * elements, thus not wasting memory.
- */
- public void insertChannel(int index, boolean silent, boolean lazy) {
- // first grow the array of channels, if necessary. Intentionally lazy
- grow(this.channelCount + 1, true);
- int physSize = channels.length;
- int virtSize = this.channelCount;
- float[] newChannel = null;
- if (physSize > virtSize) {
- // there are hidden channels. Try to use one.
- for (int ch = virtSize; ch < physSize; ch++) {
- float[] thisChannel = (float[]) channels[ch];
- if (thisChannel != null
- && ((lazy && thisChannel.length >= getSampleCount()) || (!lazy && thisChannel.length == getSampleCount()))) {
- // we found a matching channel. Use it !
- newChannel = thisChannel;
- channels[ch] = null;
- break;
- }
- }
- }
- if (newChannel == null) {
- newChannel = new float[getSampleCount()];
- }
- // move channels after index
- for (int i = index; i < virtSize; i++) {
- channels[i + 1] = channels[i];
- }
- channels[index] = newChannel;
- setChannelCountImpl(this.channelCount + 1);
- if (silent) {
- makeSilence(index);
- }
- // if not lazy, remove old channels
- grow(this.channelCount, lazy);
- }
-
- /** performs a lazy remove of the channel */
- public void removeChannel(int channel) {
- removeChannel(channel, LAZY_DEFAULT);
- }
-
- /**
- * Removes a channel. If lazy is true, the channel is not physically
- * removed, but only hidden. These hidden channels are reused by subsequent
- * calls to addChannel or insertChannel.
- */
- public void removeChannel(int channel, boolean lazy) {
- float[] toBeDeleted = (float[]) channels[channel];
- // move all channels after it
- for (int i = channel; i < this.channelCount - 1; i++) {
- channels[i] = channels[i + 1];
- }
- if (!lazy) {
- grow(this.channelCount - 1, true);
- } else {
- // if not already, insert this channel at the end
- channels[this.channelCount - 1] = toBeDeleted;
- }
- setChannelCountImpl(channelCount - 1);
- }
-
- /**
- * Copy sourceChannel's audio data to targetChannel, identified by their
- * indices in the channel list. Both source and target channel have to
- * exist. targetChannel will be overwritten
- */
- public void copyChannel(int sourceChannel, int targetChannel) {
- float[] source = getChannel(sourceChannel);
- float[] target = getChannel(targetChannel);
- System.arraycopy(source, 0, target, 0, getSampleCount());
- }
-
- /**
- * Copy sampleCount samples from sourceChannel at position srcOffset to
- * targetChannel at position targetOffset. sourceChannel and targetChannel
- * are indices in the channel list. Both source and target channel have to
- * exist. targetChannel will be overwritten
- */
- public void copyChannel(int sourceChannel, int sourceOffset,
- int targetChannel, int targetOffset, int aSampleCount) {
- float[] source = getChannel(sourceChannel);
- float[] target = getChannel(targetChannel);
- System.arraycopy(source, sourceOffset, target, targetOffset,
- aSampleCount);
- }
-
- /**
- * Copies data inside all channel. When the 2 regions overlap, the behavior
- * is not specified.
- */
- public void copy(int sourceIndex, int destIndex, int length) {
- int count = getChannelCount();
- for (int i = 0; i < count; i++) {
- copy(i, sourceIndex, destIndex, length);
- }
- }
-
- /**
- * Copies data inside a channel. When the 2 regions overlap, the behavior is
- * not specified.
- */
- public void copy(int channel, int sourceIndex, int destIndex, int length) {
- float[] data = getChannel(channel);
- int bufferCount = getSampleCount();
- if (sourceIndex + length > bufferCount
- || destIndex + length > bufferCount || sourceIndex < 0
- || destIndex < 0 || length < 0) {
- throw new IndexOutOfBoundsException("parameters exceed buffer size");
- }
- System.arraycopy(data, sourceIndex, data, destIndex, length);
- }
-
- /**
- * Mix up of 1 channel to n channels.
- * It copies the first channel to all newly created channels.
- *
- * @param targetChannelCount the number of channels that this sample buffer
- * will have after expanding. NOT the number of channels to add !
- * @exception IllegalArgumentException if this buffer does not have one
- * channel before calling this method.
- */
- public void expandChannel(int targetChannelCount) {
- // even more sanity...
- if (getChannelCount() != 1) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer: can only expand channels for mono signals.");
- }
- for (int ch = 1; ch < targetChannelCount; ch++) {
- addChannel(false);
- copyChannel(0, ch);
- }
- }
-
- /**
- * Mix down of n channels to one channel.
- * It uses a simple mixdown: all other channels are added to first channel.
- * The volume is NOT lowered ! Be aware, this might cause clipping when
- * converting back to integer samples.
- */
- public void mixDownChannels() {
- float[] firstChannel = getChannel(0);
- int localSampleCount = getSampleCount();
- for (int ch = getChannelCount() - 1; ch > 0; ch--) {
- float[] thisChannel = getChannel(ch);
- for (int i = 0; i < localSampleCount; i++) {
- firstChannel[i] += thisChannel[i];
- }
- removeChannel(ch);
- }
- }
-
- /**
- * Mixes source
to this buffer by adding all samples. At
- * most, source
's number of samples, number of channels are
- * mixed. None of the sample count, channel count or sample rate of either
- * buffer are changed. In particular, the caller needs to assure that the
- * sample rate of the buffers match.
- *
- * @param source the buffer to be mixed to this buffer
- */
- public void mix(FloatSampleBuffer source) {
- int count = getSampleCount();
- if (count > source.getSampleCount()) {
- count = source.getSampleCount();
- }
- int localChannelCount = getChannelCount();
- if (localChannelCount > source.getChannelCount()) {
- localChannelCount = source.getChannelCount();
- }
- for (int ch = 0; ch < localChannelCount; ch++) {
- float[] thisChannel = getChannel(ch);
- float[] otherChannel = source.getChannel(ch);
- for (int i = 0; i < count; i++) {
- thisChannel[i] += otherChannel[i];
- }
- }
- }
-
- /**
- * Mixes source
samples to this buffer by adding the sample values.
- * None of the sample count, channel count or sample rate of either
- * buffer are changed. In particular, the caller needs to assure that the
- * sample rate of the buffers match.
- * dest
's number of samples, number of
- * channels are copied. None of the sample count, channel count or sample
- * rate of either buffer are changed. In particular, the caller needs to
- * assure that the sample rate of the buffers match.
- *
- * @param dest the buffer to write to
- * @param destOffset the position in dest
where to start
- * writing the samples of this buffer
- * @param count the number of samples to be copied
- * @return the number of samples copied
- */
- public int copyTo(FloatSampleBuffer dest, int destOffset, int count) {
- return copyTo(0, dest, destOffset, count);
- }
-
- /**
- * Copies the specified part of this buffer to the destination buffer.
- * At most, dest
's number of samples, number of
- * channels are copied. None of the sample count, channel count or sample
- * rate of either buffer are changed. In particular, the caller needs to
- * assure that the sample rate of the buffers match.
- *
- * @param srcOffset the start position in this buffer, where to start reading samples
- * @param dest the buffer to write to
- * @param destOffset the position in dest
where to start
- * writing the samples
- * @param count the number of samples to be copied
- * @return the number of samples copied
- */
- public int copyTo(int srcOffset, FloatSampleBuffer dest, int destOffset, int count) {
- if (srcOffset + count > getSampleCount()) {
- count = getSampleCount() - srcOffset;
- }
- if (count + destOffset > dest.getSampleCount()) {
- count = dest.getSampleCount() - destOffset;
- }
- int localChannelCount = getChannelCount();
- if (localChannelCount > dest.getChannelCount()) {
- localChannelCount = dest.getChannelCount();
- }
- for (int ch = 0; ch < localChannelCount; ch++) {
- System.arraycopy(getChannel(ch), srcOffset, dest.getChannel(ch),
- destOffset, count);
- }
- return count;
- }
-
- /**
- * Initializes audio data from the provided byte array. The float samples
- * are written at destOffset
. This FloatSampleBuffer must be
- * big enough to accomodate the samples.
- * srcBuffer
is read from index srcOffset
to
- * (srcOffset + (lengthInSamples * format.getFrameSize()))
.
- *
- * @param input the input buffer in interleaved audio data
- * @param inByteOffset the offset in input
- * @param format input buffer's audio format
- * @param floatOffset the offset where to write the float samples
- * @param frameCount number of samples to write to this sample buffer
- */
- public void setSamplesFromBytes(byte[] input, int inByteOffset,
- AudioFormat format, int floatOffset, int frameCount) {
- if (floatOffset < 0 || frameCount < 0 || inByteOffset < 0) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.setSamplesFromBytes: negative inByteOffset, floatOffset, or frameCount");
- }
- if (inByteOffset + (frameCount * format.getFrameSize()) > input.length) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.setSamplesFromBytes: input buffer too small.");
- }
- if (floatOffset + frameCount > getSampleCount()) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer.setSamplesFromBytes: frameCount too large");
- }
- FloatSampleTools.byte2float(input, inByteOffset, channels, floatOffset,
- frameCount, format, false);
- }
-
- // ////////////////////////////// properties /////////////////////////////
-
- public int getChannelCount() {
- return channelCount;
- }
-
- public int getSampleCount() {
- return sampleCount;
- }
-
- public float getSampleRate() {
- return sampleRate;
- }
-
- /**
- * internal setter for channel count, just change the variable. From
- * outside, use addChannel, insertChannel, removeChannel
- */
- protected void setChannelCountImpl(int newChannelCount) {
- if (channelCount != newChannelCount) {
- channelCount = newChannelCount;
- // remove cache
- this.lastConvertToByteArrayFormat = null;
- }
- }
-
- /**
- * internal setter for sample count, just change the variable. From outside,
- * use changeSampleCount
- */
- protected void setSampleCountImpl(int newSampleCount) {
- if (sampleCount != newSampleCount) {
- sampleCount = newSampleCount;
- }
- }
-
- /**
- * Alias for changeSampleCount
- *
- * @param newSampleCount the new number of samples for this buffer
- * @param keepOldSamples if true, the new buffer will keep the current
- * samples in the arrays
- * @see #changeSampleCount(int, boolean)
- */
- public void setSampleCount(int newSampleCount, boolean keepOldSamples) {
- changeSampleCount(newSampleCount, keepOldSamples);
- }
-
- /**
- * Sets the sample rate of this buffer. NOTE: no conversion is done. The
- * samples are only re-interpreted.
- */
- public void setSampleRate(float sampleRate) {
- if (sampleRate <= 0) {
- throw new IllegalArgumentException(
- "Invalid samplerate for FloatSampleBuffer.");
- }
- if (this.sampleRate != sampleRate) {
- this.sampleRate = sampleRate;
- // remove cache
- lastConvertToByteArrayFormat = null;
- }
- }
-
- /**
- * Get the actual audio data of one channel.
- * Modifying this array will modify the audio samples of this
- * FloatSampleBuffer.
- * NOTE: the returned array may be larger than sampleCount. So in any case,
- * sampleCount is to be respected.
- * @throws IllegalArgumentException if channel is out of bounds
- */
- public float[] getChannel(int channel) {
- if (channel >= this.channelCount) {
- throw new IllegalArgumentException(
- "FloatSampleBuffer: invalid channel number.");
- }
- return (float[]) channels[channel];
- }
-
- /**
- * Low-level method to directly set the array for the given channel.
- * Normally, you do not need this method, as you can conveniently
- * resize the array with changeSampleCount()
. This method
- * may be useful for advanced optimization techniques.
- * @param channel the channel to replace
- * @param data the audio sample array
- * @return the audio data array that was replaced
- * @throws IllegalArgumentException if channel is out of bounds or data is null
- * @see #changeSampleCount(int, boolean)
- */
- public float[] setRawChannel(int channel, float[] data) {
- if (data == null) {
- throw new IllegalArgumentException(
- "cannot set a channel to a null array");
- }
- float[] ret = getChannel(channel);
- channels[channel] = data;
- return ret;
- }
-
- /**
- * Get an array of all channels.
- * @return all channels as array
- */
- public Object[] getAllChannels() {
- Object[] res = new Object[getChannelCount()];
- for (int ch = 0; ch < getChannelCount(); ch++) {
- res[ch] = getChannel(ch);
- }
- return res;
- }
-
- /**
- * Set the number of bits for dithering. Typically, a value between 0.2 and
- * 0.9 gives best results.
- *
- *
- */
- public void setDitherMode(int mode) {
- if (mode != DITHER_MODE_AUTOMATIC && mode != DITHER_MODE_ON
- && mode != DITHER_MODE_OFF) {
- throw new IllegalArgumentException("Illegal DitherMode");
- }
- this.ditherMode = mode;
- }
-
- public int getDitherMode() {
- return ditherMode;
- }
-
- /**
- * @return the ditherBits parameter for the float2byte functions
- */
- protected float getConvertDitherBits(int newFormatType) {
- // let's see whether dithering is necessary
- boolean doDither = false;
- switch (ditherMode) {
- case DITHER_MODE_AUTOMATIC:
- doDither = (originalFormatType & FloatSampleTools.F_SAMPLE_WIDTH_MASK) > (newFormatType & FloatSampleTools.F_SAMPLE_WIDTH_MASK);
- break;
- case DITHER_MODE_ON:
- doDither = true;
- break;
- case DITHER_MODE_OFF:
- doDither = false;
- break;
- }
- return doDither ? ditherBits : 0.0f;
- }
-}
diff --git a/src/ddf/minim/javasound/FloatSampleTools.java b/src/ddf/minim/javasound/FloatSampleTools.java
deleted file mode 100644
index 0b875a6..0000000
--- a/src/ddf/minim/javasound/FloatSampleTools.java
+++ /dev/null
@@ -1,880 +0,0 @@
-/*
- * FloatSampleTools.java
- *
- * This file is part of Tritonus: http://www.tritonus.org/
- */
-
-/*
- * Copyright (c) 2000-2006 by Florian Bomers
- *
- * 8-bit data can be unsigned or signed. All other data is only supported in
- * signed encoding.
- *
- * @see FloatSampleBuffer
- * @author Florian Bomers
- */
-
-public class FloatSampleTools {
-
- /** default number of bits to be dithered: 0.7f */
- public static final float DEFAULT_DITHER_BITS = 0.7f;
-
- private static Random random = null;
-
- // sample width (must be in order !)
- static final int F_8 = 1;
- static final int F_16 = 2;
- static final int F_24_3 = 3;
- static final int F_24_4 = 4;
- static final int F_32 = 5;
- static final int F_SAMPLE_WIDTH_MASK = F_8 | F_16 | F_24_3 | F_24_4 | F_32;
-
- // format bit-flags
- static final int F_SIGNED = 8;
- static final int F_BIGENDIAN = 16;
-
- // supported formats
- static final int CT_8S = F_8 | F_SIGNED;
- static final int CT_8U = F_8;
- static final int CT_16SB = F_16 | F_SIGNED | F_BIGENDIAN;
- static final int CT_16SL = F_16 | F_SIGNED;
- static final int CT_24_3SB = F_24_3 | F_SIGNED | F_BIGENDIAN;
- static final int CT_24_3SL = F_24_3 | F_SIGNED;
- static final int CT_24_4SB = F_24_4 | F_SIGNED | F_BIGENDIAN;
- static final int CT_24_4SL = F_24_4 | F_SIGNED;
- static final int CT_32SB = F_32 | F_SIGNED | F_BIGENDIAN;
- static final int CT_32SL = F_32 | F_SIGNED;
-
- // ///////////////////////// initialization ////////////////////// //
-
- /** prevent instanciation */
- private FloatSampleTools() {
- }
-
- // /////////////// FORMAT / FORMAT TYPE /////////////////////////// //
-
- /**
- * only allow "packed" samples -- currently no support for 18, 20 bits --
- * except 24 bits stored in 4 bytes.
- *
- * @throws IllegalArgumentException
- */
- static void checkSupportedSampleSize(int ssib, int channels, int frameSize) {
- if (ssib == 24 && frameSize == 4 * channels) {
- // 24 bits stored in 4 bytes is OK (24_4)
- return;
- }
- if ((ssib * channels) != frameSize * 8) {
- throw new IllegalArgumentException("unsupported sample size: "
- + ssib + " bits stored in " + (frameSize / channels)
- + " bytes.");
- }
- }
-
- /**
- * Get the formatType code from the given format.
- *
- * @throws IllegalArgumentException
- */
- static int getFormatType(AudioFormat format) {
- boolean signed = format.getEncoding().equals(
- AudioFormat.Encoding.PCM_SIGNED);
- if (!signed
- && !format.getEncoding().equals(
- AudioFormat.Encoding.PCM_UNSIGNED)) {
- throw new IllegalArgumentException(
- "unsupported encoding: only PCM encoding supported.");
- }
- if (!signed && format.getSampleSizeInBits() != 8) {
- throw new IllegalArgumentException(
- "unsupported encoding: only 8-bit can be unsigned");
- }
- checkSupportedSampleSize(format.getSampleSizeInBits(),
- format.getChannels(), format.getFrameSize());
-
- int formatType = getFormatType(format.getSampleSizeInBits(),
- format.getFrameSize() / format.getChannels(), signed,
- format.isBigEndian());
- return formatType;
- }
-
- /**
- * @throws IllegalArgumentException
- */
- static int getFormatType(int ssib, int bytesPerSample, boolean signed,
- boolean bigEndian) {
- int res = 0;
- if (ssib == 24 || (bytesPerSample == ssib / 8)) {
- if (ssib == 8) {
- res = F_8;
- } else if (ssib == 16) {
- res = F_16;
- } else if (ssib == 24) {
- if (bytesPerSample == 3) {
- res = F_24_3;
- } else if (bytesPerSample == 4) {
- res = F_24_4;
- }
- } else if (ssib == 32) {
- res = F_32;
- }
- }
- if (res == 0) {
- throw new IllegalArgumentException(
- "ConversionTool: unsupported sample size of " + ssib
- + " bits per sample in " + bytesPerSample
- + " bytes.");
- }
- if (!signed && bytesPerSample > 1) {
- throw new IllegalArgumentException(
- "ConversionTool: unsigned samples larger than "
- + "8 bit are not supported");
- }
- if (signed) {
- res |= F_SIGNED;
- }
- if (bigEndian && (ssib != 8)) {
- res |= F_BIGENDIAN;
- }
- return res;
- }
-
- static int getSampleSize(int formatType) {
- switch (formatType & F_SAMPLE_WIDTH_MASK) {
- case F_8:
- return 1;
- case F_16:
- return 2;
- case F_24_3:
- return 3;
- case F_24_4:
- return 4;
- case F_32:
- return 4;
- }
- return 0;
- }
-
- /**
- * Return a string representation of this format
- */
- static String formatType2Str(int formatType) {
- String res = "" + formatType + ": ";
- switch (formatType & F_SAMPLE_WIDTH_MASK) {
- case F_8:
- res += "8bit";
- break;
- case F_16:
- res += "16bit";
- break;
- case F_24_3:
- res += "24_3bit";
- break;
- case F_24_4:
- res += "24_4bit";
- break;
- case F_32:
- res += "32bit";
- break;
- }
- res += ((formatType & F_SIGNED) == F_SIGNED) ? " signed" : " unsigned";
- if ((formatType & F_SAMPLE_WIDTH_MASK) != F_8) {
- res += ((formatType & F_BIGENDIAN) == F_BIGENDIAN) ? " big endian"
- : " little endian";
- }
- return res;
- }
-
- // /////////////////// BYTE 2 FLOAT /////////////////////////////////// //
-
- private static final float twoPower7 = 128.0f;
- private static final float twoPower15 = 32768.0f;
- private static final float twoPower23 = 8388608.0f;
- private static final float twoPower31 = 2147483648.0f;
-
- private static final float invTwoPower7 = 1 / twoPower7;
- private static final float invTwoPower15 = 1 / twoPower15;
- private static final float invTwoPower23 = 1 / twoPower23;
- private static final float invTwoPower31 = 1 / twoPower31;
-
- /**
- * @see #byte2float(byte[] input, int inByteOffset, Object[] output, int
- * outOffset, int frameCount, AudioFormat format, boolean
- * allowAddChannel)
- */
- public static void byte2float(byte[] input, int inByteOffset,
- Listformat
.
- * input[inByteOffset]
to
- * input[inByteOffset + (frameCount * format.getFrameSize()) - 1]
- * to floats from output(n)[outOffset]
to
- * output(n)[outOffset + frameCount - 1]
- *
- * @param input the audio data in an byte array
- * @param inByteOffset index in input where to start the conversion
- * @param output list of float[] arrays which receive the converted audio
- * data. if the list does not contain enough elements, or
- * individual float arrays are not large enough, they are
- * created.
- * @param outOffset the start offset in output
- * @param frameCount number of frames to be converted
- * @param format the input format. Only packed PCM is allowed
- * @param allowAddChannel if true, channels may be added to
- * output
to match the number of input channels,
- * otherwise, only the first output.size() channels of input data
- * are converted.
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- * @see #byte2floatInterleaved(byte[],int,float[],int,int,AudioFormat)
- */
- public static void byte2float(byte[] input, int inByteOffset,
- Listformat
.
- * input[inByteOffset]
to
- * input[inByteOffset + (frameCount * format.getFrameSize()) - 1]
- * to floats from output(n)[outOffset]
to
- * output(n)[outOffset + frameCount - 1]
- *
- * @param channel the channel number to extract from the input audio data
- * @param input the audio data in an byte array
- * @param inByteOffset index in input where to start the conversion
- * @param output the of float array which receives the converted audio data.
- * @param outOffset the start offset in output
- * @param frameCount number of frames to be converted
- * @param format the input format. Only packed PCM is allowed
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- */
- public static void byte2float(int channel, byte[] input, int inByteOffset,
- float[] output, int outOffset, int frameCount, AudioFormat format) {
-
- if (channel >= format.getChannels()) {
- throw new IllegalArgumentException("channel out of bounds");
- }
- if (output.length < frameCount + outOffset) {
- throw new IllegalArgumentException("data is too small");
- }
-
- // "select" the channel
- inByteOffset += format.getFrameSize() / format.getChannels() * channel;
- byte2floatGeneric(input, inByteOffset, format.getFrameSize(), output,
- outOffset, frameCount, format);
- }
-
- /**
- * Conversion function to convert an interleaved byte array to an
- * interleaved float array. The float array will contain normalized samples
- * in the range [-1.0f, +1.0f]. The input array provides bytes in the format
- * specified in format
.
- * input[inByteOffset]
to
- * input[inByteOffset + (frameCount * format.getFrameSize()) - 1]
- * to floats from output[outOffset]
to
- * output[outOffset + (frameCount * format.getChannels()) - 1]
- *
- * @param input the audio data in an byte array
- * @param inByteOffset index in input where to start the conversion
- * @param output the float array that receives the converted audio data
- * @param outOffset the start offset in output
- * @param frameCount number of frames to be converted
- * @param format the input format. Only packed PCM is allowed
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- * @see #byte2float(byte[],int,List,int,int,AudioFormat)
- */
- public static void byte2floatInterleaved(byte[] input, int inByteOffset,
- float[] output, int outOffset, int frameCount, AudioFormat format) {
-
- byte2floatGeneric(input, inByteOffset, format.getFrameSize()
- / format.getChannels(), output, outOffset, frameCount
- * format.getChannels(), format);
- }
-
- /**
- * Generic conversion function to convert a byte array to a float array.
- * input[inByteOffset]
to
- * input[inByteOffset + (sampleCount * (inByteStep - 1)]
to
- * samples from output[outOffset]
to
- * output[outOffset+sampleCount-1]
.
- * format
's channel count is ignored.
- * inByteOffset
to
- * format.getFrameSize()
.
- * For converting interleaved input data, multiply sampleCount
- * by the number of channels and set inByteStep to
- * format.getFrameSize() / format.getChannels()
.
- *
- * @param sampleCount number of samples to be written to output
- * @param inByteStep how many bytes advance for each output sample in
- * output
.
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- * @see #byte2floatInterleaved(byte[],int,float[],int,int,AudioFormat)
- * @see #byte2float(byte[],int,List,int,int,AudioFormat)
- */
- static void byte2floatGeneric(byte[] input, int inByteOffset,
- int inByteStep, float[] output, int outOffset, int sampleCount,
- AudioFormat format) {
- int formatType = getFormatType(format);
-
- byte2floatGeneric(input, inByteOffset, inByteStep, output, outOffset,
- sampleCount, formatType);
- }
-
- /**
- * Central conversion function from a byte array to a normalized float
- * array. In order to accomodate interleaved and non-interleaved samples,
- * this method takes inByteStep as parameter which can be used to flexibly
- * convert the data.
- *
- * mono->mono: inByteStep=format.getFrameSize()
- * interleaved_stereo->interleaved_stereo:
- * inByteStep=format.getFrameSize()/2, sampleCount*2
- * stereo->2 mono arrays:
- * ---inByteOffset=0, outOffset=0, inByteStep=format.getFrameSize()
- * ---inByteOffset=format.getFrameSize()/2, outOffset=1,
- * inByteStep=format.getFrameSize()
- */
- static void byte2floatGeneric(byte[] input, int inByteOffset,
- int inByteStep, float[] output, int outOffset, int sampleCount,
- int formatType) {
- // if (TDebug.TraceAudioConverter) {
- // TDebug.out("FloatSampleTools.byte2floatGeneric, formatType="
- // +formatType2Str(formatType));
- // }
- int endCount = outOffset + sampleCount;
- int inIndex = inByteOffset;
- for (int outIndex = outOffset; outIndex < endCount; outIndex++, inIndex += inByteStep) {
- // do conversion
- switch (formatType) {
- case CT_8S:
- output[outIndex] = input[inIndex] * invTwoPower7;
- break;
- case CT_8U:
- output[outIndex] = ((input[inIndex] & 0xFF) - 128) * invTwoPower7;
- break;
- case CT_16SB:
- output[outIndex] = ((input[inIndex] << 8)
- | (input[inIndex + 1] & 0xFF))
- * invTwoPower15;
- break;
- case CT_16SL:
- output[outIndex] = ((input[inIndex + 1] << 8)
- | (input[inIndex] & 0xFF))
- * invTwoPower15;
- break;
- case CT_24_3SB:
- output[outIndex] = ((input[inIndex] << 16)
- | ((input[inIndex + 1] & 0xFF) << 8)
- | (input[inIndex + 2] & 0xFF))
- * invTwoPower23;
- break;
- case CT_24_3SL:
- output[outIndex] = ((input[inIndex + 2] << 16)
- | ((input[inIndex + 1] & 0xFF) << 8)
- | (input[inIndex] & 0xFF))
- * invTwoPower23;
- break;
- case CT_24_4SB:
- output[outIndex] = ((input[inIndex + 1] << 16)
- | ((input[inIndex + 2] & 0xFF) << 8)
- | (input[inIndex + 3] & 0xFF))
- * invTwoPower23;
- break;
- case CT_24_4SL:
- // TODO: verify the indexes
- output[outIndex] = ((input[inIndex + 3] << 16)
- | ((input[inIndex + 2] & 0xFF) << 8)
- | (input[inIndex + 1] & 0xFF))
- * invTwoPower23;
- break;
- case CT_32SB:
- output[outIndex] = ((input[inIndex] << 24)
- | ((input[inIndex + 1] & 0xFF) << 16)
- | ((input[inIndex + 2] & 0xFF) << 8)
- | (input[inIndex + 3] & 0xFF))
- * invTwoPower31;
- break;
- case CT_32SL:
- output[outIndex] = ((input[inIndex + 3] << 24)
- | ((input[inIndex + 2] & 0xFF) << 16)
- | ((input[inIndex + 1] & 0xFF) << 8)
- | (input[inIndex] & 0xFF))
- * invTwoPower31;
- break;
- default:
- throw new IllegalArgumentException("unsupported format="
- + formatType2Str(formatType));
- }
- }
- }
-
- // /////////////////// FLOAT 2 BYTE /////////////////////////////////// //
-
- private static byte quantize8(float sample, float ditherBits) {
- if (ditherBits != 0) {
- sample += random.nextFloat() * ditherBits;
- }
- if (sample >= 127.0f) {
- return (byte) 127;
- } else if (sample <= -128.0f) {
- return (byte) -128;
- } else {
- return (byte) (sample < 0 ? (sample - 0.5f) : (sample + 0.5f));
- }
- }
-
- private static int quantize16(float sample, float ditherBits) {
- if (ditherBits != 0) {
- sample += random.nextFloat() * ditherBits;
- }
- if (sample >= 32767.0f) {
- return 32767;
- } else if (sample <= -32768.0f) {
- return -32768;
- } else {
- return (int) (sample < 0 ? (sample - 0.5f) : (sample + 0.5f));
- }
- }
-
- private static int quantize24(float sample, float ditherBits) {
- if (ditherBits != 0) {
- sample += random.nextFloat() * ditherBits;
- }
- if (sample >= 8388607.0f) {
- return 8388607;
- } else if (sample <= -8388608.0f) {
- return -8388608;
- } else {
- return (int) (sample < 0 ? (sample - 0.5f) : (sample + 0.5f));
- }
- }
-
- private static int quantize32(float sample, float ditherBits) {
- if (ditherBits != 0) {
- sample += random.nextFloat() * ditherBits;
- }
- if (sample >= 2147483647.0f) {
- return 2147483647;
- } else if (sample <= -2147483648.0f) {
- return -2147483648;
- } else {
- return (int) (sample < 0 ? (sample - 0.5f) : (sample + 0.5f));
- }
- }
-
- /**
- * Conversion function to convert a non-interleaved float audio data to an
- * interleaved byte array. The float arrays contains normalized samples in
- * the range [-1.0f, +1.0f]. The output array will receive bytes in the
- * format specified in format
. Exactly
- * format.getChannels()
channels are converted regardless of
- * the number of elements in input
. If input
- * does not provide enough channels, an IllegalArgumentException
- * is thrown.
- * input(n)[inOffset]
to
- * input(n)[inOffset + frameCount - 1]
to byte values from
- * output[outByteOffset]
to
- * output[outByteOffset + (frameCount * format.getFrameSize()) - 1]
- * output
- * @param frameCount number of frames to be converted.
- * @param format the output format. Only packed PCM is allowed
- * @param ditherBits if 0, do not dither. Otherwise the number of bits to be
- * dithered
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- * @see #DEFAULT_DITHER_BITS
- * @see #float2byteInterleaved(float[],int,byte[],int,int,AudioFormat,float)
- */
- public static void float2byte(Listformat
.
- * input[inOffset]
to
- * input[inOffset + (frameCount * format.getChannels()) - 1]
- * to byte values from output[outByteOffset]
to
- * output[outByteOffset + (frameCount * format.getFrameSize()) - 1]
- * output
- * @param frameCount number of frames to be converted.
- * @param format the output format. Only packed PCM is allowed
- * @param ditherBits if 0, do not dither. Otherwise the number of bits to be
- * dithered
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- * @see #DEFAULT_DITHER_BITS
- * @see #float2byte(List,int,byte[],int,int,AudioFormat,float)
- */
- public static void float2byteInterleaved(float[] input, int inOffset,
- byte[] output, int outByteOffset, int frameCount,
- AudioFormat format, float ditherBits) {
- float2byteGeneric(input, inOffset, output, outByteOffset,
- format.getFrameSize() / format.getChannels(), frameCount
- * format.getChannels(), format, ditherBits);
- }
-
- /**
- * Generic conversion function to convert a float array to a byte array.
- * input[inOffset]
to
- * input[inOffset+sampleCount-1]
to byte values from
- * output[outByteOffset]
to
- * output[outByteOffset + (sampleCount * (outByteStep - 1)]
.
- * format
's channel count is ignored.
- * outByteOffset
to
- * format.getFrameSize()
.
- * For converting interleaved input data, multiply sampleCount
- * by the number of channels and set outByteStep to
- * format.getFrameSize() / format.getChannels()
.
- *
- * @param sampleCount number of samples in input to be converted.
- * @param outByteStep how many bytes advance for each input sample in
- * input
.
- * @throws IllegalArgumentException if one of the parameters is out of
- * bounds
- * @see #float2byteInterleaved(float[],int,byte[],int,int,AudioFormat,float)
- * @see #float2byte(List,int,byte[],int,int,AudioFormat,float)
- */
- static void float2byteGeneric(float[] input, int inOffset, byte[] output,
- int outByteOffset, int outByteStep, int sampleCount,
- AudioFormat format, float ditherBits) {
- int formatType = getFormatType(format);
-
- float2byteGeneric(input, inOffset, output, outByteOffset, outByteStep,
- sampleCount, formatType, ditherBits);
- }
-
- /**
- * Central conversion function from normalized float array to a byte array.
- * In order to accomodate interleaved and non-interleaved samples, this
- * method takes outByteStep as parameter which can be used to flexibly
- * convert the data.
- *
- * mono->mono: outByteStep=format.getFrameSize()
- * interleaved stereo->interleaved stereo:
- * outByteStep=format.getFrameSize()/2, sampleCount*2
- * 2 mono arrays->stereo:
- * ---inOffset=0, outByteOffset=0, outByteStep=format.getFrameSize()
- * ---inOffset=1, outByteOffset=format.getFrameSize()/2,
- * outByteStep=format.getFrameSize()
- */
- static void float2byteGeneric(float[] input, int inOffset, byte[] output,
- int outByteOffset, int outByteStep, int sampleCount,
- int formatType, float ditherBits) {
- // if (TDebug.TraceAudioConverter) {
- // TDebug.out("FloatSampleBuffer.float2byteGeneric, formatType="
- // +"formatType2Str(formatType));
- // }
-
- if (inOffset < 0 || inOffset + sampleCount > input.length
- || sampleCount < 0) {
- throw new IllegalArgumentException("invalid input index: "
- + "input.length=" + input.length + " inOffset=" + inOffset
- + " sampleCount=" + sampleCount);
- }
- if (outByteOffset < 0
- || outByteOffset + (sampleCount * outByteStep) >= (output.length + outByteStep)
- || outByteStep < getSampleSize(formatType)) {
- throw new IllegalArgumentException("invalid output index: "
- + "output.length=" + output.length + " outByteOffset="
- + outByteOffset + " outByteStep=" + outByteStep
- + " sampleCount=" + sampleCount + " format="
- + formatType2Str(formatType));
- }
-
- if (ditherBits != 0.0f && random == null) {
- // create the random number generator for dithering
- random = new Random();
- }
- int endSample = inOffset + sampleCount;
- int iSample;
- int outIndex = outByteOffset;
- for (int inIndex = inOffset; inIndex < endSample; inIndex++, outIndex += outByteStep) {
- // do conversion
- switch (formatType) {
- case CT_8S:
- output[outIndex] = quantize8(input[inIndex] * twoPower7,
- ditherBits);
- break;
- case CT_8U:
- output[outIndex] = (byte) (quantize8(
- (input[inIndex] * twoPower7), ditherBits) + 128);
- break;
- case CT_16SB:
- iSample = quantize16(input[inIndex] * twoPower15, ditherBits);
- output[outIndex] = (byte) (iSample >> 8);
- output[outIndex + 1] = (byte) (iSample & 0xFF);
- break;
- case CT_16SL:
- iSample = quantize16(input[inIndex] * twoPower15, ditherBits);
- output[outIndex + 1] = (byte) (iSample >> 8);
- output[outIndex] = (byte) (iSample & 0xFF);
- break;
- case CT_24_3SB:
- iSample = quantize24(input[inIndex] * twoPower23, ditherBits);
- output[outIndex] = (byte) (iSample >> 16);
- output[outIndex + 1] = (byte) ((iSample >>> 8) & 0xFF);
- output[outIndex + 2] = (byte) (iSample & 0xFF);
- break;
- case CT_24_3SL:
- iSample = quantize24(input[inIndex] * twoPower23, ditherBits);
- output[outIndex + 2] = (byte) (iSample >> 16);
- output[outIndex + 1] = (byte) ((iSample >>> 8) & 0xFF);
- output[outIndex] = (byte) (iSample & 0xFF);
- break;
- case CT_24_4SB:
- // TODO: verify
- iSample = quantize24(input[inIndex] * twoPower23, ditherBits);
- output[outIndex + 0] = 0;
- output[outIndex + 1] = (byte) (iSample >> 16);
- output[outIndex + 2] = (byte) ((iSample >>> 8) & 0xFF);
- output[outIndex + 3] = (byte) (iSample & 0xFF);
- break;
- case CT_24_4SL:
- // TODO: verify
- iSample = quantize24(input[inIndex] * twoPower23, ditherBits);
- output[outIndex + 3] = (byte) (iSample >> 16);
- output[outIndex + 2] = (byte) ((iSample >>> 8) & 0xFF);
- output[outIndex + 1] = (byte) (iSample & 0xFF);
- output[outIndex + 0] = 0;
- break;
- case CT_32SB:
- iSample = quantize32(input[inIndex] * twoPower31, ditherBits);
- output[outIndex] = (byte) (iSample >> 24);
- output[outIndex + 1] = (byte) ((iSample >>> 16) & 0xFF);
- output[outIndex + 2] = (byte) ((iSample >>> 8) & 0xFF);
- output[outIndex + 3] = (byte) (iSample & 0xFF);
- break;
- case CT_32SL:
- iSample = quantize32(input[inIndex] * twoPower31, ditherBits);
- output[outIndex + 3] = (byte) (iSample >> 24);
- output[outIndex + 2] = (byte) ((iSample >>> 16) & 0xFF);
- output[outIndex + 1] = (byte) ((iSample >>> 8) & 0xFF);
- output[outIndex] = (byte) (iSample & 0xFF);
- break;
- default:
- throw new IllegalArgumentException("unsupported format="
- + formatType2Str(formatType));
- }
- }
- }
-}
diff --git a/src/ddf/minim/javasound/JSAudioInput.java b/src/ddf/minim/javasound/JSAudioInput.java
deleted file mode 100644
index 027fbe4..0000000
--- a/src/ddf/minim/javasound/JSAudioInput.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2007 by Damien Di Fede save()
is called.
- * Because of this it is possible to specify the file format to use for saving after
- * the audio has already been recorded. It is also possible to save the recorded audio to
- * multiple formats by calling save(type)
for each file format you want to
- * save to. Because the saving is performed in the same thread of execution as your
- * Processing sketch, you can expect your sketch to hang while the audio is written to
- * disk. How long it hangs will be proportional to the length of the audio buffer.
- *
- * @author Damien Di Fede
- *
- */
-final class JSBufferedSampleRecorder implements SampleRecorder
-{
- private ArrayList
-
- Minim
class
- that takes a plain Object
and you simply need to
- define two methods
- that we will find using reflection.
-
- @libname Minim
-
-
\ No newline at end of file
diff --git a/src/ddf/minim/signals/Oscillator.java b/src/ddf/minim/signals/Oscillator.java
deleted file mode 100644
index 0ffcb05..0000000
--- a/src/ddf/minim/signals/Oscillator.java
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede Oscillator
is an implementation of an AudioSignal
- * that handles most of the work associated with an oscillatory signal like a
- * sine wave. To create your own oscillator you must extend
- * Oscillator
and implement the {@link #value(float) value}
- * method. Oscillator
will call this method every time it needs
- * to sample your waveform. The number passed to the method is an offset from
- * the beginning of the waveform's period and should be used to sample your
- * waveform at that point.
- *
- * @author Damien Di Fede
- *
- */
-public abstract class Oscillator implements AudioSignal
-{
- /** The float value of 2*PI. Provided as a convenience for subclasses. */
- protected static final float TWO_PI = (float) (2 * Math.PI);
- /** The current frequency of the oscillator. */
- private float freq;
- /** The frequency to transition to. */
- private float newFreq;
- /** The sample rate of the oscillator. */
- private float srate;
- /** The current amplitude of the oscillator. */
- private float amp;
- /** The amplitude to transition to. */
- private float newAmp;
- /** The current position in the waveform's period. */
- private float step;
- private float stepSize;
- /** The portamento state. */
- private boolean port;
- /** The portamento speed in milliseconds. */
- private float portSpeed; // in milliseconds
- /**
- * The amount to increment or decrement freq
during the
- * transition to newFreq
.
- */
- private float portStep;
- /** The current pan position. */
- private float pan;
- /** The pan position to transition to. */
- private float newPan;
- /**
- * The amount to scale the left channel's amplitude to achieve the current pan
- * setting.
- */
- private float leftScale;
- /**
- * The amount to scale the right channel's amplitude to achieve the current
- * pan setting.
- */
- private float rightScale;
-
- private AudioListener listener;
-
- private AudioSignal ampMod;
- private AudioSignal freqMod;
-
- /**
- * Constructs an Oscillator with the requested frequency, amplitude and sample
- * rate.
- *
- * @param frequency
- * the frequency of the Oscillator
- * @param amplitude
- * the amplitude of the Oscillator
- * @param sampleRate
- * the sample rate of the Oscillator
- */
- public Oscillator(float frequency, float amplitude, float sampleRate)
- {
- freq = frequency;
- newFreq = freq;
- amp = amplitude;
- newAmp = amp;
- srate = sampleRate;
- step = 0;
- stepSize = freq / (sampleRate);
- port = false;
- portStep = 0.01f;
- pan = 0;
- newPan = 0;
- leftScale = rightScale = 1;
- listener = null;
- ampMod = null;
- freqMod = null;
- }
-
- public final float sampleRate()
- {
- return srate;
- }
-
- /**
- * Sets the frequency of the Oscillator in Hz. If portamento is on, the
- * frequency of the Oscillator will transition from the current frequency to
- * f
.
- *
- * @param f
- * the new frequency of the Oscillator
- */
- public final void setFreq(float f)
- {
- newFreq = f;
- // we want to step from freq to new newFreq in portSpeed milliseconds
- // first off, we want to divide the difference between the two freqs
- // by the number of milliseconds it's supposed to take to get there
- float msStep = (newFreq - freq) / portSpeed;
- // but since freq is incremented at every sample, we need to divide
- // again by the number of samples per millisecond
- float spms = srate / 1000;
- portStep = msStep / spms;
- }
-
- /**
- * Returns the current frequency.
- *
- * @return the current frequency
- */
- public final float frequency()
- {
- return freq;
- }
-
- /**
- * Set the amplitude of the Oscillator, range is [0, 1].
- *
- * @param a
- * the new amplitude, it will be constrained to [0, 1]
- */
- public final void setAmp(float a)
- {
- newAmp = constrain( a, 0, 1 );
- }
-
- /**
- * Returns the current amplitude.
- *
- * @return the current amplitude
- */
- public final float amplitude()
- {
- return amp;
- }
-
- /**
- * Set the pan of the Oscillator, range is [-1, 1].
- *
- * @param p -
- * the new pan value, it will be constrained to [-1, 1]
- */
- public final void setPan(float p)
- {
- newPan = constrain(p, -1, 1);
- }
-
- /**
- * Set the pan of the Oscillator, but don't smoothly transition from
- * whatever the current pan value is to this new one.
- *
- * @param p -
- * the new pan value, it will be constrained to [-1,1]
- */
- public final void setPanNoGlide(float p)
- {
- setPan(p);
- pan = constrain(p, -1, 1);
- }
-
- /**
- * Returns the current pan value.
- *
- * @return the current pan value
- */
- public final float pan()
- {
- return pan;
- }
-
- /**
- * Sets how many milliseconds it should take to transition from one frequency
- * to another when setting a new frequency.
- *
- * @param millis
- * the length of the portamento
- */
- public final void portamento(int millis)
- {
- if (millis <= 0)
- {
- Minim.error("Oscillator.portamento: The portamento speed must be greater than zero.");
- }
- port = true;
- portSpeed = millis;
- }
-
- /**
- * Turns off portamento.
- *
- */
- public final void noPortamento()
- {
- port = false;
- }
-
- private final void updateFreq()
- {
- if ( freq != newFreq )
- {
- if ( port )
- {
- if (Math.abs(freq - newFreq) < 0.1f)
- {
- freq = newFreq;
- }
- else
- {
- freq += portStep;
- }
- }
- else
- {
- freq = newFreq;
- }
- }
- stepSize = freq / srate;
- }
-
- // holy balls, amplitude and frequency modulation
- // all rolled up into one.
- private final float generate(float fmod, float amod)
- {
- step += fmod;
- step = step - (float)Math.floor(step);
- return amp * amod * value(step);
- }
-
- public final void generate(float[] signal)
- {
- float[] fmod = new float[signal.length];
- float[] amod = new float[signal.length];
- if ( freqMod != null )
- {
- freqMod.generate(fmod);
- }
- if ( ampMod != null )
- {
- ampMod.generate(amod);
- }
- for(int i = 0; i < signal.length; i++)
- {
- // do the portamento stuff / freq updating
- updateFreq();
- if ( ampMod != null )
- {
- signal[i] = generate(fmod[i], amod[i]);
- }
- else
- {
- signal[i] = generate(fmod[i], 1);
- }
- monoStep();
- }
- // broadcast to listener
- if ( listener != null )
- {
- listener.samples(signal);
- }
- }
-
- public final void generate(float[] left, float[] right)
- {
- float[] fmod = new float[left.length];
- float[] amod = new float[right.length];
- if ( freqMod != null )
- {
- freqMod.generate(fmod);
- }
- if ( ampMod != null )
- {
- ampMod.generate(amod);
- }
- for(int i = 0; i < left.length; i++)
- {
- // do the portamento stuff / freq updating
- updateFreq();
- if ( ampMod != null )
- {
- left[i] = generate(fmod[i], amod[i]);
- }
- else
- {
- left[i] = generate(fmod[i], 1);
- }
- right[i] = left[i];
- // scale amplitude to add pan
- left[i] *= leftScale;
- right[i] *= rightScale;
- stereoStep();
- }
- if ( listener != null )
- {
- listener.samples(left, right);
- }
- }
-
- public final void setAudioListener(AudioListener al)
- {
- listener = al;
- }
-
- // Not visible for 2.0.2
- final void setAmplitudeModulator(AudioSignal s)
- {
- ampMod = s;
- }
-
- // Not visible for 2.0.2
- final void setFrequencyModulator(AudioSignal s)
- {
- freqMod = s;
- }
-
- private void monoStep()
- {
- stepStep();
- stepAmp();
- }
-
- private void stereoStep()
- {
- stepStep();
- stepAmp();
- calcLRScale();
- stepPan();
- }
-
- private void stepStep()
- {
- step += stepSize;
- step = step - (float)Math.floor(step);
- }
-
- private void calcLRScale()
- {
- if (pan <= 0)
- {
- // map -1, 0 to 0, 1
- rightScale = pan + 1;
- leftScale = 1;
- }
- if (pan >= 0)
- {
- // map 0, 1 to 1, 0;
- leftScale = 1 - pan;
- rightScale = 1;
- }
- if (pan == 0)
- {
- leftScale = rightScale = 1;
- }
- }
-
- private static float panAmpStep = 0.0001f;
-
- private void stepPan()
- {
- if (pan != newPan)
- {
- if (pan < newPan)
- pan += panAmpStep;
- else
- pan -= panAmpStep;
- if (Math.abs(pan - newPan) < panAmpStep) pan = newPan;
- }
- }
-
- private void stepAmp()
- {
- if (amp != newAmp)
- {
- if (amp < newAmp)
- amp += panAmpStep;
- else
- amp -= panAmpStep;
- if (Math.abs(amp - newAmp) < panAmpStep) pan = newPan;
- }
- }
-
- /**
- * Returns the period of the waveform (the inverse of the frequency).
- *
- * @return the period of the waveform
- */
- public final float period()
- {
- return 1 / freq;
- }
-
- /**
- * Returns the value of the waveform at step
. To take
- * advantage of all of the work that Oscillator
does, you can
- * create your own periodic waveforms by extending Oscillator
- * and implementing this function. All of the oscillators included with Minim
- * were created in this way.
- *
- * @param step
- * an offset from the beginning of the waveform's period
- * @return the value of the waveform at step
- */
- protected abstract float value(float step);
-
- float constrain( float val, float min, float max )
- {
- return val < min ? min : ( val > max ? max : val );
- }
-}
diff --git a/src/ddf/minim/signals/PinkNoise.java b/src/ddf/minim/signals/PinkNoise.java
deleted file mode 100644
index b77687c..0000000
--- a/src/ddf/minim/signals/PinkNoise.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede amp
.
- * amp
should be between 0 and 1.
- *
- * @param amp
- */
- public PinkNoise(float amp)
- {
- setAmp(amp);
- pan = 0;
- leftScale = rightScale = 1;
- initPink();
- }
-
- /**
- * Sets the amplitude of the signal to a
.
- *
- * @param a
- * the new amplitude, it will be constrained to [0, 1].
- */
- public void setAmp(float a)
- {
- amp = constrain(a, 0, 1);
- }
-
- /**
- * Sets the pan of the signal to p
.
- *
- * @param p
- * the new pan, it will be constrained to [-1, 1]
- */
- public void setPan(float p)
- {
- pan = constrain(p, -1, 1);
- calcLRScale();
- }
-
- public void generate(float[] signal)
- {
- for (int i = 0; i < signal.length; i++)
- {
- signal[i] = amp * pink();
- }
- }
-
- public void generate(float[] left, float[] right)
- {
- for (int i = 0; i < left.length; i++)
- {
- left[i] = leftScale * amp * pink();
- right[i] = rightScale * amp * pink();
- }
- }
-
- // This is the Voss algorithm for creating pink noise
-
- private int maxKey, key, range;
- private float whiteValues[];
- private float maxSumEver;
-
- private void initPink()
- {
- maxKey = 0x1f;
- range = 128;
- maxSumEver = 90;
- key = 0;
- whiteValues = new float[6];
- for (int i = 0; i < 6; i++)
- whiteValues[i] = ((float) Math.random() * Long.MAX_VALUE) % (range / 6);
- }
-
- // return a pink noise value
- private float pink()
- {
- int last_key = key;
- float sum;
-
- key++;
- if (key > maxKey) key = 0;
- // Exclusive-Or previous value with current value. This gives
- // a list of bits that have changed.
- int diff = last_key ^ key;
- sum = 0;
- for (int i = 0; i < 6; i++)
- {
- // If bit changed get new random number for corresponding
- // white_value
- if ((diff & (1 << i)) != 0)
- {
- whiteValues[i] = ((float) Math.random() * Long.MAX_VALUE) % (range / 6);
- }
- sum += whiteValues[i];
- }
- if (sum > maxSumEver) maxSumEver = sum;
- sum = 2f * (sum / maxSumEver) - 1f;
- return sum;
- }
-
- private void calcLRScale()
- {
- if (pan <= 0)
- {
- // map -1, 0 to 0, 1
- rightScale = pan + 1;
- leftScale = 1;
- }
- if (pan >= 0)
- {
- // map 0, 1 to 1, 0;
- leftScale = 1 - pan;
- rightScale = 1;
- }
- if (pan == 0)
- {
- leftScale = rightScale = 1;
- }
- }
-
- float constrain( float val, float min, float max )
- {
- return val < min ? min : ( val > max ? max : val );
- }
-
-}
diff --git a/src/ddf/minim/signals/PulseWave.java b/src/ddf/minim/signals/PulseWave.java
deleted file mode 100644
index df8f19a..0000000
--- a/src/ddf/minim/signals/PulseWave.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede amp
- * should be between 0 and 1.
- *
- * @param amp the amplitude
- */
- public WhiteNoise(float amp)
- {
- setAmp(amp);
- pan = 0;
- leftScale = rightScale = 1;
- }
-
- /**
- * Sets the amplitude to a
. This value will be constrained to [0, 1].
- * @param a the new amplitude
- */
- public void setAmp(float a)
- {
- amp = constrain(a, 0, 1);
- }
-
- /**
- * Sets the pan to p
. This value will be constrained to [-1, 1].
- *
- * @param p the new pan
- */
- public void setPan(float p)
- {
- pan = constrain(p, -1, 1);
- calcLRScale();
- }
-
- public void generate(float[] signal)
- {
- for (int i = 0; i < signal.length; i++)
- {
- signal[i] = amp * (2 * (float) Math.random() - 1);
- }
- }
-
- public void generate(float[] left, float[] right)
- {
- for (int i = 0; i < left.length; i++)
- {
- left[i] = leftScale * amp * (2 * (float) Math.random() - 1);
- right[i] = rightScale * amp * (2 * (float) Math.random() - 1);
- }
- }
-
- private void calcLRScale()
- {
- if (pan <= 0)
- {
- // map -1, 0 to 0, 1
- rightScale = pan + 1;
- leftScale = 1;
- }
- if (pan >= 0)
- {
- // map 0, 1 to 1, 0;
- leftScale = 1 - pan;
- rightScale = 1;
- }
- if (pan == 0)
- {
- leftScale = rightScale = 1;
- }
- }
-
- float constrain( float val, float min, float max )
- {
- return val < min ? min : ( val > max ? max : val );
- }
-}
diff --git a/src/ddf/minim/spi/AudioOut.java b/src/ddf/minim/spi/AudioOut.java
deleted file mode 100644
index ef6f163..0000000
--- a/src/ddf/minim/spi/AudioOut.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioSythesizer
is an AudioStream
that generates
- * sound, rather than reading sound. It uses the attached
- * AudioSignal
and AudioEffect
to generate a signal.
- *
- * @author Damien Di Fede
- *
- */
-public interface AudioOut extends AudioResource
-{
- /**
- * @return the size of the buffer used by this output.
- */
- int bufferSize();
-
- /**
- * Sets the AudioSignal that this output will use to generate sound.
- *
- * @param signal
- * the AudioSignal used to generate sound
- */
- @Deprecated
- void setAudioSignal(AudioSignal signal);
-
- /**
- * Sets the AudioStream that this output will use to generate sound.
- *
- * @param stream
- */
- void setAudioStream(AudioStream stream);
-
- /**
- * Sets the AudioEffect to apply to the signal.
- *
- * @param effect
- * the AudioEffect to apply to the signal
- */
- @Deprecated
- void setAudioEffect(AudioEffect effect);
-
- /**
- * Sets the AudioListener that will have sound broadcasted to it as the
- * output generates.
- *
- * @param listen
- */
- void setAudioListener(AudioListener listen);
-}
diff --git a/src/ddf/minim/spi/AudioRecording.java b/src/ddf/minim/spi/AudioRecording.java
deleted file mode 100644
index 2964371..0000000
--- a/src/ddf/minim/spi/AudioRecording.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioRecording
is an AudioResource
that has methods that
- * allow the audio to played, similar to the Playable
interface.
- *
- * @author Damien Di Fede
- *
- */
-/** @deprecated */
-public interface AudioRecording extends AudioResource, AudioStream
-{
- /**
- * Allows playback/reads of the source.
- *
- */
- void play();
-
- /**
- * Disallows playback/reads of the source. If this is pause, all calls to read
- * will generate arrays full of zeros (silence).
- *
- */
- void pause();
-
- boolean isPlaying();
-
- /**
- * Starts looping playback from the current position. Playback will continue
- * to the loop's end point, then loop back to the loop start point count
- * times, and finally continue playback to the end of the clip.
- *
- * If the current position when this method is invoked is greater than the
- * loop end point, playback simply continues to the end of the source without
- * looping.
- *
- * A count value of 0 indicates that any current looping should cease and
- * playback should continue to the end of the clip. The behavior is undefined
- * when this method is invoked with any other value during a loop operation.
- *
- * If playback is stopped during looping, the current loop status is cleared;
- * the behavior of subsequent loop and start requests is not affected by an
- * interrupted loop operation.
- *
- * @param count
- * the number of times playback should loop back from the loop's
- * end position to the loop's start position, or
- * Minim.LOOP_CONTINUOUSLY to indicate that looping should continue
- * until interrupted
- */
- void loop(int count);
-
- /**
- * Sets the loops points in the source, in milliseconds
- *
- * @param start
- * the position of the beginning of the loop
- * @param stop
- * the position of the end of the loop
- */
- void setLoopPoints(int start, int stop);
-
- /**
- * How many loops are left to go. 0 means this isn't looping and -1 means
- * that it is looping continuously.
- *
- * @return how many loops left
- */
- int getLoopCount();
-
- /**
- * Gets the current millisecond position of the source.
- *
- * @return the current possition, in milliseconds in the source
- */
- int getMillisecondPosition();
-
- /**
- * Sets the current millisecond position of the source.
- *
- * @param pos
- * the posititon to cue the playback head to
- */
- void setMillisecondPosition(int pos);
-
- /**
- * Returns the length of the source in milliseconds. Infinite sources, such
- * as internet radio streams, should return -1.
- *
- * @return the length of the source, in milliseconds
- */
- int getMillisecondLength();
-
- /**
- * Returns meta data about the recording, such as duration, name, ID3 tags
- * perhaps.
- *
- * @return the MetaData of the recording
- */
- AudioMetaData getMetaData();
-}
diff --git a/src/ddf/minim/spi/AudioRecordingStream.java b/src/ddf/minim/spi/AudioRecordingStream.java
deleted file mode 100644
index a2e7a7c..0000000
--- a/src/ddf/minim/spi/AudioRecordingStream.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede AudioStream
is a stream of samples that is coming from
- * somewhere. Users of an AudioStream
don't really need to know
- * where the samples are coming from. However, typically they will be read
- * from a Line
or a file. An AudioStream
needs to
- * be opened before being used and closed when you are finished with it.
- *
- * @author Damien Di Fede
- *
- */
-public interface AudioStream extends AudioResource
-{
- /**
- * Reads the next sample frame.
- *
- * @return an array of floats containing the value of each channel in the sample frame just read.
- * The size of the returned array will be the same size as getFormat().getChannels().
- */
- @Deprecated
- float[] read();
-
- /**
- * Reads buffer.getBufferSize() sample frames and puts them into buffer's channels.
- * The provided buffer will be forced to have the same number of channels that this
- * AudioStream does.
- *
- * @param buffer The MultiChannelBuffer to fill with audio samples.
- *
- * @return int: the number of sample frames that were actually read, could be smaller than the size of the buffer.
- */
- int read(MultiChannelBuffer buffer);
-}
diff --git a/src/ddf/minim/spi/MinimServiceProvider.java b/src/ddf/minim/spi/MinimServiceProvider.java
deleted file mode 100644
index 83b2e34..0000000
--- a/src/ddf/minim/spi/MinimServiceProvider.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede MinimServiceProvider
is the interface that an audio implementation must
- * provide to Minim
, to guarantee that it can provide all of the functionality
- * that Minim promises. All of the interfaces in this package define functionality in the
- * most minimal way possible, to make it easy for people write their own implementations, since
- * much of what Minim provides can be done so without regard for the details of audio intput and
- * output. If you write your own implementation of this interface, or if you are using one that
- * someone else has written, all you must do is pass an instantiation of it to the Minim
- * constructor. That Minim object will then delegate most of the work to the implementation.
- *
- * @author ddf
- *
- */
-
-public interface MinimServiceProvider
-{
- /**
- * Called inside the Minim constructor. Implementations should load any libraries and
- * resources they need at this time.
- */
- void start();
-
- /**
- * Called when stop()
is called by the Minim object that owns this.
- * Implementations should release all resources and stop all Threads at this time.
- *
- */
- void stop();
-
- /**
- * Tells the implementation it should produce debug output, if that's something it does.
- *
- */
- void debugOn();
-
- /**
- * Tells the implementation it should not produce debug output.
- *
- */
- void debugOff();
-
- /**
- * Should return an {@link AudioRecording} that can play the file requested. The filename could
- * be a URL, an absolute path, or just a filename that the user expects the system to find in
- * their sketch somewhere.
- *
- * @param filename the name of the file to load into the AudioRecording
- * @return an AudioRecording that can play the file
- */
- /** @deprecated */
- AudioRecording getAudioRecording(String filename);
-
- /**
- * Should return an {@link AudioRecordingStream} that will stream the file requested. The filename
- * could be a URL, an absolute path, or just a filename that the user expects the system to find
- * in their sketch somewhere.
- *
- * @param filename the name of the file to load into the AudioRecordingStream
- * @param bufferSize the bufferSize to use in memory (implementations are free to ignore this, if they must)
- * @param inMemory TODO figure out if this inMemory thing really makes sense.
- * @return an AudioRecording stream that will stream the file
- */
- AudioRecordingStream getAudioRecordingStream(String filename, int bufferSize, boolean inMemory);
-
- /**
- * Should return an {@link AudioStream} with the requested parameters. What Minim is
- * expecting this stream to be reading from is the active audio input of the computer,
- * such as the microphone or line-in.
- *
- * @param type Minim.STEREO or Minim.MONO
- * @param bufferSize how big the in-memory buffer should be
- * @param sampleRate what the sample rate of the stream should be
- * @param bitDepth what the bit depth of the stream should be
- * @return an AudioStream that is reading from the active audio input of the computer
- */
- AudioStream getAudioInput(int type, int bufferSize, float sampleRate, int bitDepth);
-
- /**
- * Should return an {@link AudioOut} that can be used to generate audio that will
- * be heard through the computer's speakers.
- *
- * @param type Minim.STEREO or Minim.MONO
- * @param bufferSize how big the in-memory buffer should be
- * @param sampleRate what the sample rate of the generated audio should be
- * @param bitDepth what the bit depth of the generated audio should be
- * @return an AudioSynthesizer that will output to the computer's speakers
- */
- AudioOut getAudioOutput(int type, int bufferSize, float sampleRate, int bitDepth);
-
- /**
- * Should return an {@link AudioSample} that will load the requested file into memory.
- *
- * @param filename the name of the file to load, this might be a URL, an absolute path, or a
- * file that the user expects the implementation to find in their sketch somewhere.
- * @param bufferSize how big the output buffer used for playing the sample should be
- * @return an AudioSample that contains the file
- */
- AudioSample getAudioSample(String filename, int bufferSize);
-
- /**
- * Should return an {@link AudioSample} that will store the provided samples.
- *
- * @param samples
- * the array of audio samples
- * @param bufferSize
- * how large the output buffer should be
- * @return
- * an AudioSample that contains the samples
- */
- AudioSample getAudioSample(float[] samples, AudioFormat format, int bufferSize);
-
- /**
- * Should return an {@link AudioSample} that will store the provided samples.
- *
- * @param left
- * the left channel of the stereo sample
- * @param right
- * the right channel of a stereo sample
- * @param bufferSize
- * how large the output buffer should be
- * @return
- * an AudioSample that contains the samples
- */
- AudioSample getAudioSample(float[] left, float[] right, AudioFormat format, int bufferSize);
-
- /**
- * Should return a {@link SampleRecorder} that can record the source
in a
- * buffered (in-memory) or non-buffered (streamed) manner, to the file specified by saveTo
- * @param source the audio source that should be recorded
- * @param saveTo the file to save the recorded audio to
- * @param buffered whether or not to buffer all recorded audio in memory or stream directly to the file
- * @return an appropriate SampleRecorder
- */
- SampleRecorder getSampleRecorder(Recordable source, String saveTo, boolean buffered);
-}
diff --git a/src/ddf/minim/spi/SampleRecorder.java b/src/ddf/minim/spi/SampleRecorder.java
deleted file mode 100644
index dbb650a..0000000
--- a/src/ddf/minim/spi/SampleRecorder.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2007 - 2008 by Damien Di Fede SampleRecorder
is an interface that describes the limited
- * set of functionality required of an object that records samples. It is
- * not required that the SampleRecorder
records to disk, though
- * the implementations in Minim do so.
- *
- * @author Damien Di Fede
- *
- */
-
-public interface SampleRecorder extends AudioListener
-{
- /**
- * Returns the full path to the file this is saving to, if it exists.
- * If this is not saving to a file, an empty String
will be
- * returned.
- *
- * @return the full path to the file or an empty String
- */
- String filePath();
-
- /**
- * Begins recording.
- *
- */
- void beginRecord();
-
- /**
- * Halts recording.
- *
- */
- void endRecord();
-
- /**
- * Returns the current record state.
- *
- * @return true if this is recording
- */
- boolean isRecording();
-
- /**
- * Saves the recorded samples, probably to disk.
- * Returns the recorded audio as an AudioRecordingStream.
- *
- */
- AudioRecordingStream save();
-}
diff --git a/src/ddf/minim/ugens/ADSR.java b/src/ddf/minim/ugens/ADSR.java
deleted file mode 100644
index 6697913..0000000
--- a/src/ddf/minim/ugens/ADSR.java
+++ /dev/null
@@ -1,389 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.AudioOutput;
-import ddf.minim.UGen;
-
-/**
- * A UGen that plays input audio through a standard ADSR (Attack, Decay, Sustain, Release)
- * envelope based on time from noteOn and noteOff.
- *
- * @example Synthesis/ADSRExample
- *
- * @author Anderson Mills
- *
- */
-public class ADSR extends UGen
-{
- /**
- * The default input is "audio."
- * You won't need to patch to this directly, since
- * simply patching to the ADSR itself will achieve
- * the same result.
- *
- * @related ADSR
- */
- public UGenInput audio;
-
- // amplitude before the ADSR hits
- private float beforeAmplitude;
- // amplitude after the release of the ADSR
- private float afterAmplitude;
- // the max amplitude of the envelope
- private float maxAmplitude;
- // the current amplitude
- private float amplitude;
- // the time of the attack
- private float attackTime;
- // the time of the decay
- private float decayTime;
- // the level of the sustain
- private float sustainLevel;
- // the time of the release
- private float releaseTime;
- // the current size of the step
- private float timeStepSize;
- // the time from noteOn
- private float timeFromOn;
- // the time from noteOff
- private float timeFromOff;
- // the envelope has received noteOn
- private boolean isTurnedOn;
- // the envelope has received noteOff
- private boolean isTurnedOff;
- // unpatch the note after it's finished
- private boolean unpatchAfterRelease;
- private AudioOutput output;
- private UGen ugenOutput;
-
- /**
- * Constructor for an ADSR envelope.
- * Maximum amplitude is set to 1.0.
- * Attack and decay times are set to 1 sec.
- * Sustain level is set to 0.0. Release time is set to 1 sec.
- * Amplitude before and after the envelope is set to 0.
- */
- public ADSR()
- {
- this(1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope with maximum amplitude.
- * Attack and decay times are set to 1 sec.
- * Sustain level is set to 0.0. Release time is set to 1 sec.
- * Amplitude before and after the envelope is set to 0.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- */
- public ADSR(float maxAmp)
- {
- this(maxAmp, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope with maximum amplitude, attack Time.
- * Decay time is set to 1 sec.
- * Sustain level is set to 0.0. Release time is set to 1 sec.
- * Amplitude before and after the envelope is set to 0.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- */
- public ADSR( float maxAmp, float attTime )
- {
- this(maxAmp, attTime, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope with maximum amplitude, attack Time, and decay time.
- * Sustain level is set to 0.0. Release time is set to 1 sec.
- * Amplitude before and after the envelope is set to 0.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- * @param decTime
- * float: the decay time, in seconds
- *
- */
- public ADSR( float maxAmp, float attTime, float decTime )
- {
- this(maxAmp, attTime, decTime, 0.0f, 1.0f, 0.0f, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope with maximum amplitude, attack Time, decay time, and sustain level.
- * Release time is set to 1 sec. Amplitude before and after the envelope is set to 0.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- * @param decTime
- * float: the decay time, in seconds
- * @param susLvl
- * float: the percentage of the maximum amplitude to maintain after the decay completes
- */
- public ADSR( float maxAmp, float attTime, float decTime, float susLvl )
- {
- this(maxAmp, attTime, decTime, susLvl, 1.0f, 0.0f, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope with maximum amplitude, attack Time, decay time, sustain level,
- * and release time. Amplitude before and after the envelope is set to 0.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- * @param decTime
- * float: the decay time, in seconds
- * @param susLvl
- * float: the percentage of the maximum amplitude to maintain after the decay completes
- * @param relTime
- * float: the release time, in seconds
- */
- public ADSR(float maxAmp, float attTime, float decTime, float susLvl, float relTime)
- {
- this(maxAmp, attTime, decTime, susLvl, relTime, 0.0f, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope with maximum amplitude, attack Time, decay time, sustain level,
- * release time, an amplitude before the envelope. Amplitude after the envelope is set to 0.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- * @param decTime
- * float: the decay time, in seconds
- * @param susLvl
- * float: the percentage of the maximum amplitude to maintain after the decay completes
- * @param relTime
- * float: the release time, in seconds
- * @param befAmp
- * float: the amplitude to apply before the envelope is activated
- */
- public ADSR(float maxAmp, float attTime, float decTime, float susLvl, float relTime, float befAmp)
- {
- this(maxAmp, attTime, decTime, susLvl, relTime, befAmp, 0.0f);
- }
-
- /**
- * Constructor for an ADSR envelope.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- * @param decTime
- * float: the decay time, in seconds
- * @param susLvl
- * float: the percentage of the maximum amplitude to maintain after the decay completes
- * @param relTime
- * float: the release time, in seconds
- * @param befAmp
- * float: the amplitude to apply before the envelope is activated
- * @param aftAmp
- * float: the amplitude to apply once the envelope has completed
- */
- public ADSR(float maxAmp, float attTime, float decTime, float susLvl, float relTime, float befAmp, float aftAmp)
- {
- super();
- audio = new UGenInput(InputType.AUDIO);
- maxAmplitude = maxAmp;
- attackTime = attTime;
- decayTime = decTime;
- sustainLevel = susLvl;
- releaseTime = relTime;
- beforeAmplitude = befAmp;
- afterAmplitude = aftAmp;
- amplitude = beforeAmplitude;
- isTurnedOn = false;
- isTurnedOff = false;
- timeFromOn = -1.0f;
- timeFromOff = -1.0f;
- unpatchAfterRelease = false;
- }
-
- /**
- * Permits the changing of the ADSR parameters.
- *
- * @param maxAmp
- * float: the maximum amplitude for the envelope
- * @param attTime
- * float: the attack time, in seconds
- * @param decTime
- * float: the decay time, in seconds
- * @param susLvl
- * float: the percentage of the maximum amplitude to maintain after the decay completes
- * @param relTime
- * float: the release time, in seconds
- * @param befAmp
- * float: the amplitude to apply before the envelope is activated
- * @param aftAmp
- * float: the amplitude to apply once the envelope has completed
- *
- * @related ADSR
- */
- public void setParameters( float maxAmp, float attTime, float decTime, float susLvl, float relTime, float befAmp, float aftAmp)
- {
- maxAmplitude = maxAmp;
- attackTime = attTime;
- decayTime = decTime;
- sustainLevel = susLvl;
- releaseTime = relTime;
- beforeAmplitude = befAmp;
- afterAmplitude = aftAmp;
- }
-
- /**
- * Specifies that the ADSR envelope should begin.
- *
- * @example Synthesis/ADSRExample
- *
- * @related ADSR
- */
- public void noteOn()
- {
- timeFromOn = 0f;
- isTurnedOn = true;
-
- // ddf: reset these so that the envelope can be retriggered
- timeFromOff = -1.f;
- isTurnedOff = false;
- }
- /**
- * Specifies that the ADSR envelope should start the release time.
- *
- * @example Synthesis/ADSRExample
- *
- * @related ADSR
- */
- public void noteOff()
- {
- timeFromOff = 0f;
- isTurnedOff = true;
- }
-
- /**
- * Use this method to notify the ADSR that the sample rate has changed.
- */
- @Override
- protected void sampleRateChanged()
- {
- timeStepSize = 1/sampleRate();
- }
-
- /**
- * Tell the ADSR that it should unpatch itself from the output after the release time.
- *
- * @param output
- * AudioOutput: the output this should unpatch itself from
- *
- * @example Synthesis/ADSRExample
- *
- * @related ADSR
- */
- public void unpatchAfterRelease( AudioOutput output )
- {
- unpatchAfterRelease = true;
- this.output = output;
- }
-
- /**
- * Tell the ADSR that it should unpatch itself from this UGen after the release time.
- *
- * @param ugen
- * the UGen this should unpatch itself from
- *
- * @related ADSR
- */
- public void unpatchAfterRelease( UGen ugen )
- {
- unpatchAfterRelease = true;
- ugenOutput = ugen;
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- // before the envelope, just output the beforeAmplitude*audio
- if (!isTurnedOn)
- {
- for(int i = 0; i < channelCount(); i++)
- {
- channels[i] = beforeAmplitude*audio.getLastValues()[i];
- }
- }
- // after the envelope, just output the afterAmplitude*audio
- else if (timeFromOff > releaseTime)
- {
- for(int i = 0; i < channelCount(); i++)
- {
- channels[i] = afterAmplitude*audio.getLastValues()[i];
- }
- if ( unpatchAfterRelease )
- {
- if ( output != null )
- {
- unpatch( output );
- output = null;
- }
- if ( ugenOutput != null )
- {
- unpatch( ugenOutput );
- ugenOutput = null;
- }
- unpatchAfterRelease = false;
- }
- }
- // inside the envelope
- else
- {
- if ((isTurnedOn) && (!isTurnedOff))
- {
- // ATTACK
- if (timeFromOn <= attackTime)
- {
- // use time remaining until maxAmplitude to change amplitude
- float timeRemain = (attackTime - timeFromOn);
- amplitude += (maxAmplitude - amplitude)*timeStepSize/timeRemain;
- }
- // DECAY
- else if ((timeFromOn > attackTime) && (timeFromOn <= (attackTime+decayTime)))
- {
- // use time remaining until sustain to change to sustain level
- float timeRemain = (attackTime + decayTime - timeFromOn);
- amplitude += (sustainLevel*maxAmplitude - amplitude)*timeStepSize/timeRemain;
- }
- // SUSTAIN
- else if (timeFromOn > (attackTime+decayTime))
- {
- // hold the sustain level
- amplitude = sustainLevel*maxAmplitude;
- }
- timeFromOn += timeStepSize;
- }
- // RELEASE
- else //isTurnedOn and isTurnedOFF and timeFromOff < releaseTime
- {
- // use remaining time to get to afterAmplitude
- float timeRemain = (releaseTime - timeFromOff);
- amplitude += (afterAmplitude - amplitude)*timeStepSize/timeRemain;
- timeFromOff += timeStepSize;
- }
- // finally multiply the input audio to generate the output
- for(int i = 0; i < channelCount(); i++)
- {
- channels[i] = amplitude*audio.getLastValues()[i];
- }
- }
- }
-}
diff --git a/src/ddf/minim/ugens/Abs.java b/src/ddf/minim/ugens/Abs.java
deleted file mode 100644
index 43fd75b..0000000
--- a/src/ddf/minim/ugens/Abs.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-
-/**
- * Abs is a UGen that outputs the absolute value of its input.
- *
- * @author Damien Di Fede
- * @related UGen
- *
- */
-public class Abs extends UGen
-{
- /**
- * The input that we will take the absolute value of.
- *
- * @related Abs
- */
- public UGenInput audio;
-
- public Abs()
- {
- audio = new UGenInput(InputType.AUDIO);
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- for(int i = 0; i < channels.length; ++i)
- {
- channels[i] = Math.abs( audio.getLastValues()[i] );
- }
- }
-
-}
diff --git a/src/ddf/minim/ugens/Balance.java b/src/ddf/minim/ugens/Balance.java
deleted file mode 100644
index 110dfee..0000000
--- a/src/ddf/minim/ugens/Balance.java
+++ /dev/null
@@ -1,91 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- * Balance is for controlling the left/right channel balance of a stereo signal.
- * This is different from Pan because rather than moving the signal around it
- * simply attenuates the existing audio.
- *
- * Bypass<GranulateSteady> granulate = new Bypass( new GranulateSteady() );
- * filePlayer.patch( granulate ).patch( mainOut );
- *
- * ugen
method of Bypass to retrieve the wrapped UGen
- * and operate on it:
- *
- * grainLenLine.patch( granulate.ugen().grainLen );
- *
- * activate
method will bypass the granulate effect
- * so that the Bypass object outputs the audio that is coming into it. Calling the
- * deactivate
method will route the audio through the wrapped effect. The
- * isActive
method indicates whether or not the wrapped effect is currently
- * being bypassed.
- * millis
from the beginning.
- * If this was previously set to loop, looping will be disabled.
- *
- * @param millis
- * int: where to start playing the file, in milliseconds
- *
- * @related FilePlayer
- */
- public void play(int millis)
- {
- cue(millis);
- play();
- }
-
- /**
- * Pauses playback.
- *
- * @example Synthesis/filePlayerExample
- *
- * @related FilePlayer
- */
- public void pause()
- {
- mFileStream.pause();
- isPaused = true;
- }
-
- /**
- * Rewinds to the beginning. This does not stop playback.
- *
- * @related FilePlayer
- */
- public void rewind()
- {
- cue(0);
- }
-
- /**
- * Sets looping to continuous. If this is already playing, the position
- * will not be reset to the beginning. If this is not playing,
- * it will start playing.
- *
- * @shortdesc Start looping playback of the file.
- *
- * @example Synthesis/filePlayerExample
- *
- * @related loopCount ( )
- * @related setLoopPoints ( )
- * @related isLooping ( )
- * @related FilePlayer
- */
- public void loop()
- {
- loop(Minim.LOOP_CONTINUOUSLY);
- }
-
- /**
- * Sets this to loop loopCount
times.
- * If this is already playing,
- * the position will not be reset to the beginning.
- * If this is not playing, it will start playing.
- *
- * @shortdesc Sets this to loop loopCount
times.
- *
- * @param loopCount
- * int: the number of times to loop
- *
- * @related loopCount ( )
- * @related setLoopPoints ( )
- * @related isLooping ( )
- * @related FilePlayer
- */
- public void loop(int loopCount)
- {
- if ( isPaused )
- {
- int pos = mFileStream.getMillisecondPosition();
- mFileStream.loop( loopCount );
- cue( pos );
- }
- else
- {
- mFileStream.loop(loopCount);
- }
-
- isPaused = false;
- }
-
- /**
- * Returns the number of loops left to do.
- *
- * @return int: the number of loops left
- *
- * @related loop ( )
- * @related FilePlayer
- */
- public int loopCount()
- {
- return mFileStream.getLoopCount();
- }
-
- /**
- * Returns the length of the sound in milliseconds. If for any reason the
- * length could not be determined, this will return -1. However, an unknown
- * length should not impact playback.
- *
- * @shortdesc Returns the length of the sound in milliseconds.
- *
- * @return int: the length of the sound in milliseconds
- *
- * @related FilePlayer
- */
- public int length()
- {
- return mFileStream.getMillisecondLength();
- }
-
- /**
- * Returns the current position of the "playhead" (ie how much of
- * the sound has already been played)
- *
- * @return int: the current position of the "playhead", in milliseconds
- *
- * @related FilePlayer
- */
- public int position()
- {
- return mFileStream.getMillisecondPosition();
- }
-
- /**
- * Sets the position to millis
milliseconds from
- * the beginning. This will not change the play state. If an error
- * occurs while trying to cue, the position will not change.
- * If you try to cue to a negative position or try to a position
- * that is greater than length()
, the amount will be clamped
- * to zero or length()
.
- *
- * @shortdesc Sets the position to millis
milliseconds from
- * the beginning.
- *
- * @param millis int: the position to place the "playhead", in milliseconds
- *
- * @related FilePlayer
- */
- public void cue(int millis)
- {
- if (millis < 0)
- {
- millis = 0;
- }
- else if (millis > length())
- {
- millis = length();
- }
- mFileStream.setMillisecondPosition(millis);
- // change the position in the stream invalidates our buffer, so we read a new buffer
- fillBuffer();
- }
-
- /**
- * Skips millis
from the current position. millis
- * can be negative, which will make this skip backwards. If the skip amount
- * would result in a negative position or a position that is greater than
- * length()
, the new position will be clamped to zero or
- * length()
.
- *
- * @shortdesc Skips millis
from the current position.
- *
- * @param millis
- * int: how many milliseconds to skip, sign indicates direction
- *
- * @related FilePlayer
- */
- public void skip(int millis)
- {
- int pos = position() + millis;
- if (pos < 0)
- {
- pos = 0;
- }
- else if (pos > length())
- {
- pos = length();
- }
- //Minim.debug("AudioPlayer.skip: skipping " + millis + " milliseconds, new position is " + pos);
- cue( pos );
- }
-
- /**
- * Returns true if this is currently playing and has more than one loop
- * left to play.
- *
- * @return boolean: true if this is looping
- *
- * @related loop ( )
- * @related FilePlayer
- */
- public boolean isLooping()
- {
- return mFileStream.getLoopCount() != 0;
- }
-
- /**
- * Returns true if this currently playing.
- *
- * @return boolean: the current play state
- *
- * @example Synthesis/filePlayerExample
- *
- * @related play ( )
- * @related pause ( )
- * @related FilePlayer
- */
- public boolean isPlaying()
- {
- return mFileStream.isPlaying();
- }
-
- /**
- * Returns the meta data for the recording being played by this player.
- *
- * @return
- * AudioMetaData: the meta data for this player's recording
- *
- * @related AudioMetaData
- * @related FilePlayer
- */
- public AudioMetaData getMetaData()
- {
- return mFileStream.getMetaData();
- }
-
- /**
- * Sets the loop points used when looping.
- *
- * @param start
- * int: the start of the loop in milliseconds
- * @param stop
- * int: the end of the loop in milliseconds
- *
- * @related loop ( )
- * @related FilePlayer
- */
- public void setLoopPoints(int start, int stop)
- {
- mFileStream.setLoopPoints(start, stop);
- }
-
- /**
- * Calling close will close the AudioRecordingStream that this wraps,
- * which is proper cleanup for using the stream.
- *
- * @related FilePlayer
- */
- public void close()
- {
- mFileStream.close();
- }
-
- private void fillBuffer()
- {
- mFileStream.read(buffer);
- bufferOutIndex = 0;
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- if ( mFileStream.isPlaying() )
- {
- // special case: mono expands out to all channels.
- if ( buffer.getChannelCount() == 1 )
- {
- Arrays.fill( channels, buffer.getSample( 0, bufferOutIndex ) );
- }
- // we have more than one channel, don't try to fill larger channel requests
- if ( buffer.getChannelCount() <= channels.length )
- {
- for(int i = 0 ; i < channels.length; ++i)
- {
- channels[i] = buffer.getSample( i, bufferOutIndex );
- }
- }
- // special case: we are stereo, output is mono.
- else if ( channels.length == 1 && buffer.getChannelCount() == 2 )
- {
- channels[0] = (buffer.getSample( 0, bufferOutIndex ) + buffer.getSample( 1, bufferOutIndex ))/2.0f;
- }
-
- ++bufferOutIndex;
- if ( bufferOutIndex == buffer.getBufferSize() )
- {
- fillBuffer();
- }
- }
- else
- {
- Arrays.fill( channels, 0 );
- }
- }
-
-}
diff --git a/src/ddf/minim/ugens/Flanger.java b/src/ddf/minim/ugens/Flanger.java
deleted file mode 100644
index bfd9827..0000000
--- a/src/ddf/minim/ugens/Flanger.java
+++ /dev/null
@@ -1,243 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- * A Flanger is a specialized kind of delay that uses an LFO (low frequency
- * oscillator) to vary the amount of delay applied to each sample. This causes a
- * sweeping frequency kind of sound as the signal reinforces or cancels itself
- * in various ways. In particular the peaks and notches created in the frequency
- * spectrum are related to each other in a linear harmonic series. This causes
- * the spectrum to look like a comb.
- *
- *
- * Frequency
is a class that represents an audio frequency.
- * Audio frequencies are generally expressed in Hertz, but Frequency
- * allows you to think in terms of other representations, such as note name.
- *
- * This class is generally used by an Oscil
UGen, but
- * can also be used to convert different notations of frequencies
- * such as Hz, MIDI note number, and a pitch name (English or solfege).
- *
- * @example Synthesis/frequencyExample
- *
- * @author Anderson Mills
- *
- */
-public class Frequency
-{
- static float HZA4=440.0f;
- static float MIDIA4=69.0f;
- static float MIDIOCTAVE=12.0f;
-
- // A TreeMap is used to force order so that later when creating the regex for
- // the note names, an ordered list can be used.
- private static TreeMap< String, Integer > noteNameOffsets = initializeNoteNameOffsets();
- private static TreeMap< String, Integer > initializeNoteNameOffsets()
- {
- TreeMap< String, Integer > initNNO = new TreeMap< String, Integer >();
- initNNO.put( "A", new Integer( 9 ) );
- initNNO.put( "B", new Integer( 11 ) );
- initNNO.put( "C", new Integer( 0 ) );
- initNNO.put( "D", new Integer( 2 ) );
- initNNO.put( "E", new Integer( 4 ) );
- initNNO.put( "F", new Integer( 5 ) );
- initNNO.put( "G", new Integer( 7 ) );
- initNNO.put( "La", new Integer( 9 ) );
- initNNO.put( "Si", new Integer( 11 ) );
- //initNNO.put( "Ti", new Integer( 11 ) );
- initNNO.put( "Do", new Integer( 0 ) );
- //initNNO.put( "Ut", new Integer( 0 ) );
- initNNO.put( "Re", new Integer( 2 ) );
- initNNO.put( "Mi", new Integer( 4 ) );
- initNNO.put( "Fa", new Integer( 5 ) );
- initNNO.put( "Sol", new Integer( 7 ) );
- return initNNO;
- }
-
- // several regex expression are used in determining the Frequency of musical pitches
- // want to build up the regex from components of noteName, noteNaturalness, and noteOctave
- private static String noteNameRegex = initializeNoteNameRegex();
- private static String initializeNoteNameRegex()
- {
- // noteName is built using the keys from the noteNameOffsets hashmap
- // The reverserList is a bit ridiculous, but necessary to reverse the
- // order of the the keys so that Do and Fa come before D and F.
- // (There is no .previous() method for a regular Iterator.)
- ArrayList< String > reverserList = new ArrayList< String >();
- Iterator< String > iterator = noteNameOffsets.keySet().iterator();
- while( iterator.hasNext() )
- {
- reverserList.add( iterator.next() );
- }
- // so that Do comes before D and is found first.
- String nNR = "(";
- ListIterator< String > listIterator = reverserList.listIterator( reverserList.size() );
- while( listIterator.hasPrevious() )
- {
- nNR += listIterator.previous() + "|";
- }
- // remove last | or empty string is included
- nNR = nNR.substring( 0, nNR.length() - 1 );
- nNR += ")";
- return nNR;
- }
-
- private static String noteNaturalnessRegex = "[#b]";
- private static String noteOctaveRegex = "(-1|10|[0-9])";
- private static String pitchRegex = "^" + noteNameRegex
- + "?[ ]*" + noteNaturalnessRegex + "*[ ]*" + noteOctaveRegex +"?$";
-
- private float freq;
-
- // The constructors are way down here.
- private Frequency( float hz )
- {
- freq = hz;
- }
-
- // ddf: this one isn't being used, apparently
-// private Frequency( String pitchName )
-// {
-// freq = Frequency.ofPitch( pitchName ).asHz();
-// }
-
- /**
- * Get the value of this Frequency in Hertz.
- *
- * @return float: this Frequency expressed in Hertz
- *
- * @example Synthesis/frequencyExample
- *
- * @related setAsHz ( )
- * @related Frequency
- *
- */
- public float asHz()
- {
- return freq;
- }
-
- /**
- * Set this Frequency to be equal to the provided Hertz value.
- *
- * @param hz
- * float: the new value for this Frequency in Hertz
- *
- * @related asHz ( )
- * @related Frequency
- */
- public void setAsHz( float hz )
- {
- freq = hz;
- }
-
- /**
- * Get the MIDI note value of this Frequency
- *
- * @return float: the MIDI note representation of this Frequency
- *
- * @example Synthesis/frequencyExample
- *
- * @related Frequency
- *
- */
- public float asMidiNote()
- {
- float midiNote = MIDIA4 + MIDIOCTAVE*(float)Math.log( freq/HZA4 )/(float)Math.log( 2.0 );
- return midiNote;
- }
-
- /**
- * Construct a Frequency that represents the provided Hertz.
- *
- * @param hz
- * float: the Hz for this Frequency (440 is A4, for instance)
- *
- * @return a new Frequency object
- *
- * @example Synthesis/frequencyExample
- *
- * @related Frequency
- */
- public static Frequency ofHertz(float hz)
- {
- return new Frequency(hz);
- }
-
- /**
- * Construct a Frequency from a MIDI note value.
- *
- * @param midiNote
- * float: a value in the range [0,127]
- *
- * @return a new Frequency object
- *
- * @example Synthesis/frequencyExample
- *
- * @related Frequency
- *
- */
- public static Frequency ofMidiNote( float midiNote )
- {
- float hz = HZA4*(float)Math.pow( 2.0, ( midiNote - MIDIA4 )/MIDIOCTAVE );
- return new Frequency(hz);
- }
-
- /**
- * Construct a Frequency from a pitch name, such as A4 or Bb2.
- *
- * @param pitchName
- * String: the name of the pitch to convert to a Frequency.
- *
- * @return a new Frequency object
- *
- * @example Synthesis/frequencyExample
- *
- * @related Frequency
- */
- public static Frequency ofPitch(String pitchName)
- {
- // builds up the value of a midiNote used to create the returned Frequency
- float midiNote;
-
- // trim off any white space before or after
- pitchName = pitchName.trim();
-
- // check to see if this is a note
- if ( pitchName.matches( pitchRegex ) )
- {
- Minim.debug(pitchName + " matches the pitchRegex.");
- float noteOctave;
-
- // get octave
- Pattern pattern = Pattern.compile( noteOctaveRegex );
- Matcher matcher = pattern.matcher( pitchName );
-
- if ( matcher.find() )
- {
- String octaveString = pitchName.substring( matcher.start(), matcher.end() );
- noteOctave = Float.valueOf( octaveString.trim() ).floatValue();
- } else // default octave of 4
- {
- noteOctave = 4.0f;
- }
- midiNote = noteOctave*12.0f + 12.0f;
- Minim.debug("midiNote based on octave = " + midiNote );
-
- // get naturalness
- pattern = Pattern.compile( noteNaturalnessRegex );
- matcher = pattern.matcher( pitchName );
-
- while( matcher.find() )
- {
- String naturalnessString = pitchName.substring(matcher.start(), matcher.end() );
- if ( naturalnessString.equals("#") )
- {
- midiNote += 1.0f;
- } else // must be a "b"
- {
- midiNote -= 1.0f;
- }
- }
- Minim.debug("midiNote based on naturalness = " + midiNote );
-
- // get note
- pattern = Pattern.compile( noteNameRegex );
- matcher = pattern.matcher( pitchName );
-
- if ( matcher.find() )
- {
- String noteNameString = pitchName.substring(matcher.start(), matcher.end() );
- float noteOffset = (float) noteNameOffsets.get( noteNameString );
- midiNote += noteOffset;
- }
- Minim.debug("midiNote based on noteName = " + midiNote );
-
- // return a Frequency object with this midiNote
- return new Frequency( ofMidiNote( midiNote ).asHz() );
-
- } else // string does not conform to note name syntax
- {
- Minim.debug(pitchName + " DOES NOT MATCH.");
- // return a Frequency object of 0.0 Hz.
- return new Frequency( 0.0f );
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Gain.java b/src/ddf/minim/ugens/Gain.java
deleted file mode 100644
index b5527be..0000000
--- a/src/ddf/minim/ugens/Gain.java
+++ /dev/null
@@ -1,96 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- * Gain is another way of expressing an increase or decrease in the volume of something.
- * It is represented in decibels (dB), which is a logorithmic scale. A gain of 0 dB means
- * that you are not changing the volume of the incoming signal at all, positive gain boosts
- * the signal and negative gain decreases it. You can effectively silence
- * the incoming signal by setting the gain to something like -60.
- *
- * @example Synthesis/gainExample
- *
- * @author Damien Di Fede
- *
- */
-
-public class Gain extends UGen
-{
- /**
- * The audio input is where incoming signals should be patched, however you do not need
- * to patch directly to this input because patching to the Gain itself will accomplish
- * the same thing.
- *
- * @related Gain
- */
- public UGenInput audio;
-
- /**
- * The gain input controls the value of this Gain. It will be interpreted as being in dB.
- * 0 dB means that the incoming signal will not be changed, positive dB increases the
- * amplitude of the signal, and negative dB decreases it. You can effectively silence
- * the incoming signal by setting the gain to something like -60.
- *
- * @related Gain
- */
- public UGenInput gain;
-
- private float mValue;
-
- /**
- * Construct a Gain UGen with a value of 0 dB, which means
- * it will not change the volume of something patched to it.
- */
- public Gain()
- {
- this(0.f);
- }
-
- /**
- * Construct a Gain with the specific dBvalue. 0 dB is no change
- * to incoming audio, positive values make it louder and negative values
- * make it softer.
- *
- * @param dBvalue
- * float: the amount of gain to apply to the incoming signal
- */
- public Gain( float dBvalue )
- {
- // linear = pow ( 10.0, (0.05 * dBvalue) );
- mValue = (float)Math.pow(10.0, (0.05 * dBvalue));
-
- audio = new UGenInput(InputType.AUDIO);
- gain = new UGenInput(InputType.CONTROL);
- }
-
- /**
- * Set the value of this Gain to a given dB value.
- *
- * @param dBvalue
- * float: the new value for this Gain, in decibels.
- *
- * @example Synthesis/gainExample
- *
- * @related Gain
- */
- public void setValue( float dBvalue )
- {
- mValue = (float)Math.pow(10.0, (0.05 * dBvalue));
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- // TODO: not fond of the fact that we cast up to doubles for this math function.
- if ( gain.isPatched() )
- {
- mValue = (float)Math.pow(10.0, (0.05 * gain.getLastValue()));
- }
-
- for(int i = 0; i < channels.length; ++i)
- {
- channels[i] = mValue * audio.getLastValues()[i];
- }
- }
-}
diff --git a/src/ddf/minim/ugens/GranulateRandom.java b/src/ddf/minim/ugens/GranulateRandom.java
deleted file mode 100644
index 2141578..0000000
--- a/src/ddf/minim/ugens/GranulateRandom.java
+++ /dev/null
@@ -1,335 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-
-/**
- * GranulateRandom is randomly varying version of GranulateSteady.
- * Rather than have fixed values for grain length, space length,
- * and fade length, it has min and max values for each so that each
- * grain is different from the last.
- *
- * @example Synthesis/granulateRandomExample
- *
- * @related GranulateSteady
- * @related UGen
- *
- * @author Anderson Mills
- *
- */
-public class GranulateRandom extends UGen
-{
- /**
- * The default input is "audio."
- *
- * @related GranulateRandom
- */
- public UGenInput audio;
-
- /**
- * Controls the minimum length of each grain.
- *
- * @related GranulateRandom
- */
- public UGenInput grainLenMin;
-
- /**
- * Controls the minimum space between each grain.
- *
- * @related GranulateRandom
- */
- public UGenInput spaceLenMin;
-
- /**
- * Controls the minimum length of the fade in and fade out.
- *
- * @related GranulateRandom
- */
- public UGenInput fadeLenMin;
-
- /**
- * Controls the maximum length of each grain.
- *
- * @related GranulateRandom
- */
- public UGenInput grainLenMax;
-
- /**
- * Controls the maximum space between each grain.
- *
- * @related GranulateRandom
- */
- public UGenInput spaceLenMax;
-
- /**
- * Controls the maximum length of the fade in and fade out.
- *
- * @related GranulateRandom
- */
- public UGenInput fadeLenMax;
-
- // variables to determine the current placement WRT a grain
- private boolean insideGrain;
- private float timeSinceGrainStart;
- private float timeSinceGrainStop;
- private float timeStep;
-
- // variables to keep track of the grain value ranges
- private float fadeLength = 0.0025f;
- private float grainLength = 0.010f;
- private float spaceLength = 0.020f;
- private float fadeLengthMin = 0.0025f;
- private float grainLengthMin = 0.010f;
- private float spaceLengthMin = 0.020f;
- private float fadeLengthMax = 0.0025f;
- private float grainLengthMax = 0.010f;
- private float spaceLengthMax = 0.020f;
- private float minAmp = 0.0f;
- private float maxAmp = 1.0f;
-
- /**
- * Constructor for GranulateRandom.
- * grainLengthMin, minimum grain length of each grain, defaults to 10 msec.
- * spaceLengthMin, minimum space between each grain, defaults to 20 msec.
- * fadeLengthMin, minimum length of the linear fade in and fade out of the i
- * grain envelope, defaults to 2.5 msec.
- * grainLengthMax, maximum grain length of each grain, defaults to 100 msec.
- * spaceLengthMax, maximum space between each grain, defaults to 200 msec.
- * fadeLengthMax, maximum length of the linear fade in and fade out of the
- * grain envelope, defaults to 25 msec.
- *
- * minAmp, minimum amplitude of the envelope, defaults to 0.
- * maxAmp, maximum amplitude of the envelope, defaults to 1.
- */
- public GranulateRandom()
- {
- this( 0.010f, 0.020f, 0.0025f, 0.10f, 0.20f, 0.025f, 0.0f, 1.0f );
- }
- /**
- * Constructor for GranulateRandom.
- * minAmp, minimum amplitude of the envelope, defaults to 0.
- * maxAmp, maximum amplitude of the envelope, defaults to 1.
- *
- * @param grainLengthMin
- * float: minimum grain length of each grain in seconds
- * @param spaceLengthMin
- * float: minimum space between each grain in seconds
- * @param fadeLengthMin
- * float: minimum length of the linear fade in and fade out of the grain envelope in seconds
- * @param grainLengthMax
- * float: maximum grain length of each grain in seconds
- * @param spaceLengthMax
- * float: maximum space between each grain in seconds
- * @param fadeLengthMax
- * float: maximum length of the linear fade in and fade out of the grain envelope in seconds
- */
- public GranulateRandom(float grainLengthMin, float spaceLengthMin, float fadeLengthMin,
- float grainLengthMax, float spaceLengthMax, float fadeLengthMax )
- {
- this( grainLengthMin, spaceLengthMin, fadeLengthMin,
- grainLengthMax, spaceLengthMax, fadeLengthMax, 0.0f, 1.0f );
- }
- /**
- * Constructor for GranulateRandom
- *
- * @param grainLengthMin
- * float: minimum grain length of each grain in seconds
- * @param spaceLengthMin
- * float: minimum space between each grain in seconds
- * @param fadeLengthMin
- * float: minimum length of the linear fade in and fade out of the grain envelope in seconds
- * @param grainLengthMax
- * float: maximum grain length of each grain in seconds
- * @param spaceLengthMax
- * float: maximum space between each grain in seconds
- * @param fadeLengthMax
- * float: maximum length of the linear fade in and fade out of the grain envelope in seconds
- * @param minAmp
- * float: minimum amplitude of the envelope
- * @param maxAmp
- * float: maximum amplitude of the envelope
- */
- public GranulateRandom(float grainLengthMin, float spaceLengthMin, float fadeLengthMin,
- float grainLengthMax, float spaceLengthMax, float fadeLengthMax,
- float minAmp, float maxAmp)
- {
- super();
- // jam3: These can't be instantiated until the uGenInputs ArrayList
- // in the super UGen has been constructed
- audio = new UGenInput(InputType.AUDIO);
- grainLenMin = new UGenInput( InputType.CONTROL );
- spaceLenMin = new UGenInput( InputType.CONTROL );
- fadeLenMin = new UGenInput( InputType.CONTROL );
- grainLenMax = new UGenInput( InputType.CONTROL );
- spaceLenMax = new UGenInput( InputType.CONTROL );
- fadeLenMax = new UGenInput( InputType.CONTROL );
-
- setAllParameters( grainLengthMin, spaceLengthMin, fadeLengthMin,
- grainLengthMax, spaceLengthMax, fadeLengthMax,
- minAmp, maxAmp );
-
- insideGrain = false;
- timeSinceGrainStart = 0.0f;
- timeSinceGrainStop = 0.0f;
- timeStep = 0.0f;
- }
-
- /**
- * Use this method to notify GranulateRandom that the sample rate has changed.
- */
- protected void sampleRateChanged()
- {
- timeStep = 1.0f/sampleRate();
- }
-
- /**
- * Immediately sets all public class members concerning time to new values.
- *
- * @param grainLengthMin
- * float: minimum grain length of each grain in seconds
- * @param spaceLengthMin
- * float: minimum space between each grain in seconds
- * @param fadeLengthMin
- * float: minimum length of the linear fade in and fade out of the grain envelope in seconds
- * @param grainLengthMax
- * float: maximum grain length of each grain in seconds
- * @param spaceLengthMax
- * float: maximum space between each grain in seconds
- * @param fadeLengthMax
- * float: maximum length of the linear fade in and fade out of the grain envelope in seconds
- *
- * @related GranulateRandom
- */
- public void setAllTimeParameters(float grainLengthMin, float spaceLengthMin, float fadeLengthMin,
- float grainLengthMax, float spaceLengthMax, float fadeLengthMax)
- {
- setAllParameters(grainLengthMin, spaceLengthMin, fadeLengthMin, grainLengthMax, spaceLengthMax, fadeLengthMax, minAmp, maxAmp);
- }
-
- /**
- * Immediately sets all public class members to new values.
- *
- * @param grainLengthMin
- * float: minimum grain length of each grain in seconds
- * @param spaceLengthMin
- * float: minimum space between each grain in seconds
- * @param fadeLengthMin
- * float: minimum length of the linear fade in and fade out of the grain envelope in seconds
- * @param grainLengthMax
- * float: maximum grain length of each grain in seconds
- * @param spaceLengthMax
- * float: maximum space between each grain in seconds
- * @param fadeLengthMax
- * float: maximum length of the linear fade in and fade out of the grain envelope in seconds
- * @param minAmp
- * float: minimum amplitude of the envelope
- * @param maxAmp
- * float: maximum amplitude of the envelope
- *
- * @related GranulateRandom
- */
- public void setAllParameters(float grainLengthMin, float spaceLengthMin, float fadeLengthMin,
- float grainLengthMax, float spaceLengthMax, float fadeLengthMax,
- float minAmp, float maxAmp)
- {
- grainLenMin.setLastValue(grainLengthMin);
- grainLenMax.setLastValue(grainLengthMax);
- fadeLenMin.setLastValue(fadeLengthMin);
- fadeLenMax.setLastValue(fadeLengthMax);
- spaceLenMin.setLastValue(spaceLengthMin);
- spaceLenMax.setLastValue(spaceLengthMax);
-
- this.grainLengthMin = grainLengthMin;
- this.spaceLengthMin = spaceLengthMin;
- this.fadeLengthMin = fadeLengthMin;
- this.grainLengthMax = grainLengthMax;
- this.spaceLengthMax = spaceLengthMax;
- this.fadeLengthMax = fadeLengthMax;
-
- this.minAmp = minAmp;
- this.maxAmp = maxAmp;
- }
-
- // This makes sure that fadeLength isn't more than half the grainLength
- private void checkFadeLength()
- {
- fadeLength = Math.min( fadeLength, grainLength/2.0f );
- }
-
- // This is just a helper function to generate a random number between two others.
- // TODO place randomBetween somewhere more generic and useful.
- private float randomBetween( float min, float max )
- {
- return (max - min)*(float)Math.random() + min;
- }
-
- // Make the samples. Must make the samples
- @Override
- protected void uGenerate( float[] channels )
- {
- if ( insideGrain ) // inside a grain
- {
- // start with an amplitude at maxAmp
- float amp = maxAmp;
- if ( timeSinceGrainStart < fadeLength ) // inside the rise
- {
- // linear fade in
- amp *= timeSinceGrainStart/fadeLength;
- }
- else if ( timeSinceGrainStart > ( grainLength - fadeLength ) ) // inside the decay
- {
- // linear fade out
- amp *= ( grainLength - timeSinceGrainStart )/fadeLength;
- }
-
- // generate the sample
- for(int i = 0; i < channels.length; i++)
- {
- channels[i] = amp*audio.getLastValues()[i];
- }
-
- // increment time
- timeSinceGrainStart += timeStep;
-
- if ( timeSinceGrainStart > grainLength ) // just after a grain
- {
- // stop the grain
- timeSinceGrainStop = 0.0f;
- insideGrain = false;
- // set a new spaceLength
- spaceLengthMin = spaceLenMin.getLastValue();
- spaceLengthMax = spaceLenMax.getLastValue();
- spaceLength = randomBetween( spaceLengthMin, spaceLengthMax );
- }
- }
- else // outside a grain
- {
- for(int i = 0; i < channels.length; i++)
- {
- channels[i] = minAmp;
- }
-
- // increment time
- timeSinceGrainStop += timeStep;
-
- if (timeSinceGrainStop > spaceLength) // just inside a grain again
- {
- // start the grain
- timeSinceGrainStart = 0.0f;
- insideGrain = true;
- // set a new grain length
- grainLengthMin = grainLenMin.getLastValue();
- grainLengthMax = grainLenMax.getLastValue();
- grainLength = randomBetween( grainLengthMin, grainLengthMax );
-
- // set a new fade length
- fadeLengthMin = fadeLenMin.getLastValue();
- fadeLengthMax = fadeLenMax.getLastValue();
- fadeLength = randomBetween( fadeLengthMin, fadeLengthMax );
-
- // make sure the fade length is correct
- checkFadeLength();
- }
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/GranulateSteady.java b/src/ddf/minim/ugens/GranulateSteady.java
deleted file mode 100644
index a94f77b..0000000
--- a/src/ddf/minim/ugens/GranulateSteady.java
+++ /dev/null
@@ -1,253 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-
-/**
- * A UGen which chops the incoming audio into steady grains
- * of sound. The envelope of these sounds has a linear fade
- * in and fade out.
- *
- * @example Synthesis/granulateSteadyExample
- *
- * @related UGen
- * @related GranulateRandom
- *
- * @author Anderson Mills
- *
- */
-public class GranulateSteady extends UGen
-{
- /**
- * The default input is "audio."
- *
- * @related GranulateSteady
- */
- public UGenInput audio;
-
- /**
- * Controls the length of each grain.
- *
- * @related GranulateSteady
- */
- public UGenInput grainLen;
-
- /**
- * Controls the space between each grain.
- *
- * @related GranulateSteady
- */
- public UGenInput spaceLen;
-
- /**
- * Controls the length of the fade in and fade out.
- *
- * @related GranulateSteady
- */
- public UGenInput fadeLen;
-
- // variables to determine the current placement WRT a grain
- private boolean insideGrain;
- private float timeSinceGrainStart;
- private float timeSinceGrainStop;
- private float timeStep;
-
- // variables to keep track of the grain values
- // these are only set when appropriate for the algorithm
- // the user-manipulated values are held by the inputs
- private float grainLength = 0.010f;
- private float spaceLength = 0.020f;
- private float fadeLength = 0.0025f;
- private float minAmp = 0.0f;
- private float maxAmp = 1.0f;
-
- /**
- * Constructor for GranulateSteady.
- * grainLength, length of each grain, defaults to 10 msec.
- * spaceLength, space between each grain, defaults to 20 msec.
- * fadeLength, length of the linear fade in and fade out of the grain envelope, defaults to 2.5 msec.
- * minAmp, minimum amplitude of the envelope, defaults to 0.
- * maxAmp, maximum amplitude of the envelope, defaults to 1.
- */
- public GranulateSteady()
- {
- this( 0.01f, 0.02f, 0.0025f, 0.0f, 1.0f );
- }
- /**
- * Constructor for GranulateSteady.
- * minAmp, minimum amplitude of the envelope, defaults to 0.
- * maxAmp, maximum amplitude of the envelope, defaults to 1.
- *
- * @param grainLength
- * float: length of each grain in seconds
- * @param spaceLength
- * float: space between each grain in seconds
- * @param fadeLength
- * float: length of the linear fade in and fade out of the grain envelope in seconds
- */
- public GranulateSteady( float grainLength, float spaceLength, float fadeLength )
- {
- this( grainLength, spaceLength, fadeLength, 0.0f, 1.0f );
- }
- /**
- * Constructor for GranulateSteady.
- * @param grainLength
- * float: length of each grain in seconds
- * @param spaceLength
- * float: space between each grain in seconds
- * @param fadeLength
- * float: length of the linear fade in and fade out of the grain envelope in seconds
- * @param minAmp
- * float: minimum amplitude of the envelope
- * @param maxAmp
- * float: maximum amplitude of the envelope
- */
- public GranulateSteady( float grainLength, float spaceLength, float fadeLength, float minAmp, float maxAmp )
- {
- super();
- // jam3: These can't be instantiated until the uGenInputs ArrayList
- // in the super UGen has been constructed
- audio = new UGenInput(InputType.AUDIO);
- grainLen = new UGenInput( InputType.CONTROL );
- spaceLen = new UGenInput( InputType.CONTROL );
- fadeLen = new UGenInput( InputType.CONTROL );
- //amplitude = new UGenInput(InputType.CONTROL);
- setAllParameters( grainLength, spaceLength, fadeLength, minAmp, maxAmp );
- insideGrain = true;
- timeSinceGrainStart = 0.0f;
- timeSinceGrainStop = 0.0f;
- timeStep = 0.0f;
- }
-
- /**
- * Use this method to notify GranulateSteady that the sample rate has changed.
- */
- protected void sampleRateChanged()
- {
- timeStep = 1.0f/sampleRate();
- }
-
- /**
- * Immediately sets all public class members concerning time to new values.
- * @param grainLength
- * float: grain length of each grain in seconds
- * @param spaceLength
- * float: space between each grain in seconds
- * @param fadeLength
- * float: length of the linear fade in and fade out of the grain envelope in seconds
- *
- * @related GranulateSteady
- */
- public void setAllTimeParameters( float grainLength, float spaceLength, float fadeLength )
- {
- setAllParameters( grainLength, spaceLength, fadeLength, minAmp, maxAmp );
- }
-
- /**
- * Immediately sets all public class members to new values.
- *
- * @param grainLength
- * float: grain length of each grain in seconds
- * @param spaceLength
- * float: space between each grain in seconds
- * @param fadeLength
- * float: length of the linear fade in and fade out of the grain envelope in seconds
- * @param minAmp
- * float: minimum amplitude of the envelope
- * @param maxAmp
- * float: maximum amplitude of the envelope
- *
- * @related GranulateSteady
- */
- public void setAllParameters( float grainLength, float spaceLength, float fadeLength,
- float minAmp, float maxAmp)
- {
- grainLen.setLastValue(grainLength);
- spaceLen.setLastValue(spaceLength);
- fadeLen.setLastValue(fadeLength);
- this.grainLength = grainLength;
- this.spaceLength = spaceLength;
- this.fadeLength = fadeLength;
- this.minAmp = minAmp;
- this.maxAmp = maxAmp;
- }
-
- /**
- * Sets the state of this granulate to the very start of a grain.
- * Useful for syncing the granulate timing with other audio.
- *
- * @related GranulateSteady
- */
- public void reset()
- {
- // start the grain
- timeSinceGrainStart = 0.0f;
- insideGrain = true;
- // only set the grain values at the beginning of a grain
- grainLength = grainLen.getLastValue();
- checkFadeLength();
- fadeLength = fadeLen.getLastValue();
- checkFadeLength();
- }
-
- // This makes sure that fadeLength isn't more than half the grainLength
- private void checkFadeLength()
- {
- fadeLength = Math.min( fadeLength, grainLength/2.0f );
- }
-
- // Make those samples!
- @Override
- protected void uGenerate( float[] channels )
- {
- if ( insideGrain ) // inside a grain
- {
- // start with an amplitude at maxAmp
- float amp = maxAmp;
- if ( timeSinceGrainStart < fadeLength ) // inside the rise of the envelope
- {
- // linear fade in
- amp *= timeSinceGrainStart/fadeLength;
- }
- else if ( timeSinceGrainStart > ( grainLength - fadeLength ) ) // inside the decay of the envelope
- {
- // linear fade out
- amp *= ( grainLength - timeSinceGrainStart )/fadeLength;
- }
-
- // generate the sample
- for( int i = 0; i < channels.length; i++ )
- {
- channels[i] = amp*audio.getLastValues()[i];
- }
-
- // increment time
- timeSinceGrainStart += timeStep;
-
- if ( timeSinceGrainStart > grainLength ) // just after the grain
- {
- // stop the grain
- timeSinceGrainStop = 0.0f;
- insideGrain = false;
- // only set space volues at the beginning of a space
- spaceLength = spaceLen.getLastValue();
- }
- }
- else // outside of a grain
- {
- // generate the samples
- for( int i = 0; i < channels.length; i++ )
- {
- channels[i] = minAmp;
- }
-
- // increment time
- timeSinceGrainStop += timeStep;
-
- if ( timeSinceGrainStop > spaceLength ) // just inside a grain again
- {
- reset();
- }
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Instrument.java b/src/ddf/minim/ugens/Instrument.java
deleted file mode 100644
index e316b6f..0000000
--- a/src/ddf/minim/ugens/Instrument.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package ddf.minim.ugens;
-
-/**
- * The Instrument interface is expected by AudioOutput.playNote. You can create
- * your own instruments by implementing this interface in one of your classes.
- * Typically, you will create a class that constructs a UGen chain: an Oscil
- * patched to a filter patched to an ADSR. When noteOn is called you will patch
- * the end of your chain to the AudioOutput you are using and when noteOff is
- * called you will unpatch.
- *
- * @example Basics/CreateAnInstrument
- *
- * @author Damien Di Fede
- *
- */
-public interface Instrument
-{
- /**
- * Start playing a note.
- * This is called by AudioOutput when this Instrument's
- * note should begin, based on the values passed to playNote.
- * Typically you will patch your UGen chain to your AudioOutput here.
- *
- * @shortdesc Start playing a note.
- *
- * @param duration
- * float: how long the note will last
- * (i.e. noteOff will be called after this many seconds)
- *
- * @example Basics/CreateAnInstrument
- *
- * @related Instrument
- * @related noteOff ( )
- */
- void noteOn(float duration);
-
- /**
- * Stop playing a note.
- * This is called by AudioOuput when this Instrument's
- * note should end, based on the values passed to playNote.
- * Typically you will unpatch your UGen chain from your AudioOutput here.
- *
- * @shortdesc Stop playing a note.
- *
- * @example Basics/CreateAnInstrument
- *
- * @related Instrument
- * @related noteOn ( )
- */
- void noteOff();
-}
diff --git a/src/ddf/minim/ugens/Line.java b/src/ddf/minim/ugens/Line.java
deleted file mode 100644
index e3b166a..0000000
--- a/src/ddf/minim/ugens/Line.java
+++ /dev/null
@@ -1,183 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.Arrays;
-
-import ddf.minim.Minim;
-import ddf.minim.UGen;
-
-/**
- * A UGen that starts at a value and changes linearly to another value over a specified time.
- *
- * @example Synthesis/lineExample
- *
- * @author nodog
- *
- */
-public class Line extends UGen
-{
- // jam3: define the inputs to Oscil
- // the initial amplitude
- private float begAmp;
- // the ending amplitude
- private float endAmp;
- // the current amplitude
- private float amp;
- // the time from begAmp to endAmp
- private float lineTime;
- // the current size of the step
- private float timeStepSize;
- // the current time
- private float lineNow;
- // the damp has been activated
- private boolean isActivated;
-
- /**
- * Constructs a Line that starts at 1 and transitions to 0 over 1 second.
- */
- public Line()
- {
- this(1.0f, 1.0f, 0.0f);
- }
-
- /**
- * Constructs a Line that starts at 1 and transitions to 0 over dT seconds.
- *
- * @param dT
- * float: how long it should take, in seconds, to transition from the beginning value to the end value.
- */
- public Line(float dT)
- {
- this(dT, 1.0f, 0.0f);
- }
-
- /**
- * Constructs a Line that starts at beginningAmplitude and transitions to 0 over dT seconds.
- *
- * @param dT
- * float: how long it should take, in seconds, to transition from the beginning value to the end value.
- * @param beginningAmplitude
- * float: the value to begin at
- */
- public Line(float dT, float beginningAmplitude)
- {
- this(dT, beginningAmplitude, 0.0f);
- }
-
- /**
- * Constructs a Line that starts at beginningAmplitude and transitions to endAmplitude over dT seconds.
- *
- * @param dT
- * float: how long it should take, in seconds, to transition from the beginning value to the end value.
- * @param beginningAmplitude
- * float: the value to begin at
- * @param endAmplitude
- * float: the value to end at
- */
- public Line(float dT, float beginningAmplitude, float endAmplitude)
- {
- super();
- lineTime = dT;
- begAmp = beginningAmplitude;
- amp = begAmp;
- endAmp = endAmplitude;
- lineNow = 0f;
- isActivated = false;
- Minim.debug(" dampTime = " + lineTime + " begAmp = " + begAmp + " now = " + lineNow);
- }
-
- /**
- * Start the Line's transition.
- *
- */
- public void activate()
- {
- lineNow = 0f;
- amp = begAmp;
- isActivated = true;
- }
-
- /**
- * Start the Line's transition after setting all parameters for the Line.
- *
- * @param duration
- * float: how long it should take, in seconds, to transition from the beginning value to the end value.
- * @param beginAmp
- * float: the value to begin at
- * @param endingAmp
- * float: the value to end at
- */
- public void activate( float duration, float beginAmp, float endingAmp )
- {
- begAmp = beginAmp;
- endAmp = endingAmp;
- lineTime = duration;
- activate();
- }
-
- /**
- * Has the Line completed its transition.
- *
- * @return
- * true if the Line has completed
- */
- public boolean isAtEnd()
- {
- return (lineNow >= lineTime);
- }
-
- /**
- * Set the ending value of the Line's transition.
- * This can be set while a Line is transitioning without causing
- * serious discontinuities in the Line's output.
- *
- * @shortdesc Set the ending value of the Line's transition.
- *
- * @param newEndAmp
- * float: the new value to end at
- */
- public void setEndAmp( float newEndAmp )
- {
- endAmp = newEndAmp;
- }
-
- /**
- * Set the length of this Line's transition.
- *
- * @param newLineTime
- * float: the new transition time (in seconds)
- */
- public void setLineTime( float newLineTime )
- {
- lineTime = newLineTime;
- }
-
- /**
- * Change the timeStepSize when sampleRate changes.
- */
- @Override
- protected void sampleRateChanged()
- {
- timeStepSize = 1/sampleRate();
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- //Minim.debug(" dampTime = " + dampTime + " begAmp = " + begAmp + " now = " + now);
- if (!isActivated)
- {
- Arrays.fill( channels, begAmp );
- }
- else if (lineNow >= lineTime)
- {
- Arrays.fill( channels, endAmp );
- }
- else
- {
- amp += ( endAmp - amp )*timeStepSize/( lineTime - lineNow );
- //Minim.debug(" dampTime = " + dampTime + " begAmp = " + begAmp + " amp = " + amp + " dampNow = " + dampNow);
- Arrays.fill( channels, amp );
- lineNow += timeStepSize;
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/LiveInput.java b/src/ddf/minim/ugens/LiveInput.java
deleted file mode 100644
index a046765..0000000
--- a/src/ddf/minim/ugens/LiveInput.java
+++ /dev/null
@@ -1,56 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-import ddf.minim.spi.AudioStream;
-
-/**
- * LiveInput is a way to wrap an input stream with the UGen interface so that you can
- * easily route incoming audio through a UGen graph. You can get an AudioStream that is
- * reading audio input from Minim by calling Minim.getInputStream.
- *
- * @example Synthesis/liveInputExample
- *
- * @author Damien Di Fede
- *
- * @related UGen
- * @related Minim
- *
- */
-
-public class LiveInput extends UGen
-{
- private AudioStream mInputStream;
-
- /**
- * Constructs a LiveInput that will read from inputStream.
- * @param inputStream
- * AudioStream: the audio stream this LiveInput will read from
- */
- public LiveInput( AudioStream inputStream )
- {
- mInputStream = inputStream;
- inputStream.open();
- }
-
- /**
- * Calling close will close the AudioStream that this wraps,
- * which is proper cleanup for using the stream.
- */
- public void close()
- {
- mInputStream.close();
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- float[] samples = mInputStream.read();
- // TODO: say the input is mono and output is stereo, what should we do?
- // should we just copy like this and have the input come in the
- // left side? Or should we somehow expand across the extra channels?
- // what about the opposite problem? stereo input to mono output?
- int length = ( samples.length >= channels.length ) ? channels.length : samples.length;
- System.arraycopy(samples, 0, channels, 0, length);
- }
-
-}
diff --git a/src/ddf/minim/ugens/Midi2Hz.java b/src/ddf/minim/ugens/Midi2Hz.java
deleted file mode 100644
index a09b4e0..0000000
--- a/src/ddf/minim/ugens/Midi2Hz.java
+++ /dev/null
@@ -1,73 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.Arrays;
-
-import ddf.minim.UGen;
-
-/**
- * Midi2Hz is a UGen that will convert a MIDI note number to a frequency in
- * Hertz. This is useful if you want to drive the frequency input of an Oscil
- * with something that generates MIDI notes.
- *
- * @example Synthesis/midiFreqKeyboardExample
- *
- * @author Anderson Mills
- *
- */
-
-public class Midi2Hz extends UGen
-{
- /**
- * Patch something to this input that generates MIDI note numbers
- * (values in the range [0,127])
- *
- * @related Midi2Hz
- * @related UGen.UGenInput
- */
- public UGenInput midiNoteIn;
-
- /**
- * Construct a Midi2Hz that generates a fixed value from MIDI note 0.
- *
- */
- public Midi2Hz()
- {
- this(0.0f);
- }
-
- /**
- * Construct a Midi2Hz that generates a fixed value from fixedMidiNoteIn.
- *
- * @param fixedMidiNoteIn
- * float: the MIDI note to convert to Hz (values in the range [0,127])
- */
- public Midi2Hz(float fixedMidiNoteIn)
- {
- super();
- // jam3: These can't be instantiated until the uGenInputs ArrayList
- // in the super UGen has been constructed
- // audio = new UGenInput(InputType.AUDIO);
- midiNoteIn = new UGenInput(InputType.CONTROL);
- midiNoteIn.setLastValue(fixedMidiNoteIn);
- }
-
- /**
- * Set the fixed value this will use if midiNoteIn is not patched.
- *
- * @param fixedMidiNoteIn
- * float: the MIDI note to convert to Hz (values in the range [0,127])
- *
- * @related midiNoteIn
- * @related Midi2Hz
- */
- public void setMidiNoteIn(float fixedMidiNoteIn)
- {
- midiNoteIn.setLastValue(fixedMidiNoteIn);
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- Arrays.fill( channels, Frequency.ofMidiNote(midiNoteIn.getLastValue()).asHz() );
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/MoogFilter.java b/src/ddf/minim/ugens/MoogFilter.java
deleted file mode 100644
index 8141e6f..0000000
--- a/src/ddf/minim/ugens/MoogFilter.java
+++ /dev/null
@@ -1,226 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.Arrays;
-
-import ddf.minim.UGen;
-
-//Moog 24 dB/oct resonant lowpass VCF
-//References: CSound source code, Stilson/Smith CCRMA paper.
-//Modified by paul.kellett@maxim.abel.co.uk July 2000
-//Java implementation by Damien Di Fede September 2010
-
-/**
- * MoogFilter is a digital model of a Moog 24 dB/octave resonant VCF.
- * It can be set to low pass, high pass, or band pass using the
- * MoogFilter.Type enumeration. More generally, a filter is used to
- * remove certain ranges of the audio spectrum from a sound.
- * A low pass filter will allow frequencies below the cutoff frequency
- * to be heard, a high pass filter allows frequencies above the cutoff
- * frequency to be heard, a band pass filter will allow frequencies
- * to either side of the center frequency to be heard. With MoogFilter,
- * the cutoff frequency and the center frequency are set using the
- * frequency
input. Because this is a resonant
- * filter, it means that frequencies close to the cutoff of center frequency
- * will become slighly emphasized, depending on the value of the
- * resonance
input. The resonance of the filter has a
- * range from 0 to 1, where as the resonance approaches 1 the filter will
- * begin to "ring" at the cutoff frequency.
- *
- * @example Synthesis/moogFilterExample
- *
- * @related UGen
- *
- * @author Damien Di Fede
- *
- */
-public class MoogFilter extends UGen
-{
- /**
- * The MoogFilter.Type enumeration is used to set
- * the filter mode of a MoogFilter. HP is high pass,
- * LP is low pass, and BP is band pass.
- *
- * @example Synthesis/moogFilterExample
- *
- * @related type
- * @related MoogFilter
- *
- * @nosuperclasses
- */
- public enum Type
- {
- /**
- * The value representing high pass.
- *
- * @related type
- */
- HP,
-
- /**
- * The value representing low pass.
- *
- * @related type
- */
- LP,
-
- /**
- * The value representing band pass.
- *
- * @related type
- */
- BP
- }
-
- /**
- * The main audio input where the the UGen
- * you want to filter should be patched.
- *
- * @related MoogFilter
- * @related UGen.UGenInput
- */
- public UGenInput audio;
-
- /**
- * The cutoff (or center) frequency of the filter,
- * expressed in Hz.
- *
- * @example Synthesis/moogFilterExample
- *
- * @related MoogFilter
- * @related UGen.UGenInput
- */
- public UGenInput frequency;
-
- /**
- * The resonance of the filter, expressed as a normalized value [0,1].
- *
- * @example Synthesis/moogFilterExample
- *
- * @related MoogFilter
- * @related UGen.UGenInput
- */
- public UGenInput resonance;
-
- /**
- * The current type of this filter: low pass, high pass, or band pass.
- *
- * @example Synthesis/moogFilterExample
- *
- * @related MoogFilter.Type
- */
- public Type type;
-
- private float coeff[][]; // filter buffers (beware denormals!)
-
- /**
- * Creates a low pass filter.
- *
- * @param frequencyInHz
- * float: the cutoff frequency for the filter
- * @param normalizedResonance
- * float: the resonance of the filter [0,1]
- */
- public MoogFilter( float frequencyInHz, float normalizedResonance )
- {
- this( frequencyInHz, normalizedResonance, Type.LP );
- }
-
- /**
- * Creates a filter of the type specified.
- *
- * @param frequencyInHz
- * float: the cutoff frequency for the filter
- * @param normalizedResonance
- * float: the resonance of the filter [0,1]
- * @param filterType
- * the type of the filter: MoogFilter.Type.HP (high pass),
- * MoogFitler.Type.LP (low pass), or MoogFilter.Type.BP (band pass)
- */
- public MoogFilter(float frequencyInHz, float normalizedResonance, Type filterType )
- {
- super();
-
- audio = new UGenInput( InputType.AUDIO );
- frequency = new UGenInput( InputType.CONTROL );
- resonance = new UGenInput( InputType.CONTROL );
- type = filterType;
-
- frequency.setLastValue( frequencyInHz );
- resonance.setLastValue( constrain( normalizedResonance, 0.f, 1.f ) );
-
- coeff = new float[channelCount()][5];
- }
-
- protected void channelCountChanged()
- {
- if ( coeff == null || coeff.length != channelCount() )
- {
- coeff = new float[channelCount()][5];
- }
- }
-
- protected void uGenerate(float[] out)
- {
- // Set coefficients given frequency & resonance [0.0...1.0]
- float t1, t2; // temporary buffers
- float normFreq = frequency.getLastValue() / ( sampleRate() * 0.5f );
- float rez = constrain( resonance.getLastValue(), 0.f, 1.f );
-
- float q = 1.0f - normFreq;
- float p = normFreq + 0.8f * normFreq * q;
- float f = p + p - 1.0f;
- q = rez * ( 1.0f + 0.5f * q * ( 1.0f - q + 5.6f * q * q ) );
-
- float[] input = audio.getLastValues();
-
- for ( int i = 0; i < channelCount(); ++i )
- {
- // Filter (in [-1.0...+1.0])
- float[] b = coeff[i];
- float in = constrain( input[i], -1, 1 ); // hard clip
-
- in -= q * b[4]; // feedback
-
- t1 = b[1];
- b[1] = ( in + b[0] ) * p - b[1] * f;
-
- t2 = b[2];
- b[2] = ( b[1] + t1 ) * p - b[2] * f;
-
- t1 = b[3];
- b[3] = ( b[2] + t2 ) * p - b[3] * f;
-
- b[4] = ( b[3] + t1 ) * p - b[4] * f;
- b[4] = b[4] - b[4] * b[4] * b[4] * 0.166667f; // clipping
-
- // inelegantly squash denormals
- if ( Float.isNaN( b[4] ) )
- {
- Arrays.fill( b, 0 );
- }
-
- b[0] = in;
-
- switch( type )
- {
- case HP:
- out[i] = in - b[4];
- break;
-
- case LP:
- out[i] = b[4];
- break;
-
- case BP:
- out[i] = 3.0f * (b[3] - b[4]);
- }
- }
- }
-
- private float constrain( float value, float min, float max )
- {
- if ( value < min ) return min;
- if ( value > max ) return max;
- return value;
- }
-}
diff --git a/src/ddf/minim/ugens/Multiplier.java b/src/ddf/minim/ugens/Multiplier.java
deleted file mode 100644
index cd13978..0000000
--- a/src/ddf/minim/ugens/Multiplier.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- * Multiplier is a UGen that will simply multiply the incoming signal by whatever
- * its amplitude input is currently generating, which could be constant if
- * nothing is patched to it.
- *
- * @example Synthesis/multiplierExample
- *
- * @author Damien Di Fede
- *
- * @related UGen
- *
- */
-
-public class Multiplier extends UGen
-{
- /**
- * The audio input is where incoming audio should be patched, but you can simply patch to the
- * Multiplier itself.
- *
- * @related Multiplier
- * @related UGen.UGenInput
- */
- public UGenInput audio;
-
- /**
- * The amplitude input allows you to control the value being used for multiplying with another UGen.
- *
- * @related Multiplier
- * @related UGen.UGenInput
- */
- public UGenInput amplitude;
-
- /**
- * Construct a Multiplier with a fixed value of 1, which will mean incoming audio is not changed.
- *
- */
- public Multiplier()
- {
- this( 1f );
- }
-
- /**
- * Construct a Multiplier with a fixed value.
- *
- * @param value
- * float: the amplitude for the Multiplier
- */
- public Multiplier( float value )
- {
- super();
- // jam3: These can't be instantiated until the uGenInputs ArrayList
- // in the super UGen has been constructed
- //audio = new UGenInput(InputType.AUDIO);
- audio = new UGenInput(InputType.AUDIO);
- amplitude = new UGenInput(InputType.CONTROL);
- amplitude.setLastValue( value );
- }
-
- /**
- * Set the amplitude of this Multiplier.
- *
- * @param value
- * float: the new amplitude for the Multiplier
- *
- * @example Synthesis/multiplierExample
- *
- * @related amplitude
- * @related Multiplier
- * @related UGen
- */
- public void setValue( float value )
- {
- amplitude.setLastValue( value );
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- for(int i = 0; i < channelCount(); i++)
- {
- channels[i] = amplitude.getLastValue() * audio.getLastValues()[i];
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Noise.java b/src/ddf/minim/ugens/Noise.java
deleted file mode 100644
index 4f20119..0000000
--- a/src/ddf/minim/ugens/Noise.java
+++ /dev/null
@@ -1,243 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-
-/**
- * A UGen that can generate White, Pink, or Red/Brown noise.
- *
- * @example Synthesis/noiseExample
- *
- * @author Anderson Mills, Damien Di Fede
- *
- * @related UGen
- * @related Noise.Tint
- */
-public class Noise extends UGen
-{
- /**
- * An enumeration used to specify the tint of a Noise UGen.
- *
- * @example Synthesis/noiseTintExample
- *
- * @nosuperclasses
- *
- * @related Noise
- */
- public enum Tint { WHITE, PINK, RED, BROWN };
-
- /**
- * Patch to this to control the amplitude of the noise with another UGen.
- *
- * @related Noise
- */
- public UGenInput amplitude;
-
- /**
- * Patch to this to offset the value of the noise by a fixed value.
- *
- * @related Noise
- */
- public UGenInput offset;
-
- // the type of noise
- private Tint tint;
- // the last output value
- private float lastOutput;
- // cutoff frequency for brown/red noise
- private float brownCutoffFreq = 100.0f;
- // alpha filter coefficient for brown/red noise
- private float brownAlpha;
- // amplitude correction for brown noise;
- private float brownAmpCorr = 6.2f;
-
- /**
- * Constructor for white noise.
- * By default, the amplitude will be 1 and the tint will be WHITE.
- */
- public Noise()
- {
- this( 1.0f, 0.f, Tint.WHITE );
- }
- /**
- * Constructor for white noise of the specified amplitude.
- *
- * @param amplitude
- * float: the amplitude of the noise
- */
- public Noise( float amplitude )
- {
- this( amplitude, 0.f, Tint.WHITE ) ;
- }
- /**
- * Constructor for noise of the specified tint with an amplitude of 1.0.
- *
- * @param noiseType
- * Noise.Tint: specifies the tint of the noise
- * (Noise.Tint.WHITE, Noise.Tint.PINK, Noise.Tint.RED, Noise.Tint.BROWN)
- */
- public Noise( Tint noiseType )
- {
- this( 1.0f, 0.f, noiseType ) ;
- }
- /**
- * Constructor for noise of a specific tint with a specified amplitude.
- *
- * @param amplitude
- * float: the amplitude of the noise
- * @param noiseType
- * Noise.Tint: specifies the tint of the noise
- * (Noise.Tint.WHITE, Noise.Tint.PINK, Noise.Tint.RED, Noise.Tint.BROWN)
- */
- public Noise(float amplitude, Tint noiseType)
- {
- this(amplitude, 0.f, noiseType);
- }
- /**
- * Constructor for noise of a specific tint with a specified amplitude and offset.
- * @param amplitude
- * float: the amplitude of the noise
- * @param offset
- * float: the value that should be added to the noise to offset the "center"
- * @param noiseType
- * Noise.Tint: specifies the tint of the noise
- * (Noise.Tint.WHITE, Noise.Tint.PINK, Noise.Tint.RED, Noise.Tint.BROWN)
- */
- public Noise(float amplitude, float offset, Tint noiseType)
- {
- this.amplitude = addControl(amplitude);
- this.offset = addControl(offset);
- lastOutput = 0f;
- tint = noiseType;
- if ( tint == Tint.PINK )
- {
- initPink();
- }
- }
-
- /**
- * Set the Noise.Tint to use.
- *
- * @param noiseType
- * Noise.Tint: specifies the tint of the noise
- * (Noise.Tint.WHITE, Noise.Tint.PINK, Noise.Tint.RED, Noise.Tint.BROWN)
- *
- * @related Noise
- * @related Noise.Tint
- */
- public void setTint( Tint noiseType )
- {
- if ( tint != noiseType )
- {
- if ( noiseType == Tint.PINK )
- {
- initPink();
- }
- tint = noiseType;
- }
- }
-
- /**
- * Returns the current Noise.Tint in use
- *
- * @return Noise.Tint: the current tint of the noise
- * (Noise.Tint.WHITE, Noise.Tint.PINK, Noise.Tint.RED, Noise.Tint.BROWN)
- *
- * @related Noise
- * @related Noise.Tint
- */
- public final Tint getTint()
- {
- return tint;
- }
-
- @Override
- protected void sampleRateChanged()
- {
- float dt = 1.0f/sampleRate();
- float RC = 1.0f/( 2.0f*(float)Math.PI*brownCutoffFreq );
- brownAlpha = dt/( RC + dt );
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- // start with our base amplitude
- float outAmp = amplitude.getLastValue();
-
- float n;
- switch (tint)
- {
- // BROWN is a 1/f^2 spectrum (20db/decade, 6db/octave).
- // There is some disagreement as to whether
- // brown and red are the same, but here they are.
- case BROWN :
- case RED :
- // I admit that I'm using the filter coefficients and
- // amplitude correction from audacity, a great audio editor.
- n = outAmp*(2.0f*(float)Math.random() - 1.0f);
- n = brownAlpha*n + ( 1 - brownAlpha )*lastOutput;
- lastOutput = n;
- n *= brownAmpCorr;
- break;
- // PINK noise has a 10db/decade (3db/octave) slope
- case PINK :
- n = outAmp*pink();
- break;
- case WHITE :
- default :
- n = outAmp*(2.0f*(float)Math.random() - 1.0f);
- break;
- }
- n += offset.getLastValue();
- for(int i = 0; i < channels.length; i++)
- {
- channels[i] = n;
- }
- }
-
- // The code below (including comments) is taken directly from ddf's old PinkNoise.java code
- // This is the Voss algorithm for creating pink noise
- private int maxKey, key, range;
- private float whiteValues[];
- private float maxSumEver;
-
- private void initPink()
- {
- maxKey = 0x1f;
- range = 128;
- maxSumEver = 90;
- key = 0;
- whiteValues = new float[6];
- for (int i = 0; i < 6; i++)
- whiteValues[i] = ((float) Math.random() * Long.MAX_VALUE) % (range / 6);
- }
-
- // return a pink noise value
- private float pink()
- {
- int last_key = key;
- float sum;
-
- key++;
- if (key > maxKey) key = 0;
- // Exclusive-Or previous value with current value. This gives
- // a list of bits that have changed.
- int diff = last_key ^ key;
- sum = 0;
- for (int i = 0; i < 6; i++)
- {
- // If bit changed get new random number for corresponding
- // white_value
- if ((diff & (1 << i)) != 0)
- {
- whiteValues[i] = ((float) Math.random() * Long.MAX_VALUE) % (range / 6);
- }
- sum += whiteValues[i];
- }
- if (sum > maxSumEver) maxSumEver = sum;
- sum = 2f * (sum / maxSumEver) - 1f;
- return sum;
- }
-
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Oscil.java b/src/ddf/minim/ugens/Oscil.java
deleted file mode 100644
index 430f909..0000000
--- a/src/ddf/minim/ugens/Oscil.java
+++ /dev/null
@@ -1,391 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.Arrays;
-
-import ddf.minim.UGen;
-
-/**
- * Oscil testTone = new Oscil( 440, 1, Waves.SINE );
- * Oscil testTone = new Oscil( Frequency.ofPitch("A4"), 1, Waves.SINE );
- *
- * @example Basics/SynthesizeSound
- *
- * @related UGen
- * @related Waveform
- * @related Waves
- * @related WavetableGenerator
- * @related Frequency
- *
- * @author Damien Di Fede, Anderson Mills
- *
- */
-public class Oscil extends UGen
-{
- /**
- * Patch to this to control the amplitude of the oscillator with another
- * UGen.
- *
- * @example Synthesis/oscilEnvExample
- *
- * @related Oscil
- */
- public UGenInput amplitude;
-
- /**
- * Patch to this to control the frequency of the oscillator with another
- * UGen.
- *
- * @example Synthesis/frequencyModulation
- *
- * @related Oscil
- */
- public UGenInput frequency;
-
- /**
- * Patch to this to control the phase of the oscillator with another UGen.
- *
- * @example Synthesis/oscilPhaseExample
- *
- * @related Oscil
- */
- public UGenInput phase;
-
- /**
- * Patch to this to control the DC offset of the Oscil with another UGen.
- * This is useful when using an Oscil as a modulator.
- *
- * @example Synthesis/frequencyModulation
- *
- * @related Oscil
- */
- public UGenInput offset;
-
- // the waveform we will oscillate over
- private Waveform wave;
-
- // where we will sample our waveform, moves between [0,1]
- private float step;
- // the step size we will use to advance our step
- private float stepSize;
- // what was our frequency from the last time we updated our step size
- // stashed so that we don't do more math than necessary
- private float prevFreq;
- // 1 / sampleRate, which is used to calculate stepSize
- private float oneOverSampleRate;
-
- // constructors
- /**
- * Constructs an Oscil UGen, given frequency in Hz, amplitude, and a waveform
- *
- * @param frequencyInHertz
- * float: the frequency this Oscil should oscillate at
- * @param amplitude
- * float: the amplitude of this Oscil.
- * @param waveform
- * Waveform: the waveform this Oscil will oscillate over
- *
- * @related Waveform
- */
- public Oscil(float frequencyInHertz, float amplitude, Waveform waveform)
- {
- this( Frequency.ofHertz( frequencyInHertz ), amplitude, waveform );
- }
-
- /**
- * Constructs an Oscil UGen given frequency in Hz and amplitude. This
- * oscillator uses a sine wave.
- *
- * @param frequencyInHertz
- * float: the frequency this Oscil should oscillate at
- * @param amplitude
- * float: the amplitude of this Oscil.
- */
- public Oscil(float frequencyInHertz, float amplitude)
- {
- this( Frequency.ofHertz( frequencyInHertz ), amplitude );
- }
-
- /**
- * Constructs an Oscil UGen given a Frequency and amplitude. This oscillator
- * uses a sine wave.
- *
- * @param frequency
- * Frequency: the frequency this Oscil should oscillate at.
- * @param amplitude
- * float: the amplitude of this Oscil.
- */
- // shortcut for building a sine wave
- public Oscil(Frequency frequency, float amplitude)
- {
- this( frequency, amplitude, Waves.SINE );
- }
-
- /**
- * Constructs an Oscil UGen given a Frequency, amplitude, and a waveform
- *
- * @param frequency
- * Frequency: the frequency this Oscil should oscillate at.
- * @param amplitude
- * float: the amplitude of this Oscil.
- * @param waveform
- * Waveform: the waveform this Oscil will oscillate over
- *
- * @related Frequency
- * @related Waveform
- */
- public Oscil(Frequency frequency, float amplitude, Waveform waveform)
- {
- super();
-
- this.amplitude = new UGenInput( InputType.CONTROL );
- this.amplitude.setLastValue( amplitude );
-
- this.frequency = new UGenInput( InputType.CONTROL );
- this.frequency.setLastValue( frequency.asHz() );
-
- phase = new UGenInput( InputType.CONTROL );
- phase.setLastValue( 0.f );
-
- offset = new UGenInput( InputType.CONTROL );
- offset.setLastValue( 0.f );
-
- wave = waveform;
- step = 0f;
- oneOverSampleRate = 1.f;
- }
-
- /**
- * This routine will be called any time the sample rate changes.
- */
- protected void sampleRateChanged()
- {
- oneOverSampleRate = 1 / sampleRate();
- // don't call updateStepSize because it checks for frequency change
- stepSize = frequency.getLastValue() * oneOverSampleRate;
- prevFreq = frequency.getLastValue();
- }
-
- // updates our step size based on the current frequency
- private void updateStepSize()
- {
- float currFreq = frequency.getLastValue();
- if ( prevFreq != currFreq )
- {
- stepSize = currFreq * oneOverSampleRate;
- prevFreq = currFreq;
- }
- }
-
- /**
- * Sets the frequency of this Oscil. You might want to do this to change the
- * frequency of this Oscil in response to a button press or something. For
- * controlling frequency continuously over time you will usually want to use
- * the frequency input.
- *
- * @shortdesc Sets the frequency of this Oscil.
- *
- * @param hz
- * the frequency, in Hertz, to set this Oscil to
- *
- * @example Basics/SynthesizeSound
- *
- * @related frequency
- * @related Frequency
- * @related Oscil
- */
- public void setFrequency(float hz)
- {
- frequency.setLastValue( hz );
- updateStepSize();
- }
-
- /**
- * Sets the frequency of this Oscil. You might want to do this to change the
- * frequency of this Oscil in response to a button press or something. For
- * controlling frequency continuously over time you will usually want to use
- * the frequency input.
- *
- * @shortdesc Sets the frequency of this Oscil.
- *
- * @param newFreq
- * the Frequency to set this Oscil to
- *
- * @example Basics/SynthesizeSound
- *
- * @related frequency
- * @related Frequency
- * @related Oscil
- */
- public void setFrequency(Frequency newFreq)
- {
- frequency.setLastValue( newFreq.asHz() );
- updateStepSize();
- }
-
- /**
- * Sets the amplitude of this Oscil. You might want to do this to change the
- * amplitude of this Oscil in response to a button press or something. For
- * controlling amplitude continuously over time you will usually want to use
- * the amplitude input.
- *
- * @shortdesc Sets the amplitude of this Oscil.
- *
- * @param newAmp
- * amplitude to set this Oscil to
- *
- * @example Basics/SynthesizeSound
- *
- * @related amplitude
- * @related Oscil
- */
- public void setAmplitude(float newAmp)
- {
- amplitude.setLastValue( newAmp );
- }
-
- /**
- * Set the amount that the phase will be offset by. Oscil steps its time
- * from 0 to 1, which means that the phase is also normalized. However, it
- * still makes sense to set the phase to greater than 1 or even to a
- * negative number.
- *
- * @shortdesc Set the amount that the phase will be offset by.
- *
- * @param newPhase
- * float: the phase offset value
- *
- * @related phase
- * @related Oscil
- */
- public void setPhase(float newPhase)
- {
- phase.setLastValue( newPhase );
- }
-
- /**
- * Changes the Waveform used by this Oscil.
- *
- * @param theWaveform
- * the new Waveform to use
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waveform
- * @related Oscil
- */
- public void setWaveform(Waveform theWaveform)
- {
- wave = theWaveform;
- }
-
- /**
- * Returns the Waveform currently being used by this Oscil.
- *
- * @return a Waveform
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waveform
- * @related Oscil
- */
- public Waveform getWaveform()
- {
- return wave;
- }
-
- /**
- * Resets the time-step used by the Oscil to be equal to the current
- * phase input value. You will typically use this when starting a new note with an
- * Oscil that you have already used so that the waveform will begin sounding
- * at the beginning of its period, which will typically be a zero-crossing.
- * In other words, use this to prevent clicks when starting Oscils that have
- * been used before.
- *
- * @shortdesc Resets the time-step used by the Oscil to be equal to the current
- * phase input value.
- *
- * @example Synthesis/oscilPhaseExample
- *
- * @related Oscil
- */
- public void reset()
- {
- step = phase.getLastValue();
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- // start with our base amplitude
- float outAmp = amplitude.getLastValue();
-
- // temporary step location with phase offset.
- float tmpStep = step + phase.getLastValue();
- // don't be less than zero
- if ( tmpStep < 0.f )
- {
- tmpStep -= (int)tmpStep - 1f;
- }
- // don't exceed 1.
- // we don't use Math.floor because that involves casting up
- // to a double and then back to a float.
- if ( tmpStep > 1.0f )
- {
- tmpStep -= (int)tmpStep;
- }
-
- // calculate the sample value
- float sample = outAmp * wave.value( tmpStep ) + offset.getLastValue();
-
- Arrays.fill( channels, sample );
-
- // update our step size.
- // this will check to make sure the frequency has changed.
- updateStepSize();
-
- // increase time
- // NOT THIS FROM BEFORE: step += stepSize + fPhase;
- step += stepSize;
-
- // don't be less than zero
- if ( step < 0.f )
- {
- step -= (int)step - 1f;
- }
-
- // don't exceed 1.
- // we don't use Math.floor because that involves casting up
- // to a double and then back to a float.
- if ( step > 1.0f )
- {
- step -= (int)step;
- }
- }
-}
diff --git a/src/ddf/minim/ugens/Pan.java b/src/ddf/minim/ugens/Pan.java
deleted file mode 100644
index b49244f..0000000
--- a/src/ddf/minim/ugens/Pan.java
+++ /dev/null
@@ -1,155 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- * A UGen for panning a mono signal in a stereo field.
- * Because of the generally accepted meaning of pan,
- * this UGen strictly enforces the channel count of its
- * input and output. Anything patched to the audio input
- * of Pan will be configured to generate mono audio, and when
- * Pan is patched to any other UGen, it will throw an
- * exception if that UGen tries to set Pan's channel count
- * to anything other than 2.
- *
- * @example Synthesis/panExample
- *
- * @related UGen
- * @related Balance
- *
- * @author nb, ddf
- */
-
-public class Pan extends UGen
-{
- /**
- * UGens patched to this input should generate values between -1 and +1.
- *
- * @example Synthesis/panExample
- *
- * @related Pan
- * @related setPan ( )
- */
- public UGenInput pan;
-
- private UGen audio;
- private float[] tickBuffer = new float[1];
-
- static private float PIOVER2 = (float)Math.PI / 2.f;
-
- /**
- * Construct a Pan UGen with a specific starting pan value.
- *
- * @param panValue
- * float: a value of 0 means to pan dead center,
- * -1 hard left, and 1 hard right.
- */
- public Pan(float panValue)
- {
- super();
- pan = addControl( panValue );
- }
-
- /**
- * Set the pan value of this Pan. Values passed to this method should be
- * between -1 and +1. This is equivalent to calling the setLastValue method
- * on the pan input directly.
- *
- * @param panValue
- * the new value for the pan input
- *
- * @related Pan
- * @related pan
- */
- public void setPan(float panValue)
- {
- pan.setLastValue( panValue );
- }
-
- @Override
- protected void addInput(UGen in)
- {
- // System.out.println("Adding " + in.toString() + " to Pan.");
- audio = in;
- // we only deal in MONO!
- audio.setChannelCount( 1 );
- }
-
- @Override
- protected void removeInput(UGen input)
- {
- if ( audio == input )
- {
- audio = null;
- }
- }
-
- @Override
- protected void sampleRateChanged()
- {
- if ( audio != null )
- {
- audio.setSampleRate( sampleRate() );
- }
- }
-
- /**
- * Pan overrides setChannelCount to ensure that it can
- * never be set to output more or fewer than 2 channels.
- */
- @Override
- public void setChannelCount(int numberOfChannels)
- {
- if ( numberOfChannels == 2 )
- {
- super.setChannelCount( numberOfChannels );
- }
- else
- {
- throw new IllegalArgumentException( "Pan MUST be ticked with STEREO output! It doesn't make sense in any other context!" );
- }
- }
-
- /**
- * NOTE: Currently only supports stereo audio!
- */
- @Override
- protected void uGenerate(float[] channels)
- {
- if ( channels.length != 2 )
- {
- throw new IllegalArgumentException( "Pan MUST be ticked with STEREO output! It doesn't make sense in any other context!" );
- }
-
- float panValue = pan.getLastValue();
-
- // tick our audio as MONO because that's what a Pan is for!
- if ( audio != null )
- {
- audio.tick( tickBuffer );
- }
-
- // formula swiped from the MIDI specification:
- // http://www.midi.org/techspecs/rp36.php
- // Left Channel Gain [dB] = 20*log (cos (Pi/2* max(0,CC#10 - 1)/126)
- // Right Channel Gain [dB] = 20*log (sin (Pi /2* max(0,CC#10 - 1)/126)
-
- // dBvalue = 20.0 * log10 ( linear );
- // dB = 20 * log (linear)
-
- // conversely...
- // linear = pow ( 10.0, (0.05 * dBvalue) );
- // linear = 10^(dB/20)
-
- float normBalance = ( panValue + 1.f ) * 0.5f;
-
- // note that I am calculating amplitude directly, by using the linear
- // value
- // that the MIDI specification suggests inputing into the dB formula.
- float leftAmp = (float)Math.cos( PIOVER2 * normBalance );
- float rightAmp = (float)Math.sin( PIOVER2 * normBalance );
-
- channels[0] = tickBuffer[0] * leftAmp;
- channels[1] = tickBuffer[0] * rightAmp;
- }
-}
diff --git a/src/ddf/minim/ugens/Reciprocal.java b/src/ddf/minim/ugens/Reciprocal.java
deleted file mode 100644
index 9a0fd66..0000000
--- a/src/ddf/minim/ugens/Reciprocal.java
+++ /dev/null
@@ -1,69 +0,0 @@
-package ddf.minim.ugens;
-
-import ddf.minim.UGen;
-
-/**
- * A UGen which simply returns the reciprocal value of it's input.
- * Because this UGen is intended for use with control signals,
- * rather than audio signals, it behaves as a mono UGen, regardless
- * of whether or not it has been configured with more than one channel.
- * This means that the output of Reciprocal will always be the reciprocal
- * of the first (and usually only) channel of the denominator input copied
- * to all output channels, similar to Constant.
- *
- * @related UGen
- *
- * @author nodog
- *
- */
-
-public class Reciprocal extends UGen
-{
- /**
- * denominator is the default audio input
- */
- public UGenInput denominator;
-
- /**
- * Constructs a Reciprocal with a denominator of 1.
- */
- public Reciprocal()
- {
- this( 1.0f );
- }
-
- /**
- * Constructs a Reciprocal with the given denominator value.
- *
- * @param fixedDenominator
- * the denominator value if the input is never connected
- */
- public Reciprocal(float fixedDenominator)
- {
- super();
- // audio = new UGenInput(InputType.AUDIO);
- // for this UGen, denominator is the main input and can be audio
- denominator = new UGenInput( InputType.AUDIO );
- denominator.setLastValue( fixedDenominator );
- }
-
- /**
- * Used to change the fixedDenominator value after instantiation
- *
- * @param fixedDenominator
- * the denominator value if the input is never connected
- */
- public void setReciprocal(float fixedDenominator)
- {
- denominator.setLastValue( fixedDenominator );
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- for ( int i = 0; i < channels.length; i++ )
- {
- channels[i] = 1.0f / denominator.getLastValue();
- }
- }
-}
\ No newline at end of file
diff --git a/src/ddf/minim/ugens/Sampler.java b/src/ddf/minim/ugens/Sampler.java
deleted file mode 100644
index 2e69f45..0000000
--- a/src/ddf/minim/ugens/Sampler.java
+++ /dev/null
@@ -1,308 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.Arrays;
-
-import ddf.minim.Minim;
-import ddf.minim.MultiChannelBuffer;
-import ddf.minim.UGen;
-
-/**
- * Sampler is the UGen version of AudioSample and is
- * the preferred method of triggering short audio files.
- * You will also find Sampler much more flexible,
- * since it provides ways to trigger only part of a sample, and
- * to trigger a sample at different playback rates. Also, unlike AudioSample,
- * a Sampler lets you specify how many voices (i.e. simultaneous
- * playbacks of the sample) should have.
- * amplitude
, which controls the volume of the Sampler
- * as a whole.
- *
- * @example Advanced/DrumMachine
- *
- * @related AudioSample
- * @related UGen
- *
- * @author Damien Di Fede
- *
- */
-
-public class Sampler extends UGen
-{
- /**
- * The sample number in the source sample
- * the voice will start at when triggering this Sampler.
- */
- public UGenInput begin;
-
- /**
- * The sample number in the source sample
- * the voice will end at when triggering this Sampler.
- */
- public UGenInput end;
-
- /**
- * The attack time, in seconds, when triggering
- * this Sampler. Attack time is used to ramp up
- * the amplitude of the voice. By default it
- * is 0 seconds.
- */
- public UGenInput attack;
-
- /**
- * The amplitude of this Sampler. This acts as an
- * overall volume control. So changing the amplitude
- * will effect all currently active voices.
- */
- public UGenInput amplitude;
-
- /**
- * The playback rate used when triggering this Sampler.
- */
- public UGenInput rate;
-
- /**
- * Whether triggered voices should loop or not.
- */
- public boolean looping;
-
- private MultiChannelBuffer sampleData;
- // what's the sample rate of our sample data
- private float sampleDataSampleRate;
- // what's the baseline playback rate.
- // this is set whenever sampleRateChanged is called
- // and is used to scale the value of the rate input
- // when starting a trigger. we need this so that,
- // for example, 22k sample data will playback at
- // the correct speed when played through a 44.1k
- // UGen chain.
- private float basePlaybackRate;
-
- // Trigger class is defined at bottom of Sampler imp
- private Trigger[] triggers;
- private int nextTrigger;
-
- /**
- * Create a new Sampler for triggering the provided file.
- *
- * @param filename
- * String: the file to load
- * @param maxVoices
- * int: the maximum number of voices for this Sampler
- * @param system
- * Minim: the instance of Minim to use for loading the file
- *
- */
- public Sampler( String filename, int maxVoices, Minim system )
- {
- triggers = new Trigger[maxVoices];
- for( int i = 0; i < maxVoices; ++i )
- {
- triggers[i] = new Trigger();
- }
-
- sampleData = new MultiChannelBuffer(1,1);
- sampleDataSampleRate = system.loadFileIntoBuffer( filename, sampleData );
-
- createInputs();
- }
-
- /**
- * Create a Sampler that will use the audio in the provided MultiChannelBuffer
- * for its sample. It will make a copy of the data, so modifying the provided
- * buffer after the fact will not change the audio in this Sampler.
- * The original sample rate of the audio data must be provided
- * so that the default playback rate of the Sampler can be set properly.
- * Additionally, you must specify how many voices the Sampler should use,
- * which will determine how many times the sound can overlap with itself
- * when triggered.
- *
- * @param sampleData
- * MultiChannelBuffer: the sample data this Sampler will use to generate sound
- * @param sampleRate
- * float: the sample rate of the sampleData
- * @param maxVoices
- * int: the maximum number of voices for this Sampler
- *
- * @related MultiChannelBuffer
- */
- public Sampler( MultiChannelBuffer sampleData, float sampleRate, int maxVoices )
- {
- triggers = new Trigger[maxVoices];
- for( int i = 0; i < maxVoices; ++i )
- {
- triggers[i] = new Trigger();
- }
-
- this.sampleData = new MultiChannelBuffer( sampleData.getChannelCount(), sampleData.getBufferSize() );
- this.sampleData.set( sampleData );
- sampleDataSampleRate = sampleRate;
-
- createInputs();
- }
-
- private void createInputs()
- {
- begin = addControl(0);
- end = addControl(sampleData.getBufferSize()-1);
- attack = addControl();
- amplitude = addControl(1);
- rate = addControl(1);
- }
-
- /**
- * Trigger this Sampler. If all of the Sampler's voices
- * are currently in use, it will use the least recently
- * triggered voice, which means whatever that voice is
- * currently playing will get cut off. For this reason,
- * choose the number of voices you want carefully.
- *
- * @shortdesc Trigger this Sampler.
- */
- public void trigger()
- {
- triggers[nextTrigger].activate();
- nextTrigger = (nextTrigger+1)%triggers.length;
- }
-
- /**
- * Stop all active voices. In other words,
- * immediately silence this Sampler.
- */
- public void stop()
- {
- for( Trigger t : triggers )
- {
- t.stop();
- }
- }
-
- /**
- * Sets the sample data used by this Sampler by copying the
- * contents of the provided MultiChannelBuffer into the internal buffer.
- *
- * @param newSampleData
- * MultiChannelBuffer: the new sample data for this Sampler
- * @param sampleRate
- * float: the sample rate of the sample data
- *
- * @related MultiChannelBuffer
- */
- public void setSample( MultiChannelBuffer newSampleData, float sampleRate )
- {
- sampleData.set( newSampleData );
- sampleDataSampleRate = sampleRate;
- basePlaybackRate = sampleRate / sampleRate();
- }
-
- @Override
- protected void sampleRateChanged()
- {
- basePlaybackRate = sampleDataSampleRate / sampleRate();
- }
-
- @Override
- protected void uGenerate(float[] channels)
- {
- Arrays.fill( channels, 0 );
- for( Trigger t : triggers )
- {
- t.generate( channels );
- }
- }
-
- private class Trigger
- {
- // begin and end sample numbers
- float beginSample;
- float endSample;
- // playback rate
- float playbackRate;
- // what sample we are at in our trigger. expressed as a float to handle variable rate.
- float sample;
- // how many output samples we have generated, tracked for attack/release
- float outSampleCount;
- // attack time, in samples
- int attackLength;
- // current amplitude mod for attack
- float attackAmp;
- // how much to increase the attack amp each sample frame
- float attackAmpStep;
- // release time, in samples
- int release;
- // whether we are done playing our bit of the sample or not
- boolean done;
- // whether we should start triggering in the next call to generate
- boolean triggering;
-
- Trigger()
- {
- done = true;
- }
-
- // start this Trigger playing with the current settings of the Sampler
- void activate()
- {
- triggering = true;
- }
-
- // stop this trigger
- void stop()
- {
- done = true;
- }
-
- // generate one sample frame of data
- void generate( float[] sampleFrame )
- {
- if ( triggering )
- {
- beginSample = (int)Math.min( begin.getLastValue(), sampleData.getBufferSize()-2);
- endSample = (int)Math.min( end.getLastValue(), sampleData.getBufferSize()-1 );
- playbackRate = rate.getLastValue();
- attackLength = (int)Math.max( sampleRate() * attack.getLastValue(), 1.f );
- attackAmp = 0;
- attackAmpStep = 1.0f / attackLength;
- release = 0;
- sample = beginSample;
- outSampleCount = 0;
- done = false;
- triggering = false;
- }
-
- if ( done ) return;
-
- final float outAmp = amplitude.getLastValue() * attackAmp;
-
- for( int c = 0; c < sampleFrame.length; ++c )
- {
- int sourceChannel = c < sampleData.getChannelCount() ? c : sampleData.getChannelCount() - 1;
- sampleFrame[c] += outAmp * sampleData.getSample( sourceChannel, sample );
- }
-
- sample += playbackRate*basePlaybackRate;
-
- if ( sample > endSample )
- {
- if ( looping )
- {
- sample -= endSample - beginSample;
- }
- else
- {
- done = true;
- }
- }
-
- ++outSampleCount;
- if ( outSampleCount <= attackLength )
- {
- attackAmp += attackAmpStep;
- }
- }
- }
-}
diff --git a/src/ddf/minim/ugens/Sink.java b/src/ddf/minim/ugens/Sink.java
deleted file mode 100644
index 32811a5..0000000
--- a/src/ddf/minim/ugens/Sink.java
+++ /dev/null
@@ -1,33 +0,0 @@
-package ddf.minim.ugens;
-
-
-/**
- * A Sink is similar to a Summer, but instead of summing all of the UGens patched to it,
- * it simply ticks them and only generates silence. This is useful if you have a UGen that
- * needs to be ticked but that shouldn't be generating audio, such as an EnvelopeFollower.
- *
- * @example Synthesis/envelopeFollowerExample
- *
- * @related Summer
- *
- * @author Damien Di Fede
- *
- */
-// ddf: I'm extending Summer because dealing with our own array of UGens is tricky.
-// Extending Summer means we can keep that code in one place.
-public class Sink extends Summer
-{
- public Sink()
- {
- super();
- }
-
- // we do nothing here because a Sink should always output silence.
- // since Summer always fills the output with silence before ticking
- // its list, we don't even need to do that work.
- @Override
- protected void processSampleFrame( float[] in, float[] out )
- {
- return;
- }
-}
diff --git a/src/ddf/minim/ugens/Summer.java b/src/ddf/minim/ugens/Summer.java
deleted file mode 100644
index c35eeb7..0000000
--- a/src/ddf/minim/ugens/Summer.java
+++ /dev/null
@@ -1,196 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-
-import ddf.minim.AudioSignal;
-import ddf.minim.Minim;
-import ddf.minim.UGen;
-
-/**
- * A Summer allows you to sum the outputs of multiple UGens to be sent further
- * down the chain. Unlike most UGen effects, you can patch more than one UGen to
- * a Summer.
- *
- * @example Synthesis/summerExample
- *
- * @author Damien Di Fede
- *
- */
-public class Summer extends UGen implements AudioSignal
-{
- private ArrayList
- * waveform.value( 0.25f ) == sin( PI/2 )
- * waveform.value( 0.5f ) == sin( PI )
- * waveform.value( 0.75f ) == sin( 3*PI/2 )
- *
- *
- * @shortdesc Sample the Waveform at the location specified.
- *
- * @param at
- * float: a value in the range [0,1]
- * @return float: the value of the Waveform at the sampled location
- *
- * @related Waveform
- */
- float value(float at);
-}
diff --git a/src/ddf/minim/ugens/Waves.java b/src/ddf/minim/ugens/Waves.java
deleted file mode 100644
index be5f952..0000000
--- a/src/ddf/minim/ugens/Waves.java
+++ /dev/null
@@ -1,417 +0,0 @@
-package ddf.minim.ugens;
-
-/**
- * Waves provides some already constructed Wavetables for common waveforms, as
- * well as methods for constructing some basic waveforms with non-standard
- * parameters. For instance, you can use the QUARTERPULSE member if you want a
- * typical "thin" square wave sound, but you might want a square wave with a 60%
- * duty cycle instead, which you can create by passing 0.6f to the square
- * method. Methods exist for generating basic waves with multiple harmonics,
- * basic waves with different duty cycles, and noise.
- *
- * @example Synthesis/waveformExample
- *
- * @related Wavetable
- * @related WavetableGenerator
- * @related Oscil
- *
- * @author Nicolas Brix, Anderson Mills
- */
-public class Waves
-{
- // private constructor so it doesn't show up in documentation
- // and so that people can't make instances of this class, which is all
- // static methods
- private Waves()
- {
- }
-
- /**
- * standard size for a Wavetable from Waves
- */
- private static int tableSize = 8192;
- private static int tSby2 = tableSize / 2;
- private static int tSby4 = tableSize / 4;
-
- // Perfect waveforms
- /**
- * A pure sine wave.
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public final static Wavetable SINE = WavetableGenerator.gen10(
- tableSize,
- new float[] { 1 } );
- /**
- * A perfect sawtooth wave.
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public final static Wavetable SAW = WavetableGenerator.gen7(
- tableSize,
- new float[] { 0,-1, 1, 0 },
- new int[] { tSby2, 0, tableSize - tSby2 } );
-
- /**
- * A perfect phasor wave going from 0 to 1.
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public final static Wavetable PHASOR = WavetableGenerator.gen7( tableSize,
- new float[] { 0, 1 },
- new int[] { tableSize } );
- /**
- * A perfect square wave with a 50% duty cycle.
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public final static Wavetable SQUARE = WavetableGenerator.gen7(
- tableSize,
- new float[] { -1, -1, 1, 1 },
- new int[] { tSby2, 0, tableSize - tSby2 } );
-
- /**
- * A perfect triangle wave.
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public final static Wavetable TRIANGLE = WavetableGenerator.gen7(
- tableSize,
- new float[] { 0, 1, -1, 0 },
- new int[] { tSby4, tSby2, tableSize - tSby2 - tSby4 } );
-
- /**
- * A perfect square wave with a 25% duty cycle.
- *
- * @example Basics/SynthesizeSound
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public final static Wavetable QUARTERPULSE = WavetableGenerator.gen7(
- tableSize,
- new float[] { -1, -1, 1, 1 },
- new int[] { tSby4, 0, tableSize - tSby4 } );
-
- /**
- * Builds an approximation of a perfect sawtooth wave by summing together
- * harmonically related sine waves.
- *
- * @param numberOfHarmonics
- * int: the number of harmonics to use in the approximation. 1 harmonic
- * will simply generate a sine wave. The greater the number of
- * harmonics used, the closer to a pure saw wave the approximation will be.
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable sawh(int numberOfHarmonics)
- {
- float[] content = new float[numberOfHarmonics];
- for ( int i = 0; i < numberOfHarmonics; i++ )
- {
- content[i] = (float)( ( -2 ) / ( ( i + 1 ) * Math.PI ) * Math.pow( -1, i + 1 ) );
- }
- return WavetableGenerator.gen10( tableSize, content );
- }
-
- /**
- * Constructs a perfect sawtooth wave with the specified duty cycle.
- *
- * @param dutyCycle
- * float: a sawtooth wave with a duty cycle of 0.5 will be
- * a perfect sawtooth wave that smoothly changes from 1 to -1
- * with a zero-crossing in the middle. By changing the duty
- * cycle, you change how much of the sawtooth is below zero.
- * So, a duty cycle of 0.2 would result in 20 percent of the
- * sawtooth below zero and the rest above. Duty cycle will
- * be clamped to [0,1].
- *
- * @return Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable saw(float dutyCycle)
- {
- dutyCycle = Math.max( 0, Math.min( dutyCycle, 1 ) );
- int a = (int)( tableSize * dutyCycle );
- return WavetableGenerator.gen7( tableSize, new float[] { 0, -1, 1, 0 }, new int[] { a, 0, tableSize - a } );
- }
-
- /**
- * Builds an approximation of a perfect square wave by summing together
- * harmonically related sine waves.
- *
- * @param numberOfHarmonics
- * int: the number of harmonics to use in the approximation. 1 harmonic
- * will simply generate a sine wave. The greater the number of
- * harmonics used, the closer to a pure saw wave the approximation will be.
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable squareh(int numberOfHarmonics)
- {
- float[] content = new float[numberOfHarmonics + 1];
- for ( int i = 0; i < numberOfHarmonics; i += 2 )
- {
- content[i] = (float)1 / ( i + 1 );
- content[i + 1] = 0;
- }
- return WavetableGenerator.gen10( tableSize, content );
- }
-
- /**
- * Constructs a perfect square wave with the specified duty cycle.
- *
- * @param dutyCycle
- * float: a square wave with a duty cycle of 0.5 will be
- * a perfect square wave that is 1 half the time and -1 the other half.
- * By changing the duty cycle, you change how much of the square
- * is below zero. So, a duty cycle of 0.2 would result in 20 percent of the
- * square below zero and the rest above. Duty cycle will
- * be clamped to [0,1].
- *
- * @return Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable square(float dutyCycle)
- {// same as pulse
- return pulse( dutyCycle );
- }
-
- /**
- * Constructs a perfect square wave with the specified duty cycle.
- *
- * @param dutyCycle
- * float: a square wave with a duty cycle of 0.5 will be
- * a perfect square wave that is 1 half the time and -1 the other half.
- * By changing the duty cycle, you change how much of the square
- * is below zero. So, a duty cycle of 0.2 would result in 20 percent of the
- * square below zero and the rest above. Duty cycle will
- * be clamped to [0,1].
- *
- * @return Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable pulse(float dutyCycle)
- {
- dutyCycle = Math.max( 0, Math.min( dutyCycle, 1 ) );
- return WavetableGenerator.gen7( tableSize,
- new float[] { -1, -1, 1, 1 },
- new int[] { (int)( dutyCycle * tableSize ), 0, tableSize - (int)( dutyCycle * tableSize ) } );
- }
-
- /**
- * Builds an approximation of a perfect triangle wave by summing together
- * harmonically related sine waves.
- *
- * @param numberOfHarmonics
- * int: the number of harmonics to use in the approximation. 1 harmonic
- * will simply generate a sine wave. The greater the number of
- * harmonics used, the closer to a pure saw wave the approximation will be.
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable triangleh(int numberOfHarmonics)
- {
- float[] content = new float[numberOfHarmonics + 1];
- for ( int i = 0; i < numberOfHarmonics; i += 2 )
- {
- content[i] = (float)( Math.pow( -1, i / 2 ) * 8 / Math.PI / Math.PI / Math.pow( i + 1, 2 ) );
- content[i + 1] = 0;
- }
- return WavetableGenerator.gen10( tableSize, content );
- }
-
- /**
- * Constructs a perfect triangle wave with the specified duty cycle.
- *
- * @param dutyCycle
- * float: a triangle wave with a duty cycle of 0.5 will be
- * a perfect triangle wave that is 1 half the time and -1 the other half.
- * By changing the duty cycle, you change how much of the triangle
- * is below zero. So, a duty cycle of 0.2 would result in 20 percent of the
- * triangle below zero and the rest above. Duty cycle will
- * be clamped to [0,1].
- *
- * @return Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable triangle(float dutyCycle)
- {
- dutyCycle = Math.max( 0, Math.min( dutyCycle, 1 ) );
- int a = (int)( tableSize * dutyCycle * 0.5 );
- return WavetableGenerator.gen7( tableSize,
- new float[] { 0, -1, 0, 1, 0 }, new int[] { a, a, tSby2 - a, tableSize - tSby2 - a } );
- }
-
- // TODO a dutycycled sine wavetable : i think a new warp() method in
- // Wavetable would be the best
-
- /**
- * Constructs a waveform by summing together the first numberOfHarmonics
- * in the harmonic series with randomly chosen amplitudes. This often
- * sounds like an organ.
- *
- * @param numberOfHarmonics
- * int: the number of harmonics to use when generating the wave
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable randomNHarms(int numberOfHarmonics)
- {
- float[] harmAmps = new float[numberOfHarmonics];
- for ( int i = 0; i < numberOfHarmonics; i++ )
- {
- harmAmps[i] = (float)Math.random() * 2 - 1;
- }
- Wavetable builtWave = WavetableGenerator.gen10( tableSize, harmAmps );
- builtWave.normalize();
- return builtWave;
- }
-
- /**
- * Constructs a waveform by summing together the first odd numberOfHarmonics
- * in the harmonic series (1, 3, 5, etc) with randomly chosen amplitudes.
- * This often sounds like an organ with a band pass filter on it.
- *
- * @param numberOfHarmonics
- * int: the number of odd harmonics to use when generating the wave
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable randomNOddHarms(int numberOfHarmonics)
- {
- float[] harmAmps = new float[numberOfHarmonics * 2];
- for ( int i = 0; i < numberOfHarmonics; i += 1 )
- {
- harmAmps[i * 2] = (float)Math.random() * 2 - 1;
- harmAmps[i * 2 + 1] = 0.0f;
- }
- Wavetable builtWave = WavetableGenerator.gen10( tableSize, harmAmps );
- builtWave.normalize();
- return builtWave;
- }
-
- /**
- * Constructs a Wavetable of randomly generated noise.
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Wavetable
- * @related Waveform
- */
- public static Wavetable randomNoise()
- {
- float[] builtArray = new float[tableSize];
- for ( int i = 0; i < builtArray.length; i++ )
- {
- builtArray[i] = (float)Math.random() * 2 - 1;
- }
- Wavetable builtWave = new Wavetable( builtArray );
- builtWave.normalize();
- return builtWave;
- }
-
- /**
- * Generates a Wavetable by adding any number of Waveforms, each scaled by an amplitude.
- *
- * Calling this method might look like:
- *
- * Wavetable wave = Wavetable.add( new float[] { 0.8f, 0.2f }, Waves.SINE, Waves.SAW );
- *
- * or:
- *
- * Wavetable wave = Wavetable.add( new float[] { 0.2f, 0.3f, 0.5f }, Waves.SINE, Waves.SQUARE, Waves.sawh( 6 ) );
- *
- *
- * In other words, the number of elements in the amplitude array
- * must match the number of Waveform arguments provided.
- *
- * @shortdesc Generates a Wavetable by adding any number of Waveforms, each scaled by an amplitude.
- *
- * @param amps
- * float[]: an array of amplitudes used to scale the matching Waveform argument
- * when adding it into the final Wavetable.
- * @param waves
- * Waveform vararg: The Waveforms to be added together. The number of Waveforms
- * passed in as arguments much match the length of the amps array.
- *
- * @example Synthesis/waveformExample
- *
- * @return a Wavetable
- *
- * @related Waves
- * @related Waveform
- * @related Wavetable
- */
- public static Wavetable add(float[] amps, Waveform... waves)
- {
- if ( amps.length != waves.length )
- {
- System.out.println( "add() : amplitude array size must match the number of waveforms!" );
- return null;
- }
-
- float[] accumulate = new float[tableSize];
- for ( int i = 0; i < waves.length; i++ )
- {
- for ( int j = 0; j < tableSize; j++ )
- {
- float lu = (float)j / tableSize;
- accumulate[j] += waves[i].value( lu ) * amps[i];
- }
- }
- return new Wavetable( accumulate );
- }
-}
diff --git a/src/ddf/minim/ugens/Wavetable.java b/src/ddf/minim/ugens/Wavetable.java
deleted file mode 100644
index 24bbe10..0000000
--- a/src/ddf/minim/ugens/Wavetable.java
+++ /dev/null
@@ -1,402 +0,0 @@
-package ddf.minim.ugens;
-
-import java.util.Random;
-
-/**
- * Wavetable wraps a float array of any size and lets you sample the array using
- * a normalized value [0,1]. This means that if you have an array that is 2048
- * samples long, then value(0.5) will give you the 1024th sample. You will most
- * often use Wavetables as the Waveform in an Oscil, but other uses are also
- * possible. Additionally, Wavetable provides a set of methods for transforming
- * the samples it contains.
- *
- * @example Synthesis/WavetableMethods
- *
- * @related Waveform
- * @related Waves
- * @related WavetableGenerator
- *
- * @author Mark Godfrey <mark.godfrey@gatech.edu>
- */
-
-public class Wavetable implements Waveform
-{
-
- private float[] waveform;
- // precalculate this since we use it alot
- private float lengthForValue;
-
- /**
- * Construct a Wavetable that contains size
entries.
- *
- * @param size
- * int: the number of samples the Wavetable should contain
- *
- * @related Wavetable
- */
- public Wavetable(int size)
- {
- waveform = new float[size];
- lengthForValue = size - 1;
- }
-
- /**
- * Construct a Wavetable that will use waveform
as the float
- * array to sample from. This will not copy waveform
,
- * it will use it directly.
- *
- * @param waveform
- * float[]: the float array this Wavetable will sample
- *
- * @related Wavetable
- */
- public Wavetable(float[] waveform)
- {
- this.waveform = waveform;
- lengthForValue = waveform.length - 1;
- }
-
- /**
- * Make a new Wavetable that has the same waveform values as
- * wavetable
. This will copy the values from the
- * provided Wavetable into this Wavetable's waveform.
- *
- * @param wavetable
- * Wavetable: the Wavetable to copy
- *
- * @related Wavetable
- */
- public Wavetable(Wavetable wavetable)
- {
- waveform = new float[wavetable.waveform.length];
- System.arraycopy( wavetable.waveform, 0, waveform, 0, waveform.length );
- lengthForValue = waveform.length - 1;
- }
-
- /**
- * Sets this Wavetable's waveform to the one provided. This
- * will not copy the values from the provided waveform, it will use
- * the waveform directly.
- *
- * @param waveform
- * float[]: the new sample data
- *
- * @related Wavetable
- */
- public void setWaveform(float[] waveform)
- {
- this.waveform = waveform;
- lengthForValue = waveform.length - 1;
- }
-
- /**
- * Returns the value of the ith entry in this Wavetable's
- * waveform. This is equivalent to getWaveform()[i].
- *
- * @shortdesc Returns the value of the ith entry in this Wavetable's
- * waveform.
- *
- * @param i
- * int: the index of the sample to return
- *
- * @return float: the value of the sample at i
- *
- * @related Wavetable
- */
- public float get(int i)
- {
- return waveform[i];
- }
-
- /**
- * Sample the Wavetable using a value in the range [0,1]. For instance, if
- * the Wavetable has 1024 values in its float array, then calling value(0.5)
- * will return the 512th value in the array. If the result is that it needs
- * say the 456.65th value, this will interpolate between the surrounding
- * values.
- *
- * @shortdesc Sample the Wavetable using a value in the range [0,1].
- *
- * @example Synthesis/WavetableMethods
- *
- * @param at
- * float: a value in the range [0, 1]
- *
- * @return float: this Wavetable sampled at the requested interval
- *
- * @related Wavetable
- */
- public float value(float at)
- {
- float whichSample = lengthForValue * at;
-
- // linearly interpolate between the two samples we want.
- int lowSamp = (int)whichSample;
- int hiSamp = lowSamp + 1;
- // lowSamp might be the last sample in the waveform
- // we need to make sure we wrap.
- if ( hiSamp >= waveform.length )
- {
- hiSamp -= waveform.length;
- }
-
- float rem = whichSample - lowSamp;
-
- return waveform[lowSamp] + rem
- * ( waveform[hiSamp] - waveform[lowSamp] );
-
- // This was here for testing.
- // Causes non-interpolation, but adds max # of oscillators
- // return get(lowSamp);
- }
-
- /**
- * Returns the underlying waveform, not a copy of it.
- *
- * @return float[]: the float array managed by this Wavetable
- *
- * @related Wavetable
- */
- public float[] getWaveform()
- {
- return waveform;
- }
-
- /**
- * Sets the ith entry of the underlying waveform to
- * value
. This is equivalent to:
- * getWaveform()[i] = value;
- *
- * @param i
- * int: the index of the sample to set
- * @param value
- * float: the new sample value
- *
- * @related Wavetable
- */
- public void set(int i, float value)
- {
- waveform[i] = value;
- }
-
- /**
- * Returns the length of the underlying waveform. This is equivalent to:
- * getWaveform().length
- *
- * @return int: the length of the underlying float array
- *
- * @related Wavetable
- */
- public int size()
- {
- return waveform.length;
- }
-
- /**
- * Multiplies each value of the underlying waveform by scale
.
- *
- * @param scale
- * float: the amount to scale the Wavetable with
- *
- * @related Wavetable
- */
- public void scale(float scale)
- {
- for ( int i = 0; i < waveform.length; i++ )
- {
- waveform[i] *= scale;
- }
- }
-
- /**
- * Apply a DC offset to this Wavetable. In other words, add
- * amount
to every sample.
- *
- * @param amount
- * float: the amount to add to every sample in the table
- *
- * @related Wavetable
- */
- public void offset(float amount)
- {
- for ( int i = 0; i < waveform.length; ++i )
- {
- waveform[i] += amount;
- }
- }
-
- /**
- * Normalizes the Wavetable by finding the largest amplitude in the table
- * and scaling the table by the inverse of that amount. The result is that
- * the largest value in the table will now have an amplitude of 1 and
- * everything else is scaled proportionally.
- *
- * @example Synthesis/WavetableMethods
- *
- * @related Wavetable
- */
- public void normalize()
- {
- float max = Float.MIN_VALUE;
- for ( int i = 0; i < waveform.length; i++ )
- {
- if ( Math.abs( waveform[i] ) > max )
- max = Math.abs( waveform[i] );
- }
- scale( 1 / max );
- }
-
- /**
- * Flips the table around 0. Equivalent to flip(0)
.
- *
- * @see #flip(float)
- * @related flip ( )
- * @related Wavetable
- */
- public void invert()
- {
- flip( 0 );
- }
-
- /**
- * Flip the values in the table around a particular value. For example, if
- * you flip around 2, values greater than 2 will become less than two by the
- * same amount and values less than 2 will become greater than 2 by the same
- * amount. 3 -> 1, 0 -> 4, etc.
- *
- * @shortdesc Flip the values in the table around a particular value.
- *
- * @example Synthesis/WavetableMethods
- *
- * @param in
- * float: the value to flip the table around
- *
- * @related Wavetable
- */
- public void flip(float in)
- {
- for ( int i = 0; i < waveform.length; i++ )
- {
- if ( waveform[i] > in )
- waveform[i] = in - ( waveform[i] - in );
- else
- waveform[i] = in + ( in - waveform[i] );
- }
- }
-
- /**
- * Adds Gaussian noise to the waveform.
- *
- * @example Synthesis/WavetableMethods
- *
- * @param sigma
- * float: the amount to scale the random values by, in effect how
- * "loud" the added noise will be.
- *
- * @related Wavetable
- */
- public void addNoise(float sigma)
- {
- Random rgen = new Random();
- for ( int i = 0; i < waveform.length; i++ )
- {
- waveform[i] += ( (float)rgen.nextGaussian() ) * sigma;
- }
- }
-
- /**
- * Inverts all values in the table that are less than zero. -1 -> 1, -0.2 -> 0.2, etc.
- *
- * @example Synthesis/WavetableMethods
- *
- * @related Wavetable
- */
- public void rectify()
- {
- for ( int i = 0; i < waveform.length; i++ )
- {
- if ( waveform[i] < 0 )
- waveform[i] *= -1;
- }
- }
-
- /**
- * Smooth out the values in the table by using a moving average window.
- *
- * @example Synthesis/WavetableMethods
- *
- * @param windowLength
- * int: how many samples large the window should be
- *
- * @related Wavetable
- */
- public void smooth(int windowLength)
- {
- if ( windowLength < 1 )
- return;
- float[] temp = (float[])waveform.clone();
- for ( int i = windowLength; i < waveform.length; i++ )
- {
- float avg = 0;
- for ( int j = i - windowLength; j <= i; j++ )
- {
- avg += temp[j] / windowLength;
- }
- waveform[i] = avg;
- }
- }
-
- /**
- * Warping works by choosing a point in the waveform, the warpPoint, and
- * then specifying where it should move to, the warpTarget. Both values
- * should be normalized (i.e. in the range [0,1]). What will happen is that
- * the waveform data in front of and behind the warpPoint will be squashed
- * or stretch to fill the space defined by where the warpTarget is. For
- * instance, if you took Waves.SQUARE and called warp( 0.5, 0.2 ), you would
- * wind up with a square wave with a 20 percent duty cycle, the same as
- * using Waves.square( 0.2 ). This is because the crossover point of a
- * square wave is halfway through and warping it such that the crossover is
- * moved to 20% through the waveform is equivalent to changing the duty
- * cycle. Or course, much more interesting things happen when warping a more
- * complex waveform, such as one returned by the Waves.randomNHarms method,
- * especially if it is warped more than once.
- *
- * @shortdesc Warping works by choosing a point in the waveform, the
- * warpPoint, and then specifying where it should move to, the
- * warpTarget.
- *
- * @example Synthesis/WavetableMethods
- *
- * @param warpPoint
- * float: the point in the wave for to be moved, expressed as a
- * normalized value.
- * @param warpTarget
- * float: the point in the wave to move the warpPoint to,
- * expressed as a normalized value.
- *
- * @related Wavetable
- */
- public void warp(float warpPoint, float warpTarget)
- {
- float[] newWave = new float[waveform.length];
- for ( int s = 0; s < newWave.length; ++s )
- {
- float lookup = (float)s / newWave.length;
- if ( lookup <= warpTarget )
- {
- // normalize look up to [0,warpTarget], expand to [0,warpPoint]
- lookup = ( lookup / warpTarget ) * warpPoint;
- }
- else
- {
- // map (warpTarget,1] to (warpPoint,1]
- lookup = warpPoint + ( 1 - ( 1 - lookup ) / ( 1 - warpTarget ) ) * ( 1 - warpPoint );
- }
- newWave[s] = value( lookup );
- }
- waveform = newWave;
- }
-
-}
diff --git a/src/ddf/minim/ugens/WavetableGenerator.java b/src/ddf/minim/ugens/WavetableGenerator.java
deleted file mode 100644
index 3560f71..0000000
--- a/src/ddf/minim/ugens/WavetableGenerator.java
+++ /dev/null
@@ -1,199 +0,0 @@
-package ddf.minim.ugens;
-
-
-/**
- * WavetableGenerator is a helper class for generating Wavetables.
- * The method names come from CSound.
- * Generally speaking, it will often be easier to use the static methods in the Waves class, but the methods
- * in this class provide more flexibility.
- *
- * @related Wavetable
- * @related Waves
- *
- * @author Mark Godfrey <mark.godfrey@gatech.edu>
- */
-
-public class WavetableGenerator
-{
- // private constructor so it doesn't show up in documentation
- // and so that instances of this class cannot be created.
- private WavetableGenerator() {}
-
- /**
- * Generate a piecewise linear waveform given an array of sample values and the distances
- * between them. The dist
array should contain one value less than the val
- * array. The values in the dist
array should also add up to size
. For instance, a
- * call like this:
- * Wavetable table = WavetableGenerator.gen7( 4096, new float[] { 1.0, -1.0, 1.0 }, new int[] { 2048, 2048 } );
- * Wavetable table = WavetableGenerator.gen7( 4069, new float[] { 0.0, 1.0, 0.0, -1.0, 0.0 }, new int[] { 1024, 1024, 1024, 1024 } );
- *
- * @shortdesc Generate a piecewise linear waveform given an array of sample values and the distances
- * between them.
- *
- * @param size
- * int: the size of the Wavetable that you want generate
- * @param val
- * float[]: the sample values used as control points for generating the waveform
- * @param dist
- * int[]: the sample distances between control points in val
- *
- * @return a Wavetable
- *
- * @related Wavetable
- */
- public static Wavetable gen7(int size, float[] val, int[] dist)
- {
- //System.out.println("gen7: " + size + ", " + val + ", " + dist);
- float[] waveform = new float[size];
-
- // check lengths of arrays
- if (val.length - 1 != dist.length)
- {
- System.out.println("Input arrays of invalid sizes!");
- return null;
- }
-
- // check if size is sum of dists
- int sum = 0;
- for (int i = 0; i < dist.length; i++)
- {
- sum += dist[i];
- }
- if (size != sum)
- {
- System.out.println("Distances do not sum to size!");
- return null;
- }
-
- // waveform[0] = val[0];
- int i = 0;
- for (int j = 1; j < val.length && i < waveform.length; j++)
- {
- waveform[i] = val[j - 1];
- float m = (val[j] - val[j - 1]) / (float)(dist[j - 1]);
- for (int k = i + 1; k < i + dist[j - 1]; k++)
- {
- waveform[k] = m * (k - i) + val[j - 1];
- }
- i += dist[j - 1];
- }
- waveform[waveform.length - 1] = val[val.length - 1];
-
- // for(int n = 0; n < waveform.length; n++)
- // System.out.println(waveform[n]);
-
- return new Wavetable(waveform);
- }
-
- /**
- *
- * Generates a Wavetable from a list of partials with matching amplitudes and phases. Partial, here, refers
- * to a particular sine wave in the harmonic series (see: Harmonic vs. partial).
- * If you want to generate a single sine wave, suitable for playing a single tone of a particular frequency
- * in an Oscil, you could use this code:
- * Wavetable sine = WavetableGenerator.gen9(4096, new float[] { 1 }, new float[] { 1 }, new float[] { 0 });
- * Wavetable octave = WavetableGenerator.gen9(4096, new float[] { 1, 2 }, new float[] { 1, 1 }, new float[] { 0, 0 });
- * Wavetable table = WavetableGenerator.gen9(4096, new float[] { 1, 2, 3 }, new float[] { 1, 0.5, 0.2 }, new float[] { 0, 0, 0 });
- * Wavetable table = WavetableGenerator.gen10(4096, new float[] { 1, 0.5, 0.2 });
- *
- * @shortdesc Generate a Wavetable given a list of amplitudes for successive partials (harmonics).
- *
- * @param size
- * int: the number of samples the Wavetable should contain
- * @param amp
- * float[]: the amplitude of each successive partial, beginning with partial 1.
- *
- * @return a Wavetable
- *
- * @see #gen9
- * @related gen9 ( )
- * @related Wavetable
- */
- public static Wavetable gen10(int size, float[] amp)
- {
-
- float[] waveform = new float[size];
-
- float index = 0;
- for (int i = 0; i < size; i++)
- {
- index = (float)i / (size - 1);
- for (int j = 0; j < amp.length; j++)
- {
- waveform[i] += amp[j] * Math.sin(2 * Math.PI * (j + 1) * index);
- }
- }
-
- return new Wavetable(waveform);
- }
-
-}
diff --git a/src/ddf/minim/ugens/package.html b/src/ddf/minim/ugens/package.html
deleted file mode 100644
index f6845f1..0000000
--- a/src/ddf/minim/ugens/package.html
+++ /dev/null
@@ -1,258 +0,0 @@
-
-
-List of UGens
-
- Sound Generators
-
-
-
-
- Effects
-
-
-
-
- Envelopes
-
-
-
-
- Math
-
-
-
-
- Utility
-
-
-
-
-
- Oscil osc = new Oscil( 349.23, 0.8 );
-
-
- aState
- */
- private static final int [] ZZ_ATTRIBUTE = zzUnpackAttribute();
-
- private static final String ZZ_ATTRIBUTE_PACKED_0 =
- "\2\0\1\11\3\1\1\11\3\1\6\11\2\1\1\11"+
- "\5\0\10\11\1\0\1\1\1\0\1\1\4\0\2\11"+
- "\2\0\1\11";
-
- private static int [] zzUnpackAttribute() {
- int [] result = new int[45];
- int offset = 0;
- offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
- return result;
- }
-
- private static int zzUnpackAttribute(String packed, int offset, int [] result) {
- int i = 0; /* index in packed string */
- int j = offset; /* index in unpacked array */
- int l = packed.length();
- while (i < l) {
- int count = packed.charAt(i++);
- int value = packed.charAt(i++);
- do result[j++] = value; while (--count > 0);
- }
- return j;
- }
-
- /** the input device */
- private java.io.Reader zzReader;
-
- /** the current state of the DFA */
- private int zzState;
-
- /** the current lexical state */
- private int zzLexicalState = YYINITIAL;
-
- /** this buffer contains the current text to be matched and is
- the source of the yytext() string */
- private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
-
- /** the textposition at the last accepting state */
- private int zzMarkedPos;
-
- /** the current text position in the buffer */
- private int zzCurrentPos;
-
- /** startRead marks the beginning of the yytext() string in the buffer */
- private int zzStartRead;
-
- /** endRead marks the last character in the buffer, that has been read
- from input */
- private int zzEndRead;
-
- /** number of newlines encountered up to the start of the matched text */
- private int yyline;
-
- /** the number of characters up to the start of the matched text */
- private int yychar;
-
- /**
- * the number of characters from the last newline up to the start of the
- * matched text
- */
- private int yycolumn;
-
- /**
- * zzAtBOL == true <=> the scanner is currently at the beginning of a line
- */
- private boolean zzAtBOL = true;
-
- /** zzAtEOF == true <=> the scanner is at the EOF */
- private boolean zzAtEOF;
-
- /* user code: */
-private StringBuffer sb=new StringBuffer();
-
-int getPosition(){
- return yychar;
-}
-
-
-
- /**
- * Creates a new scanner
- * There is also a java.io.InputStream version of this constructor.
- *
- * @param in the java.io.Reader to read input from.
- */
- Yylex(java.io.Reader in) {
- this.zzReader = in;
- }
-
- /**
- * Creates a new scanner.
- * There is also java.io.Reader version of this constructor.
- *
- * @param in the java.io.Inputstream to read input from.
- */
- Yylex(java.io.InputStream in) {
- this(new java.io.InputStreamReader(in));
- }
-
- /**
- * Unpacks the compressed character translation table.
- *
- * @param packed the packed character translation table
- * @return the unpacked character translation table
- */
- private static char [] zzUnpackCMap(String packed) {
- char [] map = new char[0x10000];
- int i = 0; /* index in packed string */
- int j = 0; /* index in unpacked array */
- while (i < 90) {
- int count = packed.charAt(i++);
- char value = packed.charAt(i++);
- do map[j++] = value; while (--count > 0);
- }
- return map;
- }
-
-
- /**
- * Refills the input buffer.
- *
- * @return false
, iff there was new input.
- *
- * @exception java.io.IOException if any I/O-Error occurs
- */
- private boolean zzRefill() throws java.io.IOException {
-
- /* first: make room (if you can) */
- if (zzStartRead > 0) {
- System.arraycopy(zzBuffer, zzStartRead,
- zzBuffer, 0,
- zzEndRead-zzStartRead);
-
- /* translate stored positions */
- zzEndRead-= zzStartRead;
- zzCurrentPos-= zzStartRead;
- zzMarkedPos-= zzStartRead;
- zzStartRead = 0;
- }
-
- /* is the buffer big enough? */
- if (zzCurrentPos >= zzBuffer.length) {
- /* if not: blow it up */
- char newBuffer[] = new char[zzCurrentPos*2];
- System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length);
- zzBuffer = newBuffer;
- }
-
- /* finally: fill the buffer with new input */
- int numRead = zzReader.read(zzBuffer, zzEndRead,
- zzBuffer.length-zzEndRead);
-
- if (numRead > 0) {
- zzEndRead+= numRead;
- return false;
- }
- // unlikely but not impossible: read 0 characters, but not at end of stream
- if (numRead == 0) {
- int c = zzReader.read();
- if (c == -1) {
- return true;
- } else {
- zzBuffer[zzEndRead++] = (char) c;
- return false;
- }
- }
-
- // numRead < 0
- return true;
- }
-
-
- /**
- * Closes the input stream.
- */
- public final void yyclose() throws java.io.IOException {
- zzAtEOF = true; /* indicate end of file */
- zzEndRead = zzStartRead; /* invalidate buffer */
-
- if (zzReader != null)
- zzReader.close();
- }
-
-
- /**
- * Resets the scanner to read from a new input stream.
- * Does not close the old reader.
- *
- * All internal variables are reset, the old input stream
- * cannot be reused (internal buffer is discarded and lost).
- * Lexical state is set to ZZ_INITIAL.
- *
- * @param reader the new input stream
- */
- public final void yyreset(java.io.Reader reader) {
- zzReader = reader;
- zzAtBOL = true;
- zzAtEOF = false;
- zzEndRead = zzStartRead = 0;
- zzCurrentPos = zzMarkedPos = 0;
- yyline = yychar = yycolumn = 0;
- zzLexicalState = YYINITIAL;
- }
-
-
- /**
- * Returns the current lexical state.
- */
- public final int yystate() {
- return zzLexicalState;
- }
-
-
- /**
- * Enters a new lexical state
- *
- * @param newState the new lexical state
- */
- public final void yybegin(int newState) {
- zzLexicalState = newState;
- }
-
-
- /**
- * Returns the text matched by the current regular expression.
- */
- public final String yytext() {
- return new String( zzBuffer, zzStartRead, zzMarkedPos-zzStartRead );
- }
-
-
- /**
- * Returns the character at position pos from the
- * matched text.
- *
- * It is equivalent to yytext().charAt(pos), but faster
- *
- * @param pos the position of the character to fetch.
- * A value from 0 to yylength()-1.
- *
- * @return the character at position pos
- */
- public final char yycharat(int pos) {
- return zzBuffer[zzStartRead+pos];
- }
-
-
- /**
- * Returns the length of the matched text region.
- */
- public final int yylength() {
- return zzMarkedPos-zzStartRead;
- }
-
-
- /**
- * Reports an error that occured while scanning.
- *
- * In a wellformed scanner (no or only correct usage of
- * yypushback(int) and a match-all fallback rule) this method
- * will only be called with things that "Can't Possibly Happen".
- * If this method is called, something is seriously wrong
- * (e.g. a JFlex bug producing a faulty scanner etc.).
- *
- * Usual syntax/scanner level error handling should be done
- * in error fallback rules.
- *
- * @param errorCode the code of the errormessage to display
- */
- private void zzScanError(int errorCode) {
- String message;
- try {
- message = ZZ_ERROR_MSG[errorCode];
- }
- catch (ArrayIndexOutOfBoundsException e) {
- message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
- }
-
- throw new Error(message);
- }
-
-
- /**
- * Pushes the specified amount of characters back into the input stream.
- *
- * They will be read again by then next call of the scanning method
- *
- * @param number the number of characters to be read again.
- * This number must not be greater than yylength()!
- */
- public void yypushback(int number) {
- if ( number > yylength() )
- zzScanError(ZZ_PUSHBACK_2BIG);
-
- zzMarkedPos -= number;
- }
-
-
- /**
- * Resumes scanning until the next regular expression is matched,
- * the end of input is encountered or an I/O-Error occurs.
- *
- * @return the next token
- * @exception java.io.IOException if any I/O-Error occurs
- */
- public Yytoken yylex() throws java.io.IOException, ParseException {
- int zzInput;
- int zzAction;
-
- // cached fields:
- int zzCurrentPosL;
- int zzMarkedPosL;
- int zzEndReadL = zzEndRead;
- char [] zzBufferL = zzBuffer;
- char [] zzCMapL = ZZ_CMAP;
-
- int [] zzTransL = ZZ_TRANS;
- int [] zzRowMapL = ZZ_ROWMAP;
- int [] zzAttrL = ZZ_ATTRIBUTE;
-
- while (true) {
- zzMarkedPosL = zzMarkedPos;
-
- yychar+= zzMarkedPosL-zzStartRead;
-
- zzAction = -1;
-
- zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
-
- zzState = ZZ_LEXSTATE[zzLexicalState];
-
-
- zzForAction: {
- while (true) {
-
- if (zzCurrentPosL < zzEndReadL)
- zzInput = zzBufferL[zzCurrentPosL++];
- else if (zzAtEOF) {
- zzInput = YYEOF;
- break zzForAction;
- }
- else {
- // store back cached positions
- zzCurrentPos = zzCurrentPosL;
- zzMarkedPos = zzMarkedPosL;
- boolean eof = zzRefill();
- // get translated positions and possibly new buffer
- zzCurrentPosL = zzCurrentPos;
- zzMarkedPosL = zzMarkedPos;
- zzBufferL = zzBuffer;
- zzEndReadL = zzEndRead;
- if (eof) {
- zzInput = YYEOF;
- break zzForAction;
- }
- else {
- zzInput = zzBufferL[zzCurrentPosL++];
- }
- }
- int zzNext = zzTransL[ zzRowMapL[zzState] + zzCMapL[zzInput] ];
- if (zzNext == -1) break zzForAction;
- zzState = zzNext;
-
- int zzAttributes = zzAttrL[zzState];
- if ( (zzAttributes & 1) == 1 ) {
- zzAction = zzState;
- zzMarkedPosL = zzCurrentPosL;
- if ( (zzAttributes & 8) == 8 ) break zzForAction;
- }
-
- }
- }
-
- // store back cached position
- zzMarkedPos = zzMarkedPosL;
-
- switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction]) {
- case 11:
- { sb.append(yytext());
- }
- case 25: break;
- case 4:
- { sb = null; sb = new StringBuffer(); yybegin(STRING_BEGIN);
- }
- case 26: break;
- case 16:
- { sb.append('\b');
- }
- case 27: break;
- case 6:
- { return new Yytoken(Yytoken.TYPE_RIGHT_BRACE,null);
- }
- case 28: break;
- case 23:
- { Boolean val=Boolean.valueOf(yytext()); return new Yytoken(Yytoken.TYPE_VALUE, val);
- }
- case 29: break;
- case 22:
- { return new Yytoken(Yytoken.TYPE_VALUE, null);
- }
- case 30: break;
- case 13:
- { yybegin(YYINITIAL);return new Yytoken(Yytoken.TYPE_VALUE, sb.toString());
- }
- case 31: break;
- case 12:
- { sb.append('\\');
- }
- case 32: break;
- case 21:
- { Double val=Double.valueOf(yytext()); return new Yytoken(Yytoken.TYPE_VALUE, val);
- }
- case 33: break;
- case 1:
- { throw new ParseException(yychar, ParseException.ERROR_UNEXPECTED_CHAR, new Character(yycharat(0)));
- }
- case 34: break;
- case 8:
- { return new Yytoken(Yytoken.TYPE_RIGHT_SQUARE,null);
- }
- case 35: break;
- case 19:
- { sb.append('\r');
- }
- case 36: break;
- case 15:
- { sb.append('/');
- }
- case 37: break;
- case 10:
- { return new Yytoken(Yytoken.TYPE_COLON,null);
- }
- case 38: break;
- case 14:
- { sb.append('"');
- }
- case 39: break;
- case 5:
- { return new Yytoken(Yytoken.TYPE_LEFT_BRACE,null);
- }
- case 40: break;
- case 17:
- { sb.append('\f');
- }
- case 41: break;
- case 24:
- { try{
- int ch=Integer.parseInt(yytext().substring(2),16);
- sb.append((char)ch);
- }
- catch(Exception e){
- throw new ParseException(yychar, ParseException.ERROR_UNEXPECTED_EXCEPTION, e);
- }
- }
- case 42: break;
- case 20:
- { sb.append('\t');
- }
- case 43: break;
- case 7:
- { return new Yytoken(Yytoken.TYPE_LEFT_SQUARE,null);
- }
- case 44: break;
- case 2:
- { Long val=Long.valueOf(yytext()); return new Yytoken(Yytoken.TYPE_VALUE, val);
- }
- case 45: break;
- case 18:
- { sb.append('\n');
- }
- case 46: break;
- case 9:
- { return new Yytoken(Yytoken.TYPE_COMMA,null);
- }
- case 47: break;
- case 3:
- {
- }
- case 48: break;
- default:
- if (zzInput == YYEOF && zzStartRead == zzCurrentPos) {
- zzAtEOF = true;
- return null;
- }
- else {
- zzScanError(ZZ_NO_MATCH);
- }
- }
- }
- }
-
-
-}
diff --git a/src/org/json/simple/parser/Yytoken.java b/src/org/json/simple/parser/Yytoken.java
deleted file mode 100644
index ff14e27..0000000
--- a/src/org/json/simple/parser/Yytoken.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * $Id: Yytoken.java,v 1.1 2006/04/15 14:10:48 platform Exp $
- * Created on 2006-4-15
- */
-package org.json.simple.parser;
-
-/**
- * @author FangYidong