/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.media;
import android.annotation.IntDef;
import android.annotation.NonNull;
import android.os.Parcel;
import android.os.Parcelable;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.Arrays;
import java.util.Objects;
/**
* The {@link AudioFormat} class is used to access a number of audio format and
* channel configuration constants. They are for instance used
* in {@link AudioTrack} and {@link AudioRecord}, as valid values in individual parameters of
* constructors like {@link AudioTrack#AudioTrack(int, int, int, int, int, int)}, where the fourth
* parameter is one of the <code>AudioFormat.ENCODING_*</code> constants.
* The <code>AudioFormat</code> constants are also used in {@link MediaFormat} to specify
* audio related values commonly used in media, such as for {@link MediaFormat#KEY_CHANNEL_MASK}.
*
* <p>The {@link AudioFormat.Builder} class can be used to create instances of
* the <code>AudioFormat</code> format class.
* Refer to
* {@link AudioFormat.Builder} for documentation on the mechanics of the configuration and building
* of such instances. Here we describe the main concepts that the <code>AudioFormat</code> class
* allows you to convey in each instance; they are:
* <ol>
* <li>sample rate
* <li>encoding
* <li>channel masks
* </ol>
*
* <p>Closely associated with the <code>AudioFormat</code> is the notion of an
* <em>audio frame</em>, which is used throughout the documentation
* to represent the minimum size complete unit of audio data.
*
* <p>Expressed in Hz, the sample rate in an <code>AudioFormat</code> instance expresses the number
* of audio samples for each channel per second in the content you are playing or recording. It is
* not the sample rate
* at which content is rendered or produced. For instance a sound at a media sample rate of 8000Hz
* can be played on a device operating at a sample rate of 48000Hz; the sample rate conversion is
* automatically handled by the platform; it will not play at 6x speed.
*
* <p>As of API {@link android.os.Build.VERSION_CODES#M},
* sample rates up to 192kHz are supported
* for <code>AudioRecord</code> and <code>AudioTrack</code>, with sample rate conversion
* performed as needed.
* To improve efficiency and avoid lossy conversions, it is recommended to match the sample rate
* for <code>AudioRecord</code> and <code>AudioTrack</code> to the endpoint device
* sample rate, and limit the sample rate to no more than 48kHz unless there are special
* device capabilities that warrant a higher rate.
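* <p>For example, a minimal sketch (the 48000 value is illustrative, matching a common device
* rate, not a requirement) of requesting a 48kHz format through {@link AudioFormat.Builder}:
* <pre>{@code
* AudioFormat format = new AudioFormat.Builder()
*         .setSampleRate(48000) // match the endpoint rate to avoid resampling
*         .build();
* }</pre>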
*
* <p>Audio encoding is used to describe the bit representation of audio data, which can be
* either linear PCM or compressed audio, such as AC3 or DTS.
* <p>For linear PCM, the audio encoding describes the sample size, 8 bits, 16 bits, or 32 bits,
* and the sample representation, integer or float.
* <p>The nominal range of <code>ENCODING_PCM_FLOAT</code> audio data is [-1.0, 1.0].
* It is implementation dependent whether the positive maximum of 1.0 is included
* in the interval. Values outside of the nominal range are clamped before
* sending to the endpoint device. Beware that
* the handling of NaN is undefined; subnormals may be treated as zero; and
* infinities are generally clamped just like other values for <code>AudioTrack</code>
* – try to avoid infinities because they can easily generate a NaN.
* <p><code>ENCODING_PCM_FLOAT</code> is the recommended encoding for audio capture, processing,
* and playback.
* Floats are efficiently manipulated by modern CPUs,
* have greater precision than 24 bit signed integers,
* and have greater dynamic range than 32 bit signed integers.
* <code>AudioRecord</code> as of API {@link android.os.Build.VERSION_CODES#M} and
* <code>AudioTrack</code> as of API {@link android.os.Build.VERSION_CODES#LOLLIPOP}
* support <code>ENCODING_PCM_FLOAT</code>.
* For compressed audio, the encoding specifies the method of compression,
* for example {@link #ENCODING_AC3} and {@link #ENCODING_DTS}. The compressed
* audio data is typically stored as bytes in
* a byte array or ByteBuffer. When a compressed audio encoding is specified
* for an <code>AudioTrack</code>, it creates a direct (non-mixed) track
* for output to an endpoint (such as HDMI) capable of decoding the compressed audio.
* For (most) other endpoints, which are not capable of decoding such compressed audio,
* you will need to decode the data first, typically by creating a {@link MediaCodec}.
* Alternatively, one may use {@link MediaPlayer} for playback of compressed
* audio files or streams.
*
* <p>When compressed audio is sent out through a direct <code>AudioTrack</code>,
* it need not be written in exact multiples of the audio access unit;
* this differs from <code>MediaCodec</code> input buffers.
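* <p>As an illustrative sketch (assuming an endpoint that can decode AC-3; the sample rate and
* channel mask shown are examples, not requirements), such a compressed format could be
* described as:
* <pre>{@code
* AudioFormat ac3Format = new AudioFormat.Builder()
*         .setEncoding(AudioFormat.ENCODING_AC3)
*         .setSampleRate(48000)
*         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
*         .build();
* }</pre>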
*
* <p>Channel masks are used in <code>AudioTrack</code> and <code>AudioRecord</code> to describe
* the samples and their arrangement in the audio frame. They are also used in the endpoint (e.g.
* a USB audio interface, a DAC connected to headphones) to specify allowable configurations of a
* particular device.
* <p>As of API {@link android.os.Build.VERSION_CODES#M}, there are two types of channel masks:
* channel position masks and channel index masks.
*
*
* <p>For a channel position mask, each allowed channel position corresponds to a bit in the
* channel mask. If that channel position is present in the audio frame, that bit is set,
* otherwise it is zero. The order of the bits (from lsb to msb) corresponds to the order of that
* position's sample in the audio frame.
* <br>The canonical channel position masks by channel count are as follows:
* <br><table>
* <tr><td>channel count</td><td>channel position mask</td></tr>
* <tr><td>1</td><td>{@link #CHANNEL_OUT_MONO}</td></tr>
* <tr><td>2</td><td>{@link #CHANNEL_OUT_STEREO}</td></tr>
* <tr><td>3</td><td>{@link #CHANNEL_OUT_STEREO} | {@link #CHANNEL_OUT_FRONT_CENTER}</td></tr>
* <tr><td>4</td><td>{@link #CHANNEL_OUT_QUAD}</td></tr>
* <tr><td>5</td><td>{@link #CHANNEL_OUT_QUAD} | {@link #CHANNEL_OUT_FRONT_CENTER}</td></tr>
* <tr><td>6</td><td>{@link #CHANNEL_OUT_5POINT1}</td></tr>
* <tr><td>7</td><td>{@link #CHANNEL_OUT_5POINT1} | {@link #CHANNEL_OUT_BACK_CENTER}</td></tr>
* <tr><td>8</td><td>{@link #CHANNEL_OUT_7POINT1_SURROUND}</td></tr>
* </table>
* <p>For a channel index mask, each channel number is represented as a bit in the mask, from the
* lsb (channel 0) upwards to the msb; numerically this bit value is
* {@code 1 << channelNumber}.
* A set bit indicates that channel is present in the audio frame, otherwise it is cleared.
* The order of the bits also corresponds to that channel number's sample order in the audio
* frame.
* <br>For example, a 4 channel USB audio device would have a channel index mask of {@code 0xF}.
* Suppose we wanted to select only the first and the third channels; this would
* correspond to a channel index mask {@code 0x5} (the first and third bits set). If an
* <code>AudioTrack</code> uses this channel index mask, the audio frame would consist of two
* samples, the first sample of each frame routed to channel 0, and the second sample of each frame
* routed to channel 2.
* The canonical channel index masks by channel count are given by the formula
* {@code (1 << channelCount) - 1}.
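* <br>Continuing the example above, a sketch of requesting the first and third channels
* (the mask value {@code 0x5} is the illustrative choice discussed here):
* <pre>{@code
* AudioFormat twoOfFour = new AudioFormat.Builder()
*         .setChannelIndexMask(0x5) // channels 0 and 2 of the endpoint
*         .build();
* }</pre>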
*
* <p>Channel position masks, with named positions such as <code>CHANNEL_OUT_FRONT_LEFT</code>,
* <code>CHANNEL_OUT_FRONT_CENTER</code>, etc., are appropriate for HDMI home theater purposes,
* for example when using an <code>AudioTrack</code>
* to output movie content, where 5.1 multichannel output is to be written.
* <br>In contrast, a multichannel <code>AudioRecord</code> may only want the
* third and fourth audio channels of the endpoint (i.e. the second channel pair), and not care
* about the position it corresponds to, in which case the channel index mask is {@code 0xC}.
* Multichannel <code>AudioRecord</code> sessions should use channel index masks.
* For linear PCM, an audio frame consists of a set of samples captured at the same time,
* whose count and
* channel association are given by the channel mask,
* and whose sample contents are specified by the encoding.
* For example, a stereo 16 bit PCM frame consists of
* two 16 bit linear PCM samples, with a frame size of 4 bytes.
* For compressed audio, an audio frame may alternately
* refer to an access unit of compressed data bytes that is logically grouped together for
* decoding and bitstream access (e.g. {@link MediaCodec}),
* or a single byte of compressed data (e.g. {@link AudioTrack#getBufferSizeInFrames()
* AudioTrack.getBufferSizeInFrames()}),
* or the linear PCM frame result from decoding the compressed data
* (e.g. {@link AudioTrack#getPlaybackHeadPosition()
* AudioTrack.getPlaybackHeadPosition()}),
* depending on the context where audio frame is used.
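* <p>For instance, the frame size in bytes of a linear PCM format is the channel count times the
* bytes per sample; a short sketch (variable names are illustrative):
* <pre>{@code
* // stereo 16 bit PCM: 2 channels * 2 bytes per sample = 4 bytes per frame
* int channelCount = 2;
* int bytesPerSample = 2;       // ENCODING_PCM_16BIT
* int frameSizeInBytes = channelCount * bytesPerSample;
* }</pre>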
*/
public final class AudioFormat implements Parcelable {
//---------------------------------------------------------
// Constants
//--------------------
/** Invalid audio data format */
public static final int ENCODING_INVALID = 0;
/** Default audio data format */
public static final int ENCODING_DEFAULT = 1;
// These values must be kept in sync with core/jni/android_media_AudioFormat.h
// Also sync av/services/audiopolicy/managerdefault/ConfigParsingUtils.h
/** Audio data format: PCM 16 bit per sample. Guaranteed to be supported by devices. */
public static final int ENCODING_PCM_16BIT = 2;
/** Audio data format: PCM 8 bit per sample. Not guaranteed to be supported by devices. */
public static final int ENCODING_PCM_8BIT = 3;
/** Audio data format: single-precision floating-point per sample */
public static final int ENCODING_PCM_FLOAT = 4;
/** Audio data format: AC-3 compressed */
public static final int ENCODING_AC3 = 5;
/** Audio data format: E-AC-3 compressed */
public static final int ENCODING_E_AC3 = 6;
/** Audio data format: DTS compressed */
public static final int ENCODING_DTS = 7;
/** Audio data format: DTS HD compressed */
public static final int ENCODING_DTS_HD = 8;
/** Audio data format: MP3 compressed
* @hide
* */
public static final int ENCODING_MP3 = 9;
/** Audio data format: AAC LC compressed
* @hide
* */
public static final int ENCODING_AAC_LC = 10;
/** Audio data format: AAC HE V1 compressed
* @hide
* */
public static final int ENCODING_AAC_HE_V1 = 11;
/** Audio data format: AAC HE V2 compressed
* @hide
* */
public static final int ENCODING_AAC_HE_V2 = 12;
/** Audio data format: compressed audio wrapped in PCM for HDMI
* or S/PDIF passthrough.
* IEC61937 uses a stereo stream of 16-bit samples as the wrapper.
* So the channel mask for the track must be {@link #CHANNEL_OUT_STEREO}.
* Data should be written to the stream in a short[] array.
* If the data is written in a byte[] array then there may be endian problems
* on some platforms when converting to short internally.
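* <p>A sketch of a format for such a track (the 48kHz rate is an assumption for illustration;
* the stereo mask is required as noted above):
* <pre>{@code
* AudioFormat format = new AudioFormat.Builder()
*         .setEncoding(AudioFormat.ENCODING_IEC61937)
*         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
*         .setSampleRate(48000)
*         .build();
* }</pre>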
*/
public static final int ENCODING_IEC61937 = 13;
/** Audio data format: DOLBY TRUEHD compressed
**/
public static final int ENCODING_DOLBY_TRUEHD = 14;
/** @hide */
public static String toLogFriendlyEncoding(int enc) {
switch(enc) {
case ENCODING_INVALID:
return "ENCODING_INVALID";
case ENCODING_PCM_16BIT:
return "ENCODING_PCM_16BIT";
case ENCODING_PCM_8BIT:
return "ENCODING_PCM_8BIT";
case ENCODING_PCM_FLOAT:
return "ENCODING_PCM_FLOAT";
case ENCODING_AC3:
return "ENCODING_AC3";
case ENCODING_E_AC3:
return "ENCODING_E_AC3";
case ENCODING_DTS:
return "ENCODING_DTS";
case ENCODING_DTS_HD:
return "ENCODING_DTS_HD";
case ENCODING_MP3:
return "ENCODING_MP3";
case ENCODING_AAC_LC:
return "ENCODING_AAC_LC";
case ENCODING_AAC_HE_V1:
return "ENCODING_AAC_HE_V1";
case ENCODING_AAC_HE_V2:
return "ENCODING_AAC_HE_V2";
case ENCODING_IEC61937:
return "ENCODING_IEC61937";
case ENCODING_DOLBY_TRUEHD:
return "ENCODING_DOLBY_TRUEHD";
default :
return "invalid encoding " + enc;
}
}
/** Invalid audio channel configuration
* @deprecated Use {@link #CHANNEL_INVALID} instead. */
@Deprecated public static final int CHANNEL_CONFIGURATION_INVALID = 0;
/** Default audio channel configuration
* @deprecated Use {@link #CHANNEL_OUT_DEFAULT} or {@link #CHANNEL_IN_DEFAULT} instead. */
@Deprecated public static final int CHANNEL_CONFIGURATION_DEFAULT = 1;
/** Mono audio configuration
* @deprecated Use {@link #CHANNEL_OUT_MONO} or {@link #CHANNEL_IN_MONO} instead. */
@Deprecated public static final int CHANNEL_CONFIGURATION_MONO = 2;
/** Stereo (2 channel) audio configuration
* @deprecated Use {@link #CHANNEL_OUT_STEREO} or {@link #CHANNEL_IN_STEREO} instead. */
@Deprecated public static final int CHANNEL_CONFIGURATION_STEREO = 3;
/** Invalid audio channel mask */
public static final int CHANNEL_INVALID = 0;
/** Default audio channel mask */
public static final int CHANNEL_OUT_DEFAULT = 1;
// Output channel mask definitions below are translated to the native values defined in
// in /system/media/audio/include/system/audio.h in the JNI code of AudioTrack
public static final int CHANNEL_OUT_FRONT_LEFT = 0x4;
public static final int CHANNEL_OUT_FRONT_RIGHT = 0x8;
public static final int CHANNEL_OUT_FRONT_CENTER = 0x10;
public static final int CHANNEL_OUT_LOW_FREQUENCY = 0x20;
public static final int CHANNEL_OUT_BACK_LEFT = 0x40;
public static final int CHANNEL_OUT_BACK_RIGHT = 0x80;
public static final int CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x100;
public static final int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x200;
public static final int CHANNEL_OUT_BACK_CENTER = 0x400;
public static final int CHANNEL_OUT_SIDE_LEFT = 0x800;
public static final int CHANNEL_OUT_SIDE_RIGHT = 0x1000;
/** @hide */
public static final int CHANNEL_OUT_TOP_CENTER = 0x2000;
/** @hide */
public static final int CHANNEL_OUT_TOP_FRONT_LEFT = 0x4000;
/** @hide */
public static final int CHANNEL_OUT_TOP_FRONT_CENTER = 0x8000;
/** @hide */
public static final int CHANNEL_OUT_TOP_FRONT_RIGHT = 0x10000;
/** @hide */
public static final int CHANNEL_OUT_TOP_BACK_LEFT = 0x20000;
/** @hide */
public static final int CHANNEL_OUT_TOP_BACK_CENTER = 0x40000;
/** @hide */
public static final int CHANNEL_OUT_TOP_BACK_RIGHT = 0x80000;
public static final int CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT;
public static final int CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT);
// aka QUAD_BACK
public static final int CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
/** @hide */
public static final int CHANNEL_OUT_QUAD_SIDE = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT);
public static final int CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER);
// aka 5POINT1_BACK
public static final int CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
/** @hide */
public static final int CHANNEL_OUT_5POINT1_SIDE = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY |
CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT);
// different from AUDIO_CHANNEL_OUT_7POINT1 used internally, and not accepted by AudioRecord.
/** @deprecated Not the typical 7.1 surround configuration. Use {@link #CHANNEL_OUT_7POINT1_SURROUND} instead. */
@Deprecated public static final int CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
// matches AUDIO_CHANNEL_OUT_7POINT1
public static final int CHANNEL_OUT_7POINT1_SURROUND = (
CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_FRONT_RIGHT |
CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT |
CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
CHANNEL_OUT_LOW_FREQUENCY);
// CHANNEL_OUT_ALL is not yet defined; if added then it should match AUDIO_CHANNEL_OUT_ALL
/** Minimum value for sample rate,
* assuming AudioTrack and AudioRecord share the same limitations.
* @hide
*/
// never unhide
public static final int SAMPLE_RATE_HZ_MIN = 4000;
/** Maximum value for sample rate,
* assuming AudioTrack and AudioRecord share the same limitations.
* @hide
*/
// never unhide
public static final int SAMPLE_RATE_HZ_MAX = 192000;
/** Sample rate will be a route-dependent value.
* For AudioTrack, it is usually the sink sample rate,
* and for AudioRecord it is usually the source sample rate.
*/
public static final int SAMPLE_RATE_UNSPECIFIED = 0;
/**
* @hide
* Return the input channel mask corresponding to an output channel mask.
* This can be used for submix rerouting for the mask of the recorder to map to that of the mix.
* @param outMask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
* @return a combination of CHANNEL_IN_* definitions matching an output channel mask
* @throws IllegalArgumentException
*/
public static int inChannelMaskFromOutChannelMask(int outMask) throws IllegalArgumentException {
if (outMask == CHANNEL_OUT_DEFAULT) {
throw new IllegalArgumentException(
"Illegal CHANNEL_OUT_DEFAULT channel mask for input.");
}
switch (channelCountFromOutChannelMask(outMask)) {
case 1:
return CHANNEL_IN_MONO;
case 2:
return CHANNEL_IN_STEREO;
default:
throw new IllegalArgumentException("Unsupported channel configuration for input.");
}
}
/**
* @hide
* Return the number of channels from an input channel mask
* @param mask a combination of the CHANNEL_IN_* definitions, even CHANNEL_IN_DEFAULT
* @return number of channels for the mask
*/
public static int channelCountFromInChannelMask(int mask) {
return Integer.bitCount(mask);
}
/**
* @hide
* Return the number of channels from an output channel mask
* @param mask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
* @return number of channels for the mask
*/
public static int channelCountFromOutChannelMask(int mask) {
return Integer.bitCount(mask);
}
/**
* @hide
* Return a channel mask ready to be used by native code
* @param javaMask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
* @return a native channel mask
*/
public static int convertChannelOutMaskToNativeMask(int javaMask) {
return (javaMask >> 2);
}
/**
* @hide
* Return a java output channel mask
* @param nativeMask a native channel mask
* @return a combination of the CHANNEL_OUT_* definitions
*/
public static int convertNativeChannelMaskToOutMask(int nativeMask) {
return (nativeMask << 2);
}
public static final int CHANNEL_IN_DEFAULT = 1;
// These directly match native
public static final int CHANNEL_IN_LEFT = 0x4;
public static final int CHANNEL_IN_RIGHT = 0x8;
public static final int CHANNEL_IN_FRONT = 0x10;
public static final int CHANNEL_IN_BACK = 0x20;
public static final int CHANNEL_IN_LEFT_PROCESSED = 0x40;
public static final int CHANNEL_IN_RIGHT_PROCESSED = 0x80;
public static final int CHANNEL_IN_FRONT_PROCESSED = 0x100;
public static final int CHANNEL_IN_BACK_PROCESSED = 0x200;
public static final int CHANNEL_IN_PRESSURE = 0x400;
public static final int CHANNEL_IN_X_AXIS = 0x800;
public static final int CHANNEL_IN_Y_AXIS = 0x1000;
public static final int CHANNEL_IN_Z_AXIS = 0x2000;
public static final int CHANNEL_IN_VOICE_UPLINK = 0x4000;
public static final int CHANNEL_IN_VOICE_DNLINK = 0x8000;
public static final int CHANNEL_IN_MONO = CHANNEL_IN_FRONT;
public static final int CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT);
/** @hide */
public static final int CHANNEL_IN_FRONT_BACK = CHANNEL_IN_FRONT | CHANNEL_IN_BACK;
// CHANNEL_IN_ALL is not yet defined; if added then it should match AUDIO_CHANNEL_IN_ALL
/** @hide */
public static int getBytesPerSample(int audioFormat)
{
switch (audioFormat) {
case ENCODING_PCM_8BIT:
return 1;
case ENCODING_PCM_16BIT:
case ENCODING_IEC61937:
case ENCODING_DEFAULT:
return 2;
case ENCODING_PCM_FLOAT:
return 4;
case ENCODING_INVALID:
default:
throw new IllegalArgumentException("Bad audio format " + audioFormat);
}
}
/** @hide */
public static boolean isValidEncoding(int audioFormat)
{
switch (audioFormat) {
case ENCODING_PCM_8BIT:
case ENCODING_PCM_16BIT:
case ENCODING_PCM_FLOAT:
case ENCODING_AC3:
case ENCODING_E_AC3:
case ENCODING_DTS:
case ENCODING_DTS_HD:
case ENCODING_MP3:
case ENCODING_AAC_LC:
case ENCODING_AAC_HE_V1:
case ENCODING_AAC_HE_V2:
case ENCODING_IEC61937:
case ENCODING_DOLBY_TRUEHD:
return true;
default:
return false;
}
}
/** @hide */
public static boolean isPublicEncoding(int audioFormat)
{
switch (audioFormat) {
case ENCODING_PCM_8BIT:
case ENCODING_PCM_16BIT:
case ENCODING_PCM_FLOAT:
case ENCODING_AC3:
case ENCODING_E_AC3:
case ENCODING_DTS:
case ENCODING_DTS_HD:
case ENCODING_IEC61937:
case ENCODING_DOLBY_TRUEHD:
return true;
default:
return false;
}
}
/** @hide */
public static boolean isEncodingLinearPcm(int audioFormat)
{
switch (audioFormat) {
case ENCODING_PCM_8BIT:
case ENCODING_PCM_16BIT:
case ENCODING_PCM_FLOAT:
case ENCODING_DEFAULT:
return true;
case ENCODING_AC3:
case ENCODING_E_AC3:
case ENCODING_DTS:
case ENCODING_DTS_HD:
case ENCODING_MP3:
case ENCODING_AAC_LC:
case ENCODING_AAC_HE_V1:
case ENCODING_AAC_HE_V2:
case ENCODING_IEC61937: // wrapped in PCM but compressed
case ENCODING_DOLBY_TRUEHD:
return false;
case ENCODING_INVALID:
default:
throw new IllegalArgumentException("Bad audio format " + audioFormat);
}
}
/** @hide */
public static boolean isEncodingLinearFrames(int audioFormat)
{
switch (audioFormat) {
case ENCODING_PCM_8BIT:
case ENCODING_PCM_16BIT:
case ENCODING_PCM_FLOAT:
case ENCODING_IEC61937: // same size as stereo PCM
case ENCODING_DEFAULT:
return true;
case ENCODING_AC3:
case ENCODING_E_AC3:
case ENCODING_DTS:
case ENCODING_DTS_HD:
case ENCODING_MP3:
case ENCODING_AAC_LC:
case ENCODING_AAC_HE_V1:
case ENCODING_AAC_HE_V2:
case ENCODING_DOLBY_TRUEHD:
return false;
case ENCODING_INVALID:
default:
throw new IllegalArgumentException("Bad audio format " + audioFormat);
}
}
/**
* Returns an array of public encoding values extracted from an array of
* encoding values.
* @hide
*/
public static int[] filterPublicFormats(int[] formats) {
if (formats == null) {
return null;
}
int[] myCopy = Arrays.copyOf(formats, formats.length);
int size = 0;
for (int i = 0; i < myCopy.length; i++) {
if (isPublicEncoding(myCopy[i])) {
if (size != i) {
myCopy[size] = myCopy[i];
}
size++;
}
}
return Arrays.copyOf(myCopy, size);
}
/** @removed */
public AudioFormat()
{
throw new UnsupportedOperationException("There is no valid usage of this constructor");
}
/**
* Private constructor with an ignored argument to differentiate from the removed default ctor
* @param ignoredArgument
*/
private AudioFormat(int ignoredArgument) {
}
/**
* Constructor used by the JNI. Parameters are not checked for validity.
*/
// Update sound trigger JNI in core/jni/android_hardware_SoundTrigger.cpp when modifying this
// constructor
private AudioFormat(int encoding, int sampleRate, int channelMask, int channelIndexMask) {
mEncoding = encoding;
mSampleRate = sampleRate;
mChannelMask = channelMask;
mChannelIndexMask = channelIndexMask;
mPropertySetMask = AUDIO_FORMAT_HAS_PROPERTY_ENCODING |
AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE |
AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK |
AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK;
}
/** @hide */
public final static int AUDIO_FORMAT_HAS_PROPERTY_NONE = 0x0;
/** @hide */
public final static int AUDIO_FORMAT_HAS_PROPERTY_ENCODING = 0x1 << 0;
/** @hide */
public final static int AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE = 0x1 << 1;
/** @hide */
public final static int AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK = 0x1 << 2;
/** @hide */
public final static int AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK = 0x1 << 3;
private int mEncoding;
private int mSampleRate;
private int mChannelMask;
private int mChannelIndexMask;
private int mPropertySetMask;
/**
* Return the encoding.
* See the section on encodings for more information about the different
* types of supported audio encoding.
* @return one of the values that can be set in {@link Builder#setEncoding(int)} or
* {@link AudioFormat#ENCODING_INVALID} if not set.
*/
public int getEncoding() {
if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_ENCODING) == 0) {
return ENCODING_INVALID;
}
return mEncoding;
}
/**
* Return the sample rate.
* @return one of the values that can be set in {@link Builder#setSampleRate(int)} or
* {@link #SAMPLE_RATE_UNSPECIFIED} if not set.
*/
public int getSampleRate() {
return mSampleRate;
}
/**
* Return the channel mask.
* See the section on channel masks for more information about
* the difference between index-based masks (as returned by {@link #getChannelIndexMask()}) and
* the position-based mask returned by this function.
* @return one of the values that can be set in {@link Builder#setChannelMask(int)} or
* {@link AudioFormat#CHANNEL_INVALID} if not set.
*/
public int getChannelMask() {
if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) == 0) {
return CHANNEL_INVALID;
}
return mChannelMask;
}
/**
* Return the channel index mask.
* See the section on channel masks for more information about
* the difference between index-based masks, and position-based masks (as returned
* by {@link #getChannelMask()}).
* @return one of the values that can be set in {@link Builder#setChannelIndexMask(int)} or
* {@link AudioFormat#CHANNEL_INVALID} if not set or an invalid mask was used.
*/
public int getChannelIndexMask() {
if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) == 0) {
return CHANNEL_INVALID;
}
return mChannelIndexMask;
}
/**
* Return the channel count.
* @return the channel count derived from the channel position mask or the channel index mask.
* Zero is returned if both the channel position mask and the channel index mask are not set.
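* <p>For example, a format with channel mask {@link AudioFormat#CHANNEL_OUT_STEREO} has a
* channel count of 2, as does one with a channel index mask of {@code 0x3}.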
*/
public int getChannelCount() {
final int channelIndexCount = Integer.bitCount(getChannelIndexMask());
int channelCount = channelCountFromOutChannelMask(getChannelMask());
if (channelCount == 0) {
channelCount = channelIndexCount;
} else if (channelCount != channelIndexCount && channelIndexCount != 0) {
channelCount = 0; // position and index channel count mismatch
}
return channelCount;
}
/** @hide */
public int getPropertySetMask() {
return mPropertySetMask;
}
/** @hide */
public String toLogFriendlyString() {
return String.format("%dch %dHz %s",
getChannelCount(), mSampleRate, toLogFriendlyEncoding(mEncoding));
}
/**
* Builder class for {@link AudioFormat} objects.
* Use this class to configure and create an AudioFormat instance. By setting format
* characteristics such as audio encoding, channel mask or sample rate, you indicate which
* of those are to vary from the default behavior on this device wherever this audio format
* is used. See {@link AudioFormat} for a complete description of the different parameters that
* can be used to configure an <code>AudioFormat</code> instance.
* <p>{@link AudioFormat} is for instance used in
* {@link AudioTrack#AudioTrack(AudioAttributes, AudioFormat, int, int, int)}. In this
* constructor, every format characteristic set on the <code>Builder</code> (e.g. with
* {@link #setSampleRate(int)}) will alter the default values used by an
* <code>AudioTrack</code>. In this case for audio playback with <code>AudioTrack</code>, the
* sample rate set in the <code>Builder</code> would override the platform output sample rate
* which would otherwise be selected by default.
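* <p>For example, a sketch of building a format for 16 bit stereo playback at 44.1kHz (the
* specific values are illustrative):
* <pre>{@code
* AudioFormat format = new AudioFormat.Builder()
*         .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
*         .setSampleRate(44100)
*         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
*         .build();
* }</pre>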
*/
public static class Builder {
private int mEncoding = ENCODING_INVALID;
private int mSampleRate = SAMPLE_RATE_UNSPECIFIED;
private int mChannelMask = CHANNEL_INVALID;
private int mChannelIndexMask = 0;
private int mPropertySetMask = AUDIO_FORMAT_HAS_PROPERTY_NONE;
/**
* Constructs a new Builder with none of the format characteristics set.
*/
public Builder() {
}
/**
* Constructs a new Builder from a given {@link AudioFormat}.
* @param af the {@link AudioFormat} object whose data will be reused in the new Builder.
*/
public Builder(AudioFormat af) {
mEncoding = af.mEncoding;
mSampleRate = af.mSampleRate;
mChannelMask = af.mChannelMask;
mChannelIndexMask = af.mChannelIndexMask;
mPropertySetMask = af.mPropertySetMask;
}
/**
* Combines all of the format characteristics that have been set and returns a new
* {@link AudioFormat} object.
* @return a new {@link AudioFormat} object
*/
public AudioFormat build() {
AudioFormat af = new AudioFormat(1980/*ignored*/);
af.mEncoding = mEncoding;
// not calling setSampleRate is equivalent to calling
// setSampleRate(SAMPLE_RATE_UNSPECIFIED)
af.mSampleRate = mSampleRate;
af.mChannelMask = mChannelMask;
af.mChannelIndexMask = mChannelIndexMask;
af.mPropertySetMask = mPropertySetMask;
return af;
}
/**
* Sets the data encoding format.
* @param encoding one of {@link AudioFormat#ENCODING_DEFAULT},
* {@link AudioFormat#ENCODING_PCM_8BIT},
* {@link AudioFormat#ENCODING_PCM_16BIT},
* {@link AudioFormat#ENCODING_PCM_FLOAT},
* {@link AudioFormat#ENCODING_AC3},
* {@link AudioFormat#ENCODING_E_AC3},
* {@link AudioFormat#ENCODING_DTS},
* {@link AudioFormat#ENCODING_DTS_HD},
* {@link AudioFormat#ENCODING_IEC61937}.
* @return the same Builder instance.
* @throws java.lang.IllegalArgumentException
*/
public Builder setEncoding(@Encoding int encoding) throws IllegalArgumentException {
switch (encoding) {
case ENCODING_DEFAULT:
mEncoding = ENCODING_PCM_16BIT;
break;
case ENCODING_PCM_8BIT:
case ENCODING_PCM_16BIT:
case ENCODING_PCM_FLOAT:
case ENCODING_AC3:
case ENCODING_E_AC3:
case ENCODING_DTS:
case ENCODING_DTS_HD:
case ENCODING_IEC61937:
mEncoding = encoding;
break;
case ENCODING_INVALID:
default:
throw new IllegalArgumentException("Invalid encoding " + encoding);
}
mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_ENCODING;
return this;
}
/**
* Sets the channel position mask.
* The channel position mask specifies the association between audio samples in a frame
* with named endpoint channels. The samples in the frame correspond to the
* named set bits in the channel position mask, in ascending bit order.
* See {@link #setChannelIndexMask(int)} to specify channels
* based on endpoint numbered channels.
* @param channelMask describes the configuration of the audio channels.
* @return the same Builder instance.
* @throws java.lang.IllegalArgumentException
*/
public Builder setChannelMask(int channelMask) {
if (channelMask == 0) {
throw new IllegalArgumentException("Invalid zero channel mask");
}
mChannelMask = channelMask;
mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK;
return this;
}
/**
* Sets the channel index mask.
* The channel index mask specifies the association of audio samples in the frame
* with numbered endpoint channels: bit i of the mask selects endpoint channel i.
* See {@link #setChannelMask(int)} to specify channels by their position names instead.
* @param channelIndexMask describes the configuration of the audio channels.
* @return the same Builder instance.
* @throws java.lang.IllegalArgumentException
*/
public Builder setChannelIndexMask(int channelIndexMask) {
if (channelIndexMask == 0) {
throw new IllegalArgumentException("Invalid zero channel index mask");
}
mChannelIndexMask = channelIndexMask;
mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK;
return this;
}
/**
* Sets the sample rate.
* @param sampleRate the sample rate expressed in Hz
* @return the same Builder instance.
* @throws java.lang.IllegalArgumentException
*/
public Builder setSampleRate(int sampleRate) throws IllegalArgumentException {
if (((sampleRate < SAMPLE_RATE_HZ_MIN) || (sampleRate > SAMPLE_RATE_HZ_MAX)) &&
sampleRate != SAMPLE_RATE_UNSPECIFIED) {
throw new IllegalArgumentException("Invalid sample rate " + sampleRate);
}
mSampleRate = sampleRate;
mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE;
return this;
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AudioFormat that = (AudioFormat) o;
if (mPropertySetMask != that.mPropertySetMask) return false;
// return false if any of the properties is set and the values differ
return !((((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0)
&& (mEncoding != that.mEncoding))
|| (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
&& (mSampleRate != that.mSampleRate))
|| (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
&& (mChannelMask != that.mChannelMask))
|| (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0)
&& (mChannelIndexMask != that.mChannelIndexMask)));
}
@Override
public int hashCode() {
return Objects.hash(mPropertySetMask, mSampleRate, mEncoding, mChannelMask,
mChannelIndexMask);
}
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeInt(mPropertySetMask);
dest.writeInt(mEncoding);
dest.writeInt(mSampleRate);
dest.writeInt(mChannelMask);
dest.writeInt(mChannelIndexMask);
}
private AudioFormat(Parcel in) {
mPropertySetMask = in.readInt();
mEncoding = in.readInt();
mSampleRate = in.readInt();
mChannelMask = in.readInt();
mChannelIndexMask = in.readInt();
}
public static final Parcelable.Creator<AudioFormat> CREATOR =
new Parcelable.Creator<AudioFormat>() {
public AudioFormat createFromParcel(Parcel p) {
return new AudioFormat(p);
}
public AudioFormat[] newArray(int size) {
return new AudioFormat[size];
}
};