Try this code in your application…
private int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO;   // CHANNEL_CONFIGURATION_MONO is deprecated
private int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private int RECORDER_SAMPLERATE = 44100;
private byte RECORDER_BPP = 16;   // bits per sample
private AudioRecord audioRecorder;
public void arm() {
    // Minimum buffer size required to create an AudioRecord object successfully.
    int bufferSizeInBytes = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE, RECORDER_CHANNELS,
            RECORDER_AUDIO_ENCODING);

    // Initialize the audio recorder (requires the RECORD_AUDIO permission).
    audioRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, RECORDER_SAMPLERATE,
            RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING, bufferSizeInBytes);

    // Start recording.
    audioRecorder.startRecording();

    int numberOfReadBytes = 0;
    byte[] audioBuffer = new byte[bufferSizeInBytes];
    boolean recording = false;
    float[] tempFloatBuffer = new float[3];              // rolling window of the last three loudness values
    int tempIndex = 0;
    int totalReadBytes = 0;
    byte[] totalByteBuffer = new byte[60 * 44100 * 2];   // room for 60 s of 16-bit mono at 44.1 kHz
    // Keep reading data from the microphone.
    while (true) {
        float totalAbsValue = 0.0f;
        short sample;

        numberOfReadBytes = audioRecorder.read(audioBuffer, 0, bufferSizeInBytes);

        // Analyze the sound: average absolute sample amplitude of this buffer.
        for (int i = 0; i < numberOfReadBytes; i += 2) {
            sample = (short) ((audioBuffer[i] & 0xff) | (audioBuffer[i + 1] << 8));
            totalAbsValue += Math.abs(sample) / (numberOfReadBytes / 2f);
        }
        // Analyze the temp buffer: sum of the last three loudness values.
        tempFloatBuffer[tempIndex % 3] = totalAbsValue;
        float temp = 0.0f;
        for (int i = 0; i < 3; ++i)
            temp += tempFloatBuffer[i];

        // Still silent and not recording yet: keep waiting for a sound.
        if (temp >= 0 && temp <= 350 && !recording) {
            Log.i("TAG", "1");
            tempIndex++;
            continue;
        }

        // Loudness crossed the threshold: start recording.
        if (temp > 350 && !recording) {
            Log.i("TAG", "2");
            recording = true;
        }
        // Loudness dropped back below the threshold while recording: save the audio to a file.
        if (temp >= 0 && temp <= 350 && recording) {
            Log.i("TAG", "Save audio to file.");

            // Save audio to file (writing here needs the WRITE_EXTERNAL_STORAGE permission on older Android versions).
            String filepath = Environment.getExternalStorageDirectory().getPath();
            File file = new File(filepath, "AudioRecorder");
            if (!file.exists())
                file.mkdirs();
            String fn = file.getAbsolutePath() + "/" + System.currentTimeMillis() + ".wav";

            long totalAudioLen = totalReadBytes;
            long totalDataLen = totalAudioLen + 36;
            long longSampleRate = RECORDER_SAMPLERATE;
            int channels = 1;
            long byteRate = RECORDER_BPP * RECORDER_SAMPLERATE * channels / 8;

            byte[] finalBuffer = new byte[totalReadBytes + 44];
            finalBuffer[0] = 'R';   // RIFF/WAVE header
            finalBuffer[1] = 'I';
            finalBuffer[2] = 'F';
            finalBuffer[3] = 'F';
            finalBuffer[4] = (byte) (totalDataLen & 0xff);
            finalBuffer[5] = (byte) ((totalDataLen >> 8) & 0xff);
            finalBuffer[6] = (byte) ((totalDataLen >> 16) & 0xff);
            finalBuffer[7] = (byte) ((totalDataLen >> 24) & 0xff);
            finalBuffer[8] = 'W';
            finalBuffer[9] = 'A';
            finalBuffer[10] = 'V';
            finalBuffer[11] = 'E';
            finalBuffer[12] = 'f';  // 'fmt ' chunk
            finalBuffer[13] = 'm';
            finalBuffer[14] = 't';
            finalBuffer[15] = ' ';
            finalBuffer[16] = 16;   // 4 bytes: size of the 'fmt ' chunk
            finalBuffer[17] = 0;
            finalBuffer[18] = 0;
            finalBuffer[19] = 0;
            finalBuffer[20] = 1;    // audio format = 1 (PCM)
            finalBuffer[21] = 0;
            finalBuffer[22] = (byte) channels;
            finalBuffer[23] = 0;
            finalBuffer[24] = (byte) (longSampleRate & 0xff);
            finalBuffer[25] = (byte) ((longSampleRate >> 8) & 0xff);
            finalBuffer[26] = (byte) ((longSampleRate >> 16) & 0xff);
            finalBuffer[27] = (byte) ((longSampleRate >> 24) & 0xff);
            finalBuffer[28] = (byte) (byteRate & 0xff);
            finalBuffer[29] = (byte) ((byteRate >> 8) & 0xff);
            finalBuffer[30] = (byte) ((byteRate >> 16) & 0xff);
            finalBuffer[31] = (byte) ((byteRate >> 24) & 0xff);
            finalBuffer[32] = (byte) (channels * RECORDER_BPP / 8);   // block align (2 for 16-bit mono)
            finalBuffer[33] = 0;
            finalBuffer[34] = RECORDER_BPP;                           // bits per sample
            finalBuffer[35] = 0;
            finalBuffer[36] = 'd';  // 'data' chunk
            finalBuffer[37] = 'a';
            finalBuffer[38] = 't';
            finalBuffer[39] = 'a';
            finalBuffer[40] = (byte) (totalAudioLen & 0xff);
            finalBuffer[41] = (byte) ((totalAudioLen >> 8) & 0xff);
            finalBuffer[42] = (byte) ((totalAudioLen >> 16) & 0xff);
            finalBuffer[43] = (byte) ((totalAudioLen >> 24) & 0xff);
            // Append the recorded PCM data after the 44-byte header.
            System.arraycopy(totalByteBuffer, 0, finalBuffer, 44, totalReadBytes);

            // Write the finished WAV file.
            try (FileOutputStream out = new FileOutputStream(fn)) {
                out.write(finalBuffer);
            } catch (IOException e) {
                e.printStackTrace();
            }

            tempIndex++;
            break;
        }
        // Sound is still above the threshold: keep accumulating the recorded bytes.
        // Note: totalByteBuffer holds at most 60 s of audio; longer sounds would overflow it.
        Log.i("TAG", "Recording Sound.");
        for (int i = 0; i < numberOfReadBytes; i++)
            totalByteBuffer[totalReadBytes + i] = audioBuffer[i];
        totalReadBytes += numberOfReadBytes;

        tempIndex++;
    }
}
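Note that audioRecorder.read() blocks and the loop above only exits after a detected sound has been saved, so arm() must not be called on the UI thread. Below is a minimal sketch of how it might be started, assuming the RECORD_AUDIO (and, on older versions, WRITE_EXTERNAL_STORAGE) permission has already been granted; the plain worker thread and its name are only an illustration, not part of the original answer.

// Start the detector on a background thread so the blocking read loop
// does not freeze the UI. The thread name is purely illustrative.
new Thread(new Runnable() {
    @Override
    public void run() {
        arm();   // blocks until one sound has been detected and written to a .wav file
    }
}, "audio-arm").start();

Also note that arm() never calls audioRecorder.stop() or audioRecorder.release(), so the microphone stays claimed after the method returns; releasing the AudioRecord instance when you are done with it frees it for other apps.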
For more details, you can also take a look at this demo…
http://musicg.googlecode.com/files/musicg_android_demo.zip