Streaming live Android audio to a server
I am trying to stream live microphone audio from an Android device to a Java program. I started by sending the live audio between two Android devices to confirm that my approach was sound: on the receiving device the audio could be heard perfectly, with barely any delay. Next I sent the same audio stream to a small Java program and verified that the data was arriving correctly. Now I want to encode this data and somehow play it back on the server running the Java program. I would prefer to play it in a web browser using HTML5 or JavaScript, but I am open to alternatives such as VLC.
Here is the code of the Android app that sends the live microphone audio:
public class MainActivity extends Activity {

    private Button startButton, stopButton;

    public byte[] buffer;
    public static DatagramSocket socket;
    AudioRecord recorder;

    private int sampleRate = 44100;
    private int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    private int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    int minBufSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
    private boolean status = true;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        startButton = (Button) findViewById(R.id.start_button);
        stopButton = (Button) findViewById(R.id.stop_button);

        startButton.setOnClickListener(startListener);
        stopButton.setOnClickListener(stopListener);

        minBufSize += 2048;
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    private final OnClickListener stopListener = new OnClickListener() {
        @Override
        public void onClick(View arg0) {
            status = false;
            recorder.release();
            Log.d("VS", "Recorder released");
        }
    };

    private final OnClickListener startListener = new OnClickListener() {
        @Override
        public void onClick(View arg0) {
            status = true;
            startStreaming();
        }
    };

    public void startStreaming() {
        Thread streamThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    DatagramSocket socket = new DatagramSocket();
                    Log.d("VS", "Socket Created");

                    byte[] buffer = new byte[minBufSize];
                    Log.d("VS", "Buffer created of size " + minBufSize);
                    Log.d("VS", "Address retrieved");

                    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate, channelConfig, audioFormat, minBufSize);
                    Log.d("VS", "Recorder initialized");
                    recorder.startRecording();

                    InetAddress IPAddress = InetAddress.getByName("192.168.1.5");
                    byte[] sendData = new byte[1024];
                    byte[] receiveData = new byte[1024];

                    while (status == true) {
                        DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length, IPAddress, 50005);
                        socket.send(sendPacket);
                    }
                } catch (UnknownHostException e) {
                    Log.e("VS", "UnknownHostException");
                } catch (IOException e) {
                    Log.e("VS", "IOException");
                    e.printStackTrace();
                }
            }
        });
        streamThread.start();
    }
}
And here is the code of the Java program that reads the data:
class Server {
    public static void main(String args[]) throws Exception {
        DatagramSocket serverSocket = new DatagramSocket(50005);
        byte[] receiveData = new byte[1024];
        byte[] sendData = new byte[1024];

        while (true) {
            DatagramPacket receivePacket = new DatagramPacket(receiveData, receiveData.length);
            serverSocket.receive(receivePacket);

            String sentence = new String(receivePacket.getData().toString());
            System.out.println("RECEIVED: " + sentence);
        }
    }
}
I know I should encode the audio on the app side before sending it to the Java program, but I am not sure how to do that while using AudioRecord. I would rather not use the NDK, since I have no experience with it and no time to learn how to use it... but :)
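For the encoding part: the MediaCodec API (available since Android API 16) can compress raw PCM to AAC in plain Java, with no NDK involved. Below is a minimal sketch, assuming an AAC encoder is present on the device; the class name AacEncoder and the 32 kbit/s bit rate are illustrative choices, not taken from the code in this thread.

import java.io.IOException;
import java.nio.ByteBuffer;

import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;

// Sketch: compress PCM chunks from AudioRecord with the framework AAC encoder.
public class AacEncoder {

    private final MediaCodec encoder;

    public AacEncoder(int sampleRate) throws IOException {
        MediaFormat fmt = MediaFormat.createAudioFormat("audio/mp4a-latm", sampleRate, 1);
        fmt.setInteger(MediaFormat.KEY_BIT_RATE, 32000);
        fmt.setInteger(MediaFormat.KEY_AAC_PROFILE,
                MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        encoder = MediaCodec.createEncoderByType("audio/mp4a-latm");
        encoder.configure(fmt, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        encoder.start();
    }

    // Feed one chunk from recorder.read(...) in; returns an encoded frame,
    // or null if the codec has nothing ready yet.
    public byte[] encode(byte[] pcm, int pcmLen) {
        int inIndex = encoder.dequeueInputBuffer(10000); // wait up to 10 ms
        if (inIndex >= 0) {
            ByteBuffer in = encoder.getInputBuffers()[inIndex];
            in.clear();
            in.put(pcm, 0, pcmLen);
            encoder.queueInputBuffer(inIndex, 0, pcmLen, 0, 0);
        }

        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        int outIndex = encoder.dequeueOutputBuffer(info, 10000);
        if (outIndex < 0) {
            return null; // covers "try again later" and format-changed signals
        }
        ByteBuffer out = encoder.getOutputBuffers()[outIndex];
        out.position(info.offset);
        out.limit(info.offset + info.size);
        byte[] aac = new byte[info.size];
        out.get(aac);
        encoder.releaseOutputBuffer(outIndex, false);
        return aac; // send this over the DatagramSocket instead of raw PCM
    }
}

A real stream would additionally have to handle the codec-config buffer (BUFFER_FLAG_CODEC_CONFIG) and wrap each frame, for example in ADTS headers, so the receiver can decode it.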
So I solved my problem. The problem was mainly on the receiving side. The receiver takes in the audio stream and pushes it out to the PC's speakers. The resulting sound is still quite laggy and broken, but it works nonetheless. Playing with the buffer sizes may improve this.
Edit: use a thread to read the audio in order to avoid lag. Also, it is better to use a sample rate of 16,000 Hz, since that is fine for voice.
Android code:
package com.example.mictest2;

import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.UnknownHostException;

import android.app.Activity;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;

public class Send extends Activity {

    private Button startButton, stopButton;

    public byte[] buffer;
    public static DatagramSocket socket;
    private int port = 50005;

    AudioRecord recorder;

    private int sampleRate = 16000; // 44100 for music
    private int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    private int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    int minBufSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
    private boolean status = true;

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        startButton = (Button) findViewById(R.id.start_button);
        stopButton = (Button) findViewById(R.id.stop_button);

        startButton.setOnClickListener(startListener);
        stopButton.setOnClickListener(stopListener);
    }

    private final OnClickListener stopListener = new OnClickListener() {
        @Override
        public void onClick(View arg0) {
            status = false;
            recorder.release();
            Log.d("VS", "Recorder released");
        }
    };

    private final OnClickListener startListener = new OnClickListener() {
        @Override
        public void onClick(View arg0) {
            status = true;
            startStreaming();
        }
    };

    public void startStreaming() {
        Thread streamThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    DatagramSocket socket = new DatagramSocket();
                    Log.d("VS", "Socket Created");

                    byte[] buffer = new byte[minBufSize];
                    Log.d("VS", "Buffer created of size " + minBufSize);

                    DatagramPacket packet;
                    final InetAddress destination = InetAddress.getByName("192.168.1.5");
                    Log.d("VS", "Address retrieved");

                    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate, channelConfig, audioFormat, minBufSize * 10);
                    Log.d("VS", "Recorder initialized");
                    recorder.startRecording();

                    while (status == true) {
                        // reading data from MIC into buffer
                        minBufSize = recorder.read(buffer, 0, buffer.length);

                        // putting buffer in the packet
                        packet = new DatagramPacket(buffer, buffer.length, destination, port);
                        socket.send(packet);
                        System.out.println("MinBufferSize: " + minBufSize);
                    }
                } catch (UnknownHostException e) {
                    Log.e("VS", "UnknownHostException");
                } catch (IOException e) {
                    e.printStackTrace();
                    Log.e("VS", "IOException");
                }
            }
        });
        streamThread.start();
    }
}
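Note that streaming the microphone requires the android.permission.RECORD_AUDIO and android.permission.INTERNET permissions declared in AndroidManifest.xml; without them the AudioRecord will not initialize and the socket cannot send. The destination address (192.168.1.5 here) of course has to be replaced with the machine the server actually runs on.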
Android XML:
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:paddingBottom="@dimen/activity_vertical_margin"
    android:paddingLeft="@dimen/activity_horizontal_margin"
    android:paddingRight="@dimen/activity_horizontal_margin"
    android:paddingTop="@dimen/activity_vertical_margin"
    tools:context=".MainActivity" >

    <TextView
        android:id="@+id/textView1"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="@string/hello_world" />

    <Button
        android:id="@+id/start_button"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_below="@+id/textView1"
        android:layout_centerHorizontal="true"
        android:layout_marginTop="130dp"
        android:text="Start" />

    <Button
        android:id="@+id/stop_button"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_alignLeft="@+id/start_button"
        android:layout_below="@+id/start_button"
        android:layout_marginTop="64dp"
        android:text="Stop" />

</RelativeLayout>
Server code:
package com.datagram;

import java.io.ByteArrayInputStream;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.util.Arrays;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.FloatControl;
import javax.sound.sampled.SourceDataLine;

class Server {

    AudioInputStream audioInputStream;
    static AudioInputStream ais;
    static AudioFormat format;
    static boolean status = true;
    static int port = 50005;
    static int sampleRate = 44100;

    public static void main(String args[]) throws Exception {
        DatagramSocket serverSocket = new DatagramSocket(50005);

        // 1280 for 16 000 Hz and 3584 for 44 100 Hz -- use
        // AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)
        // on the Android side to get the correct size
        byte[] receiveData = new byte[1280];

        format = new AudioFormat(sampleRate, 16, 1, true, false);

        while (status == true) {
            DatagramPacket receivePacket = new DatagramPacket(receiveData, receiveData.length);
            serverSocket.receive(receivePacket);

            ByteArrayInputStream baiss = new ByteArrayInputStream(receivePacket.getData());
            ais = new AudioInputStream(baiss, format, receivePacket.getLength());

            // Copy the payload: the receive buffer is reused for the next
            // packet, so the playback thread must not read it directly.
            final byte[] data = Arrays.copyOf(receivePacket.getData(), receivePacket.getLength());

            // A thread solves the problem of chunky audio
            new Thread(new Runnable() {
                @Override
                public void run() {
                    toSpeaker(data);
                }
            }).start();
        }
    }

    public static void toSpeaker(byte soundbytes[]) {
        try {
            DataLine.Info dataLineInfo = new DataLine.Info(SourceDataLine.class, format);
            SourceDataLine sourceDataLine = (SourceDataLine) AudioSystem.getLine(dataLineInfo);
            sourceDataLine.open(format);

            FloatControl volumeControl = (FloatControl) sourceDataLine.getControl(FloatControl.Type.MASTER_GAIN);
            // setValue(100.0f) is outside the allowed gain range and throws;
            // use the line's maximum instead
            volumeControl.setValue(volumeControl.getMaximum());

            sourceDataLine.start();
            System.out.println("format? :" + sourceDataLine.getFormat());

            sourceDataLine.write(soundbytes, 0, soundbytes.length);
            sourceDataLine.drain();
            sourceDataLine.close();
        } catch (Exception e) {
            System.out.println("Not working in speakers...");
            e.printStackTrace();
        }
    }
}
I hope this helps save someone a few hours of pain :)
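For what it's worth, this version still opens, drains and closes a fresh SourceDataLine for every received packet, which is expensive and accounts for part of the remaining choppiness; the variant in the next answer opens the line once and reuses it.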
Here are my 2 cents on your code, to improve the efficiency. Nice attempt!
package com.datagram;

import java.io.ByteArrayInputStream;
import java.net.DatagramPacket;
import java.net.DatagramSocket;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.FloatControl;
import javax.sound.sampled.SourceDataLine;

class Server {

    AudioInputStream audioInputStream;
    static AudioInputStream ais;
    static AudioFormat format;
    static boolean status = true;
    static int port = 50005;
    static int sampleRate = 44100;

    static DataLine.Info dataLineInfo;
    static SourceDataLine sourceDataLine;

    public static void main(String args[]) throws Exception {
        DatagramSocket serverSocket = new DatagramSocket(port);

        /**
         * Formula for lag = (byte_size/sample_rate)*2
         * Byte size 9728 will produce ~ 0.45 seconds of lag. Voice slightly broken.
         * Byte size 1400 will produce ~ 0.06 seconds of lag. Voice extremely broken.
         * Byte size 4000 will produce ~ 0.18 seconds of lag. Voice slightly more broken than 9728.
         */
        byte[] receiveData = new byte[4096];

        format = new AudioFormat(sampleRate, 16, 1, true, false);

        // Open the speaker line once and reuse it for every packet
        dataLineInfo = new DataLine.Info(SourceDataLine.class, format);
        sourceDataLine = (SourceDataLine) AudioSystem.getLine(dataLineInfo);
        sourceDataLine.open(format);
        sourceDataLine.start();

        FloatControl volumeControl = (FloatControl) sourceDataLine.getControl(FloatControl.Type.MASTER_GAIN);
        volumeControl.setValue(1.00f);

        DatagramPacket receivePacket = new DatagramPacket(receiveData, receiveData.length);

        while (status == true) {
            serverSocket.receive(receivePacket);
            ais = new AudioInputStream(
                    new ByteArrayInputStream(receivePacket.getData()), format,
                    receivePacket.getLength());
            toSpeaker(receivePacket.getData());
        }

        sourceDataLine.drain();
        sourceDataLine.close();
    }

    public static void toSpeaker(byte soundbytes[]) {
        try {
            sourceDataLine.write(soundbytes, 0, soundbytes.length);
        } catch (Exception e) {
            System.out.println("Not working in speakers...");
            e.printStackTrace();
        }
    }
}
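Two things worth knowing when wiring this up: the AudioFormat(sampleRate, 16, 1, true, false) arguments (16-bit, mono, signed, little-endian) match what Android's ENCODING_PCM_16BIT delivers, so the raw bytes can be written to the speaker line unmodified; and the sampleRate constant must equal the one used in the app, otherwise the audio plays at the wrong pitch and speed.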
The sound is broken because of the following line in your Android code:
minBufSize += 2048;
You are just adding empty bytes. Also, use CHANNEL_IN_MONO instead of the deprecated CHANNEL_CONFIGURATION_MONO.
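Putting both fixes together, the recorder setup would look roughly like this (the minBufSize * 10 internal buffer follows the accepted answer's code; destination and port are as in the code above):

// Corrected setup: CHANNEL_IN_MONO and the unpadded minimum buffer size.
int sampleRate = 16000;
int minBufSize = AudioRecord.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
        sampleRate, AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_16BIT, minBufSize * 10);
recorder.startRecording();

byte[] buffer = new byte[minBufSize];
int read = recorder.read(buffer, 0, buffer.length);
// Send only 'read' bytes: packets padded with empty bytes are what broke the sound.
DatagramPacket packet = new DatagramPacket(buffer, read, destination, port);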