我正在通过OpenGL
、MediaCodec
和MediaMuxer
进行实时视频处理。
我已经在下面添加了核心代码。
/**
 * Records OpenGL-rendered frames into an MP4 file.
 *
 * Uses a MediaCodec surface encoder ("video/avc") fed through an EGL input
 * surface, and a MediaMuxer to wrap the raw H.264 elementary stream into MP4.
 * Start/stop are requested from any thread and applied on the GL thread via
 * updateStartRecording()/updateStopRecording(); the per-frame flow is
 * recordFrameStep1() -> draw -> recordFrameStep2().
 *
 * Not thread-safe: all update/record methods must run on the GL thread.
 */
public class VideoSavingController
{
    // Static Variables
    private static final String MIME_TYPE = "video/avc";    // H.264/AVC
    private static final int FRAME_RATE = 15;               // advisory only for surface input
    private static final int IFRAME_INTERVAL = 1;           // seconds between key frames
    private static final int TIMEOUT_USEC = 10000;          // dequeueOutputBuffer timeout (10 ms)
    private static final int BIT_RATE = 16 * 1000 * 1000;   // 16 Mbps

    // Member Variables
    private boolean mIsRecordingStarted = false;
    private boolean mIsStartRequested = false;  // renamed from "mIsStartRequsted" (typo)
    private boolean mIsStopRequested = false;
    private MediaCodec mEncoder;
    private MediaCodec.BufferInfo mBufferInfo;
    private MediaMuxer mMuxer;
    private int mTrackIndex;
    private boolean mMuxerStarted = false;
    private VideoSavingSurface mInputSurface;
    private String mOutputPath;
    // Camera timestamp of the first recorded frame; PTS values are made
    // relative to it so the video starts at t = 0.
    private long mStartTime;

    // Constructor
    public VideoSavingController() {}

    // Controls

    /** Asks the GL thread to begin recording on its next update pass. */
    public void requestStartRecording()
    {
        mIsStartRequested = true;
    }

    /** Call on the GL thread: honors a pending start request, if any. */
    public void updateStartRecording()
    {
        if (mIsStartRequested)
        {
            startRecording();
            mIsStartRequested = false;
            mStartTime = SnapDat.camera().mCamera.timestamp();
        }
    }

    /** Idempotent: sets up the encoder/muxer exactly once per recording. */
    private void startRecording()
    {
        if (mIsRecordingStarted)
            return;
        mIsRecordingStarted = true;
        prepareEncoder();
    }

    /**
     * First half of a frame: makes the encoder's input surface current and
     * drains any output the encoder has produced so far, so eglSwapBuffers()
     * in step 2 cannot block on a full codec.
     */
    public void recordFrameStep1()
    {
        if (!mIsRecordingStarted)
            return;
        mInputSurface.makeCurrent();
        drainEncoder(false);
    }

    /**
     * Draw the Image Between These Steps.
     * Share the texture between contexts by passing the GLSurfaceView's
     * EGLContext as eglCreateContext()'s share_context argument.
     */

    /** Second half of a frame: stamps the PTS and submits the frame. */
    public void recordFrameStep2()
    {
        if (!mIsRecordingStarted)
            return;
        // Set the presentation time stamp from the SurfaceTexture's time stamp.
        // This will be used by MediaMuxer to set the PTS in the video.
        long time = SnapDat.camera().mCamera.timestamp() - mStartTime;
        mInputSurface.setPresentationTime(time);
        // Submit it to the encoder. The eglSwapBuffers call will block if the
        // input is full; since we fully drained the encoder in step 1, the
        // system guarantees we can supply another frame without blocking.
        mInputSurface.swapBuffers();
    }

    /** Asks the GL thread to stop recording on its next update pass. */
    public void requestStopRecording()
    {
        mIsStopRequested = true;
    }

    /** Call on the GL thread: honors a pending stop request, if any. */
    public void updateStopRecording()
    {
        if (mIsStopRequested)
        {
            mIsStopRequested = false;
            stopRecording();
        }
    }

    /** Drains the final frames, finalizes the MP4, and announces the file. */
    private void stopRecording()
    {
        if (!mIsRecordingStarted)
            return;
        mIsRecordingStarted = false;
        drainEncoder(true);     // signal EOS and drain until the encoder confirms it
        releaseEncoder();
        // Notify Video File Added
        File videoFile = new File(mOutputPath);
        UtilityVideo.addVideo(videoFile, SnapDat.currentActivity());
    }

    /** @return true while frames are being accepted for encoding. */
    public boolean isRecording()
    {
        return mIsRecordingStarted;
    }

    // Encoder

    /**
     * Configures the H.264 encoder, its EGL input surface, and the muxer.
     * The muxer is NOT started here: its track can only be added once the
     * encoder reports its actual output format (see drainEncoder()).
     */
    private void prepareEncoder()
    {
        // Determine Size: scale the preview so its longest side is 640 px.
        Size previewSize = xxxx  // TODO: placeholder in original source — supply real preview size
        int maxSize = Math.max(previewSize.width, previewSize.height);
        int width = (640 * previewSize.width) / maxSize;
        int height = (640 * previewSize.height) / maxSize;
        if (!xxxx.isLandscape())  // TODO: placeholder in original source — supply orientation check
        {
            // Portrait: swap so width/height match the rotated frame.
            int oldWidth = width;
            width = height;
            height = oldWidth;
        }
        // Force Factor of 16 Size — many AVC encoders require 16-aligned dimensions.
        width = (width / 16) * 16;
        height = (height / 16) * 16;
        mBufferInfo = new MediaCodec.BufferInfo();
        MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, width, height);
        format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
        format.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
        format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
        try
        {
            // createEncoderByType throws a checked IOException; the original
            // code did not catch it and would not compile.
            mEncoder = MediaCodec.createEncoderByType(MIME_TYPE);
        }
        catch (IOException ioe)
        {
            throw new RuntimeException("MediaCodec creation failed", ioe);
        }
        mEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        // The input surface must be created after configure() and before start().
        mInputSurface = new VideoSavingSurface(mEncoder.createInputSurface());
        mEncoder.start();
        // Output filename
        mOutputPath = VideoSaver.getVideoPath();
        // Create a MediaMuxer. We can't add the video track and start() the muxer
        // here, because our MediaFormat doesn't have the Magic Goodies. These can
        // only be obtained from the encoder after it has started processing data.
        //
        // We're not actually interested in multiplexing audio. We just want to
        // convert the raw H.264 elementary stream we get from MediaCodec into an
        // .mp4 file.
        try
        {
            mMuxer = new MediaMuxer(mOutputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        }
        catch (IOException ioe)
        {
            throw new RuntimeException("MediaMuxer creation failed", ioe);
        }
        mTrackIndex = -1;
        mMuxerStarted = false;
    }

    /** Releases encoder, input surface, and muxer; safe to call repeatedly. */
    private void releaseEncoder()
    {
        if (mEncoder != null)
        {
            mEncoder.stop();
            mEncoder.release();
            mEncoder = null;
        }
        if (mInputSurface != null)
        {
            mInputSurface.release();
            mInputSurface = null;
        }
        if (mMuxer != null)
        {
            // MediaMuxer.stop() throws IllegalStateException if start() was
            // never called (no frame ever reached the muxer), so only stop a
            // muxer that actually started.
            if (mMuxerStarted)
                mMuxer.stop();
            mMuxer.release();
            mMuxer = null;
            mMuxerStarted = false;
        }
    }

    /**
     * Extracts all pending data from the encoder and forwards it to the muxer.
     *
     * @param endOfStream true when stopping: signals end-of-stream to the
     *                    encoder and then keeps draining until the encoder
     *                    emits its BUFFER_FLAG_END_OF_STREAM buffer, so the
     *                    MP4 is properly finalized.
     */
    private void drainEncoder(boolean endOfStream)
    {
        if (endOfStream)
            mEncoder.signalEndOfInputStream();
        ByteBuffer[] encoderOutputBuffers = mEncoder.getOutputBuffers();
        while (true)
        {
            int encoderStatus = mEncoder.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
            if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER)
            {
                // No output available yet.
                if (!endOfStream)
                    break;      // come back on the next frame
                // else: keep looping — we must wait for the EOS buffer,
                // otherwise the last frames are lost and the file may be
                // left unfinalized. (Original code broke out here.)
            }
            else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED)
            {
                // not expected for an encoder
                encoderOutputBuffers = mEncoder.getOutputBuffers();
            }
            else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED)
            {
                // Should happen before receiving buffers, and should only happen once.
                if (mMuxerStarted)
                    throw new RuntimeException("format changed twice");
                MediaFormat newFormat = mEncoder.getOutputFormat();
                // Now that we have the Magic Goodies, start the muxer.
                mTrackIndex = mMuxer.addTrack(newFormat);
                mMuxer.start();
                mMuxerStarted = true;
            }
            else if (encoderStatus < 0)
            {
                // Unexpected status; per the reference implementation this is
                // ignorable, so just continue draining.
            }
            else
            {
                ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                if (encodedData == null)
                    throw new RuntimeException(
                            "encoderOutputBuffer " + encoderStatus + " was null");
                if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0)
                {
                    // Codec config data (SPS/PPS) was already passed to the muxer
                    // inside the MediaFormat; don't write it as a sample.
                    mBufferInfo.size = 0;
                }
                if (mBufferInfo.size != 0)
                {
                    if (!mMuxerStarted)
                        throw new RuntimeException("muxer hasn't started");
                    // Adjust the ByteBuffer values to match BufferInfo.
                    encodedData.position(mBufferInfo.offset);
                    encodedData.limit(mBufferInfo.offset + mBufferInfo.size);
                    mMuxer.writeSampleData(mTrackIndex, encodedData, mBufferInfo);
                }
                mEncoder.releaseOutputBuffer(encoderStatus, false);
                if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0)
                    break;      // out of while
            }
        }
    }
}
驱动此操作的代码如下所示:
OpenGLState oldState = OpenGLState.createCurrent();
mSaveVideo.updateStartRecording();
if (mSaveVideo.isRecording())
{
mSaveVideo.recordFrameStep1();
// Draws Image here
mSaveVideo.recordFrameStep2();
}
mSaveVideo.updateStopRecording();
oldState.makeCurrent();
当使用共享上下文时,这似乎是驱动程序中的一个bug。
这篇文章有详细内容。简而言之,其中一个上下文没有注意到纹理内容发生了变化,因此它一直呈现先前的数据。您可以通过绑定到纹理0,然后返回到实际的纹理ID来解决这个问题。
除了前一节介绍的权重衰减以外，深度学习模型常常使用丢弃法（dropout）[1] 来应对过拟合问题。丢弃法有一些不同的变体。本节中提到的丢弃法特指倒置丢弃法（inverted dropout）。 方法 回忆一下，“多层感知机”一节的图3.3描述了一个单隐藏层的多层感知机。其中输入个数为4，隐藏单元个数为5，且隐藏单元$h_i$（$i=1, \ldots, 5$）的计算表达式为 $$h_i = \phi\left(x_1 w_{1i} + x_2 w_{2i} + x_3 w_{3i} + x_4 w_{4i} + b_i\right)$$ 其中$\phi$是激活函数，$x_1, \ldots, x_4$是输入，$w_{1i}, \ldots, w_{4i}$是隐藏单元$i$的权重参数，$b_i$是偏差参数。
我将事件发送到AWS Kinesis，这些事件由AWS Lambda函数处理。但是，如果Lambda抛出错误，则该记录不会被丢弃，而是会被一次又一次地重新处理，从而阻塞新记录的处理。 我更希望跳过出错的记录，继续处理新的记录，但不知道该怎么做。 Lambda函数已经捕获了所有异常，因此按理不应报告任何执行错误。 下面是Python中的代码片段。 我知道Lambda会在“保留期”内重试（默认为24小时），但我希望提前放弃重试。
似乎都没用。
拒绝/丢弃到反应堆TCPServer的传入连接的正确方法是什么? 我目前有以下资料: 它似乎起作用了,并且成功地从我的列表中的远程地址丢弃连接。但每次它都将堆栈跟踪打印到控制台,而且通常情况下看起来不太好。 拒绝与tcpserver的某些连接的正确方法是什么?
我无法使用 SSL 配置投递向导。 创建密钥/证书如下: 我已经将生成的keystore.jks与我的配置文件(yml)放在一起…我的配置文件具有超文本传输协议条目,如下所示: 已启动服务器..面临异常: 有人可以帮我吗?
这个问题只是为了让我理解这个概念。空值怎么能转换成(可丢弃的)?null不是一个类,对吗?这个问题可能很愚蠢。 我知道: