I'm trying to create a 360-degree video sphere on Android (like the one used by Cardboard). I got this working with a photo by drawing a sphere in OpenGL ES 1.0 and attaching a texture to it; after that I can use the sensor values to rotate the sphere.
However, I can't figure out how to switch the picture to a video. I've tried rendering frame by frame with texSubImage2D(), but it's far too slow. My video will probably be around 4K resolution, since I need good quality even though only a small part of it is visible at a time.
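Roughly, the frame-by-frame path I tried looks like the sketch below (illustrative only: the MediaMetadataRetriever source and the retriever/positionUs names are stand-ins, not my exact code):

    private void uploadVideoFrame(final GL10 gl, final MediaMetadataRetriever retriever, final long positionUs) {
        // Decode the frame nearest to positionUs into a Bitmap (CPU work).
        Bitmap frame = retriever.getFrameAtTime(positionUs, MediaMetadataRetriever.OPTION_CLOSEST);
        if (frame == null) {
            return;
        }
        // Overwrite the existing texture storage with the new frame; the frame
        // is assumed to match the size the texture was allocated with.
        gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);
        GLUtils.texSubImage2D(GL10.GL_TEXTURE_2D, 0, 0, 0, frame);
        frame.recycle();
    }

Every frame goes through a CPU-side Bitmap and a full glTexSubImage2D upload, which is why this cannot keep up anywhere near 4K.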
I've read some theoretical material on how this should be done (i.e. frame buffers, external textures, synchronization, etc.), but I couldn't find any examples of these things, so some code would be very much appreciated...
Here is how I build the sphere, draw it and attach the texture to it (i.e. my Sphere class)...
import rapid.decoder.BitmapDecoder;
public class Sphere {
/** Buffer holding the vertices. */
private final List<FloatBuffer> mVertexBuffer = new ArrayList<FloatBuffer>();
/** The vertices for the sphere. */
private final List<float[]> mVertices = new ArrayList<float[]>();
/** Buffer holding the texture coordinates. */
private final List<FloatBuffer> mTextureBuffer = new ArrayList<FloatBuffer>();
/** Mapping texture coordinates for the vertices. */
private final List<float[]> mTexture = new ArrayList<float[]>();
/** The texture pointer. */
private final int[] mTextures = new int[1];
/** Total number of strips for the given depth. */
private final int mTotalNumStrips;
public Sphere(final int depth, final float radius) {
// Calculate basic values for the sphere.
this.mTotalNumStrips = Maths.power(2, depth - 1) * 5; // the last 5 comes from the properties of an icosahedron
final int numVerticesPerStrip = Maths.power(2, depth) * 3;
final double altitudeStepAngle = Maths.rad120 / Maths.power(2, depth);
final double azimuthStepAngle = Maths.rad360 / this.mTotalNumStrips;
double x, y, z, h, altitude, azimuth;
Log.e("mTotalNumStrips", ""+mTotalNumStrips);
Log.e("numVerticesPerStrip", ""+numVerticesPerStrip);
for (int stripNum = 0; stripNum < this.mTotalNumStrips; stripNum++) {
// Setup arrays to hold the points for this strip.
final float[] vertices = new float[numVerticesPerStrip * 3]; // x,y,z
final float[] texturePoints = new float[numVerticesPerStrip * 2]; // 2d texture
int vertexPos = 0;
int texturePos = 0;
// Calculate position of the first vertex in this strip.
altitude = Maths.rad90;
azimuth = stripNum * azimuthStepAngle;
// Draw the rest of this strip.
for (int vertexNum = 0; vertexNum < numVerticesPerStrip; vertexNum += 2) {
// First point - Vertex.
y = radius * Math.sin(altitude);
h = radius * Math.cos(altitude);
z = h * Math.sin(azimuth);
x = h * Math.cos(azimuth);
vertices[vertexPos++] = (float) x;
vertices[vertexPos++] = (float) y;
vertices[vertexPos++] = (float) z;
// First point - Texture.
texturePoints[texturePos++] = (float) (1 + azimuth / Maths.rad360);
texturePoints[texturePos++] = (float) (1 - (altitude + Maths.rad90) / Maths.rad180);
// Second point - Vertex.
altitude -= altitudeStepAngle;
azimuth -= azimuthStepAngle / 2.0;
y = radius * Math.sin(altitude);
h = radius * Math.cos(altitude);
z = h * Math.sin(azimuth);
x = h * Math.cos(azimuth);
vertices[vertexPos++] = (float) x;
vertices[vertexPos++] = (float) y;
vertices[vertexPos++] = (float) z;
// Second point - Texture.
texturePoints[texturePos++] = (float) (1 + azimuth / Maths.rad360);
texturePoints[texturePos++] = (float) (1 - (altitude + Maths.rad90) / Maths.rad180);
azimuth += azimuthStepAngle;
}
this.mVertices.add(vertices);
this.mTexture.add(texturePoints);
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(numVerticesPerStrip * 3 * (Float.SIZE / 8)); // Float.SIZE is in bits, so divide by 8 for bytes
byteBuffer.order(ByteOrder.nativeOrder());
FloatBuffer fb = byteBuffer.asFloatBuffer();
fb.put(this.mVertices.get(stripNum));
fb.position(0);
this.mVertexBuffer.add(fb);
// Setup texture.
byteBuffer = ByteBuffer.allocateDirect(numVerticesPerStrip * 2 * (Float.SIZE / 8));
byteBuffer.order(ByteOrder.nativeOrder());
fb = byteBuffer.asFloatBuffer();
fb.put(this.mTexture.get(stripNum));
fb.position(0);
this.mTextureBuffer.add(fb);
}
}
public void loadGLTexture(final GL10 gl, final Context context, final int texture) {
Bitmap bitmap = BitmapDecoder.from(context.getResources(), texture)
.scale(4048, 2024)
.decode();
// Generate one texture pointer, and bind it to the texture array.
gl.glGenTextures(1, this.mTextures, 0);
gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);
// Set texture filtering: nearest for minification, linear for magnification.
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
// Use Android GLUtils to specify a two-dimensional texture image from our bitmap.
GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap, 0);
// Tidy up.
bitmap.recycle();
}
/**
* The draw method for the sphere with the GL context.
*
* @param gl Graphics handle.
*/
public void draw(final GL10 gl) {
// bind the previously generated texture.
gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);
// Point to our buffers.
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
// Set the face rotation, clockwise in this case.
gl.glFrontFace(GL10.GL_CW);
// Point to our vertex buffer.
for (int i = 0; i < this.mTotalNumStrips; i++) {
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, this.mVertexBuffer.get(i));
gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, this.mTextureBuffer.get(i));
// Draw the vertices as triangle strip.
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, this.mVertices.get(i).length / 3);
}
// Disable the client state before leaving.
gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
}
}
And here is my renderer...
@Override
public void onDrawFrame(final GL10 gl) {
zvector = new float[] {0,0,1,0};
resultvector = new float[] {0,0,1,0};
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glLoadIdentity();
float radiansX = (float) Math.toRadians(gyro_angle[1]);
float radiansY = (float) Math.toRadians(-gyro_angle[0]);
float radiansZ = (float) Math.toRadians(-gyro_angle[2]);
// Find the sine and cosine of the half angles.
float sinX =(float) Math.sin(radiansX * 0.5);
float cosX =(float) Math.cos(radiansX * 0.5);
float sinY =(float) Math.sin(radiansY * 0.5);
float cosY =(float) Math.cos(radiansY * 0.5);
float sinZ =(float) Math.sin(radiansZ * 0.5);
float cosZ =(float) Math.cos(radiansZ * 0.5);
// Formula to construct a new Quaternion based on direction and angle.
quatX[0] = cosX;
quatX[1] = 1 * sinX;
quatX[2] = 0 * sinX;
quatX[3] = 0 * sinX;
quatY[0] = cosY;
quatY[1] = 0 * sinY;
quatY[2] = 1 * sinY;
quatY[3] = 0 * sinY;
quatZ[0] = cosZ;
quatZ[1] = 0 * sinZ;
quatZ[2] = 0 * sinZ;
quatZ[3] = 1 * sinZ;
quat1 = multiplyQuat(quatX, quatY);
quat2 = multiplyQuat(quat1, quatZ);
mMatrix = getMatrixfromQuat(quat1);
gl.glLoadMatrixf(mMatrix, 0);
this.mSphere.draw(gl);
}
@Override
public void onSurfaceChanged(final GL10 gl, final int width, final int height) {
final float aspectRatio = (float) width / (float) (height == 0 ? 1 : height);
gl.glViewport(0, 0, width, height);
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
GLU.gluPerspective(gl, 45.0f, aspectRatio, 0.1f, 100.0f);
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
}
@Override
public void onSurfaceCreated(final GL10 gl, final EGLConfig config) {
this.mSphere.loadGLTexture(gl, this.mContext, R.drawable.pic360);
gl.glEnable(GL10.GL_TEXTURE_2D);
gl.glShadeModel(GL10.GL_SMOOTH);
gl.glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
gl.glClearDepthf(1.0f);
gl.glEnable(GL10.GL_DEPTH_TEST);
gl.glDepthFunc(GL10.GL_LEQUAL);
gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
}
// CONSTRUCTOR
public GlRenderer(final Context context) {
this.mContext = context;
this.mSphere = new Sphere(5, 2);
sensorManager = (SensorManager) this.mContext.getSystemService(Context.SENSOR_SERVICE);
sensorGyroscope = sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE);
sensorAccelerometer = sensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER);
sensorMagneticField = sensorManager.getDefaultSensor(Sensor.TYPE_MAGNETIC_FIELD);
valuesAccelerometer = new float[3];
valuesMagneticField = new float[3];
matrixR = new float[9];
matrixI = new float[9];
matrixValues = new float[3];
sensorManager.registerListener(this, sensorGyroscope, SensorManager.SENSOR_DELAY_FASTEST);
sensorManager.registerListener(this, sensorAccelerometer, SensorManager.SENSOR_DELAY_FASTEST);
sensorManager.registerListener(this, sensorMagneticField, SensorManager.SENSOR_DELAY_FASTEST);
}
//HERE GOES SOME CURRENTLY IRRELEVANT STUFF ABOUT THE SENSORS AND QUATERNIONS
I had the same kind of video texturing problem. At first I used ffmpeg for video decoding, but the performance was very poor (just like yours: extracting frame by frame). To improve performance I switched to the default Android MediaPlayer. You can use a SurfaceTexture to create an OpenGL surface (sphere, cylinder, cube, etc.) and then set that surface on the MediaPlayer:
Surface surface = new Surface(mSurface); // mSurface is your SurfaceTexture
mMediaPlayer.setSurface(surface);
mMediaPlayer.setScreenOnWhilePlaying(true);
That's basically the trick. I did this for some commercial projects, so I can't share that code, but I hope to publish some free code on GitHub soon.
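For completeness, here is a minimal sketch of the SurfaceTexture plumbing described above. The class and member names (VideoTextureHelper, mVideoTexture, mFrameAvailable, etc.) are illustrative, not from that project, and the MediaPlayer is assumed to already have its data source set and to be prepared:

    import android.graphics.SurfaceTexture;
    import android.media.MediaPlayer;
    import android.opengl.GLES11Ext;
    import android.view.Surface;
    import javax.microedition.khronos.opengles.GL10;

    public class VideoTextureHelper implements SurfaceTexture.OnFrameAvailableListener {
        private final int[] mVideoTexture = new int[1];
        private SurfaceTexture mSurfaceTexture;
        private volatile boolean mFrameAvailable;

        /** Call on the GL thread, e.g. from onSurfaceCreated(). */
        public void createVideoTexture(final GL10 gl) {
            gl.glGenTextures(1, mVideoTexture, 0);
            // Bind to the *external* target, not GL_TEXTURE_2D.
            gl.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, mVideoTexture[0]);
            gl.glTexParameterf(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_LINEAR);
            gl.glTexParameterf(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
            mSurfaceTexture = new SurfaceTexture(mVideoTexture[0]);
            mSurfaceTexture.setOnFrameAvailableListener(this);
        }

        /** Hand the SurfaceTexture to the MediaPlayer; decoding then happens off the GL thread. */
        public void attachToPlayer(final MediaPlayer player) {
            Surface surface = new Surface(mSurfaceTexture);
            player.setSurface(surface);
            surface.release(); // the player keeps its own reference
            player.setScreenOnWhilePlaying(true);
            player.start(); // assumes setDataSource()/prepare() were already called
        }

        @Override
        public void onFrameAvailable(final SurfaceTexture surfaceTexture) {
            // Called on an arbitrary thread; just flag that a new frame is ready.
            mFrameAvailable = true;
        }

        /** Call at the top of onDrawFrame(), on the GL thread, before drawing the sphere. */
        public void updateFrame(final GL10 gl) {
            if (mFrameAvailable) {
                mSurfaceTexture.updateTexImage(); // latch the newest decoded frame
                mFrameAvailable = false;
            }
            gl.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, mVideoTexture[0]);
        }
    }

Note that the decoder writes into a GL_TEXTURE_EXTERNAL_OES texture, so the sphere has to bind and enable that target instead of GL_TEXTURE_2D. On ES 1.x this only works if the device exposes the OES_EGL_image_external extension; otherwise the renderer has to move to ES 2.0, where the fragment shader declares the sampler as samplerExternalOES with #extension GL_OES_EGL_image_external : require. You will usually also want to apply the matrix from SurfaceTexture.getTransformMatrix() to the texture coordinates.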