Below is the simplest possible case, displaying through a dumb buffer.
1. Why does everything end in an fb_id? Because the fb_id is what identifies a drm_framebuffer; the DRM machinery only deals in fb_ids.
The interface DRM recommends today is the Atomic API, and the basic element every Atomic operation depends on is the Property.
A Property is essentially one of the parameters of the legacy interfaces pulled out into an independent, global attribute; setting these properties is how the display parameters get configured.
A Property boils down to three parts: name, id and value.
The id is the property's globally unique identifier within the DRM framework.
In practice you look a property up by its name, address it through its id, and change it through its value.
Properties come in the following types:
• enum
• bitmask
• range
• signed range
• object
• blob
From <https://blog.csdn.net/hexiaolong2009/article/details/87211230>
Definition of drm_property:
https://elixir.bootlin.com/linux/latest/source/include/drm/drm_property.h#L73
Detailed documentation:
https://www.kernel.org/doc/html/latest/gpu/drm-kms.html#c.drm_property
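To make name/id/value concrete, here is a minimal sketch (assuming an already-open DRM fd and a valid connector id, both hypothetical here) that lists every property on a connector:

#include <inttypes.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void dump_connector_props(int fd, uint32_t conn_id)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return;
	for (uint32_t i = 0; i < props->count_props; i++) {
		drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);
		if (!p)
			continue;
		/* name is for humans, prop_id is what the API takes,
		 * prop_values[i] is the current value */
		printf("prop %-20s id=%u value=%" PRIu64 "\n",
		       p->name, p->prop_id, (uint64_t)props->prop_values[i]);
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);
}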
typedef struct _drmModeProperty {
	uint32_t prop_id;
	uint32_t flags;
	char name[DRM_PROP_NAME_LEN];
	int count_values;
	uint64_t *values; /* store the blob lengths */
	int count_enums;
	struct drm_mode_property_enum *enums;
	int count_blobs;
	uint32_t *blob_ids; /* store the blob IDs */
} drmModePropertyRes, *drmModePropertyPtr;
2. Why can a handle get you an fb_id? Because the handle resolves to its GEM object; DRM then creates a drm_framebuffer around that GEM object and hands you the framebuffer's fb_id.
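As a sketch of point 2, the same handle-to-fb_id step through the newer drmModeAddFB2 entry point (which takes a fourcc format instead of depth/bpp); the width/height/pitch/handle parameters are assumed to come from a dumb-buffer creation like the one below:

#include <drm_fourcc.h>
#include <xf86drmMode.h>

static int handle_to_fb_id(int fd, uint32_t width, uint32_t height,
			   uint32_t pitch, uint32_t handle, uint32_t *fb_id)
{
	uint32_t handles[4] = { handle };
	uint32_t pitches[4] = { pitch };
	uint32_t offsets[4] = { 0 };

	/* DRM looks the handle up, finds the GEM object, wraps it in a
	 * drm_framebuffer, and writes the new fb_id back */
	return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}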
3. The steps inside modeset_create_fb need to be spelled out (see the annotated copy further down).
#include "xf86drm.h"
#include "xf86drmMode.h"
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <sys/mman.h>
struct buffer_object {
uint32_t width;
uint32_t height;
uint32_t pitch;
uint32_t handle;
uint32_t size;
uint8_t *vaddr;
uint32_t fb_id;
};
struct buffer_object buf;
static void dump_mode(drmModeModeInfo *mode)
{
printf(" %s %d %d %d %d %d %d %d %d %d %d",
mode->name,
mode->vrefresh,
mode->hdisplay,
mode->hsync_start,
mode->hsync_end,
mode->htotal,
mode->vdisplay,
mode->vsync_start,
mode->vsync_end,
mode->vtotal,
mode->clock);
printf("\n");
}
static int modeset_create_fb(int fd, struct buffer_object *bo)
{
struct drm_mode_create_dumb create = {};
struct drm_mode_map_dumb map = {};
create.width = bo->width;
create.height = bo->height;
create.bpp = 32;
drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
bo->pitch = create.pitch;
bo->size = create.size;
bo->handle = create.handle;
drmModeAddFB(fd, bo->width, bo->height, 24, 32, bo->pitch,
bo->handle, &bo->fb_id);
map.handle = create.handle;
drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
bo->vaddr = mmap(0, create.size, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, map.offset);
memset(bo->vaddr, 0xff, bo->size/2);
memset(bo->vaddr + bo->size/2, 0x88, bo->size/2);
return 0;
}
static uint32_t get_property_id(int fd, drmModeObjectProperties *props,
const char *name)
{
drmModePropertyPtr property;
uint32_t i, id = 0;
	for (i = 0; i < props->count_props; i++) {
		property = drmModeGetProperty(fd, props->props[i]);
		if (!property)
			continue;
		if (!strcmp(property->name, name))
			id = property->prop_id;
		drmModeFreeProperty(property);
		if (id)
			break;
	}
return id;
}
int main(int argc, char **argv)
{
int drm_fd;
drmModeRes *res;
drmModePlaneRes *plane_res;
drmModeConnector *conn;
drmModeObjectProperties *props;
int crtc_id;
int conn_id;
int plane_id;
int property_temp;
uint32_t blob_id;
drmModeAtomicReqPtr req = NULL;
drm_fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
if (drm_fd < 0) {
printf("open card error\n");
return 0;
}
res = drmModeGetResources(drm_fd);
if (!res) {
printf("getresource error\n");
return 0;
}
	crtc_id = res->crtcs[0];
	conn_id = res->connectors[1]; /* index is board-specific */
drmSetClientCap(drm_fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
plane_res = drmModeGetPlaneResources(drm_fd);
plane_id = plane_res->planes[1];
printf("plane%d, crtc%d, conn%d\n", plane_id, crtc_id, conn_id);
conn = drmModeGetConnector(drm_fd, conn_id);
if (!conn) {
printf("get conn error\n");
return 0;
}
	if (conn->count_modes) {
		printf("mode count: %d\n", conn->count_modes);
		for (int i = 0; i < conn->count_modes; i++)
			dump_mode(&conn->modes[i]);
		/* size the framebuffer to the mode we actually set below (modes[0]) */
		buf.width = conn->modes[0].hdisplay;
		buf.height = conn->modes[0].vdisplay;
	}
modeset_create_fb(drm_fd, &buf);
drmSetClientCap(drm_fd, DRM_CLIENT_CAP_ATOMIC, 1);
req = drmModeAtomicAlloc();
props = drmModeObjectGetProperties(drm_fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);
property_temp = get_property_id(drm_fd, props, "CRTC_ID");
drmModeAtomicAddProperty(req, conn_id, property_temp, crtc_id);
drmModeFreeObjectProperties(props);
drmModeCreatePropertyBlob(drm_fd, &conn->modes[0], sizeof(conn->modes[0]), &blob_id);
props = drmModeObjectGetProperties(drm_fd, crtc_id, DRM_MODE_OBJECT_CRTC);
property_temp = get_property_id(drm_fd, props, "ACTIVE");
drmModeAtomicAddProperty(req, crtc_id, property_temp, 1);
property_temp = get_property_id(drm_fd, props, "MODE_ID");
drmModeAtomicAddProperty(req, crtc_id, property_temp, blob_id);
drmModeFreeObjectProperties(props);
props = drmModeObjectGetProperties(drm_fd, plane_id, DRM_MODE_OBJECT_PLANE);
property_temp = get_property_id(drm_fd, props, "CRTC_ID");
drmModeAtomicAddProperty(req, plane_id, property_temp, crtc_id);
property_temp = get_property_id(drm_fd, props, "FB_ID");
drmModeAtomicAddProperty(req, plane_id, property_temp, buf.fb_id);
property_temp = get_property_id(drm_fd, props, "CRTC_X");
drmModeAtomicAddProperty(req, plane_id, property_temp, 0);
property_temp = get_property_id(drm_fd, props, "CRTC_Y");
drmModeAtomicAddProperty(req, plane_id, property_temp, 0);
property_temp = get_property_id(drm_fd, props, "CRTC_W");
drmModeAtomicAddProperty(req, plane_id, property_temp, 1920);
property_temp = get_property_id(drm_fd, props, "CRTC_H");
drmModeAtomicAddProperty(req, plane_id, property_temp, 1080);
property_temp = get_property_id(drm_fd, props, "SRC_X");
drmModeAtomicAddProperty(req, plane_id, property_temp, 0);
property_temp = get_property_id(drm_fd, props, "SRC_Y");
drmModeAtomicAddProperty(req, plane_id, property_temp, 0);
property_temp = get_property_id(drm_fd, props, "SRC_W");
drmModeAtomicAddProperty(req, plane_id, property_temp, 1920<<16);
property_temp = get_property_id(drm_fd, props, "SRC_H");
drmModeAtomicAddProperty(req, plane_id, property_temp, 1080<<16);
drmModeFreeObjectProperties(props);
drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK | DRM_MODE_ATOMIC_ALLOW_MODESET , NULL);
getchar();
drmModeFreeConnector(conn);
drmModeFreePlaneResources(plane_res);
drmModeFreeResources(res);
close(drm_fd);
return 0;
}
1.
drm_public drmModeResPtr drmModeGetResources(int fd)
	drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res) // issues the ioctl, which fills in the returned drmModeResPtr
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0), // kernel entry
	drm_mode_getresources // Construct a set of configuration description structures and return them to the user, including CRTC, connector and framebuffer configuration.
	// in essence it returns a set of framebuffer/crtc/encoder/connector ids
Return type:
typedef struct _drmModeRes {
int count_fbs;
uint32_t *fbs;
int count_crtcs;
uint32_t *crtcs;
int count_connectors;
uint32_t *connectors;
int count_encoders;
uint32_t *encoders;
uint32_t min_width, max_width;
uint32_t min_height, max_height;
} drmModeRes, *drmModeResPtr;
2.
drm_public drmModePlaneResPtr drmModeGetPlaneResources(int fd)
	drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res) // issues the ioctl, which fills in the returned drmModePlaneResPtr
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0), // kernel entry
	drm_mode_getplane_res // same pattern as above, so why did we need drmSetClientCap(drm_fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1) earlier?
	// drm_mode_getplane_res itself explains: "Unless userspace set the 'universal planes' capability bit, only advertise overlays."
Return type:
typedef struct _drmModePlaneRes {
uint32_t count_planes;
uint32_t *planes;
} drmModePlaneRes, *drmModePlaneResPtr;
3.
drm_public drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
	_drmModeGetConnector(fd, connector_id, 1); // the final 0/1 selects whether the kernel is forced to re-probe the connector: drmModeGetConnector passes 1 (full probe), while drmModeGetConnectorCurrent passes 0 (just return the current state); the difference plays out in the kernel
	drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn)
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 0),
	drm_mode_getconnector // the important field is drmModeModeInfoPtr modes, which carries the display's resolutions and timings
	if (out_resp->count_modes == 0) { // this is where the 0/1 above matters: with probe=0, libdrm passes count_modes=1 (with a dummy pointer), so this branch is skipped and fill_modes is never called; with probe=1 the first pass arrives with count_modes=0 and the connector gets probed
		connector->funcs->fill_modes(connector,
			dev->mode_config.max_width,
			dev->mode_config.max_height);}
Return type:
typedef struct _drmModeConnector {
uint32_t connector_id;
uint32_t encoder_id; /**< Encoder currently connected to */
uint32_t connector_type;
uint32_t connector_type_id;
drmModeConnection connection;
uint32_t mmWidth, mmHeight; /**< HxW in millimeters */
drmModeSubPixel subpixel;
int count_modes;
drmModeModeInfoPtr modes;
int count_props;
uint32_t *props; /**< List of property ids */
uint64_t *prop_values; /**< List of property values */
int count_encoders;
uint32_t *encoders; /**< List of encoder ids */
} drmModeConnector, *drmModeConnectorPtr;
4.
drm_public drmModeAtomicReqPtr drmModeAtomicAlloc(void) // no kernel call; it merely allocates and initializes the request
Return type:
struct _drmModeAtomicReq {
	uint32_t cursor;
	uint32_t size_items;
	drmModeAtomicReqItemPtr items;
};
5.
drm_public drmModeObjectPropertiesPtr drmModeObjectGetProperties(int fd, uint32_t object_id, uint32_t object_type)
	struct drm_mode_obj_get_properties properties;
	properties.obj_id = object_id;
	properties.obj_type = object_type;
	drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &properties)
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
	drm_mode_obj_get_properties_ioctl // uses obj_id and obj_type to produce props_ptr/prop_values_ptr and count_props
	obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type); // resolve the object from its id and type
	ret = drm_mode_object_get_properties(obj, file_priv->atomic, (uint32_t __user *)(unsigned long)(arg->props_ptr), (uint64_t __user *)(unsigned long)(arg->prop_values_ptr), &arg->count_props); // fill in the object's props_ptr, prop_values_ptr and count_props
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
Return type:
typedef struct _drmModeObjectProperties {
uint32_t count_props;
uint32_t *props;
uint64_t *prop_values;
} drmModeObjectProperties, *drmModeObjectPropertiesPtr;
6.
drm_public drmModePropertyPtr drmModeGetProperty(int fd, uint32_t property_id)
	drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop)
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 0),
	drm_mode_getproperty_ioctl // looks the property up by property_id and fills in the returned drmModePropertyPtr
	property = drm_property_find(dev, file_priv, out_resp->prop_id);
Return type:
typedef struct _drmModeProperty {
uint32_t prop_id;
uint32_t flags;
char name[DRM_PROP_NAME_LEN];
int count_values;
uint64_t *values; /* store the blob lengths */
int count_enums;
struct drm_mode_property_enum *enums;
int count_blobs;
uint32_t *blob_ids; /* store the blob IDs */
} drmModePropertyRes, *drmModePropertyPtr;
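When flags marks the property as an enum, the enums array carries the value-to-name mapping. A small sketch of decoding it (fd and prop_id are assumed to be valid):

#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void dump_enum_prop(int fd, uint32_t prop_id)
{
	drmModePropertyPtr p = drmModeGetProperty(fd, prop_id);
	if (!p)
		return;
	if (p->flags & DRM_MODE_PROP_ENUM) {
		/* e.g. a "DPMS" property lists On/Standby/Suspend/Off here */
		for (int i = 0; i < p->count_enums; i++)
			printf("  %s = %llu\n", p->enums[i].name,
			       (unsigned long long)p->enums[i].value);
	}
	drmModeFreeProperty(p);
}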
7.
drm_public int drmModeAtomicAddProperty(drmModeAtomicReqPtr req, uint32_t object_id, uint32_t property_id, uint64_t value)
By design of the atomic API, queueing these properties never calls into the kernel:
	req->items[req->cursor].object_id = object_id;
	req->items[req->cursor].property_id = property_id;
	req->items[req->cursor].value = value;
	req->cursor++;
Items are appended at cursor, each one recording the (object_id, property_id, value) triple.
8.
drm_public int drmModeAtomicCommit(int fd, drmModeAtomicReqPtr req, uint32_t flags, void *user_data)
It first sorts all the queued items by object_id, so the kernel receives the properties grouped per object (the grouping is purely by object id, not "buffer first, then plane, then crtc").
	ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ATOMIC, &atomic);
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER),
	state = drm_atomic_state_alloc(dev); // This allocates an empty atomic state to track updates.
	...
	obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); // find obj
	prop = drm_mode_obj_find_prop_id(obj, prop_id); // find prop
	ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value); // where the property is actually set; fairly involved
	ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, &num_fences); // fence
	// depending on the flags, one of:
	ret = drm_atomic_check_only(state); / drm_atomic_nonblocking_commit(state); / drm_atomic_commit(state);
	complete_signaling(dev, state, fence_state, num_fences, !ret); // fence
	drm_atomic_state_put(state);
Overall:
seen end to end, the KMS setup logic is not complicated; it follows one fixed pattern.
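One pattern worth noting on top of this: a request can be validated through the drm_atomic_check_only() path first by passing DRM_MODE_ATOMIC_TEST_ONLY, and committed for real only if the check passes. A minimal sketch:

static int try_commit(int fd, drmModeAtomicReqPtr req)
{
	int ret;

	/* dry run: ends in drm_atomic_check_only(), nothing is applied */
	ret = drmModeAtomicCommit(fd, req,
				  DRM_MODE_ATOMIC_TEST_ONLY |
				  DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	if (ret)
		return ret; /* configuration rejected */

	/* the real (nonblocking) commit */
	return drmModeAtomicCommit(fd, req,
				   DRM_MODE_ATOMIC_NONBLOCK |
				   DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
}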
https://www.kernel.org/doc/html/latest/gpu/drm-kms.html#c.drm_mode_object
The base structure for all KMS objects is struct drm_mode_object. One of the base services it provides is tracking properties, which is especially important for the atomic ioctls. The somewhat surprising part is that properties are not directly instantiated on each object, but are free-standing mode objects themselves, represented by struct drm_property, which only specifies the type and value range of a property. Any given property can be attached multiple times to different objects using drm_object_attach_property().
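A hedged kernel-side sketch of that last point: one drm_property instance attached to two different mode objects via drm_object_attach_property(). The "zpos" name and the value range are purely illustrative.

#include <drm/drm_device.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>

static int attach_zpos_to_planes(struct drm_device *dev,
				 struct drm_plane *p0, struct drm_plane *p1)
{
	/* a free-standing mode object describing only type and range */
	struct drm_property *prop =
		drm_property_create_range(dev, 0, "zpos", 0, 255);
	if (!prop)
		return -ENOMEM;

	/* the same property attached to two objects, with initial values */
	drm_object_attach_property(&p0->base, prop, 0);
	drm_object_attach_property(&p1->base, prop, 1);
	return 0;
}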
static int modeset_create_fb(int fd, struct buffer_object *bo)
{
struct drm_mode_create_dumb create = {};
struct drm_mode_map_dumb map = {};
create.width = bo->width;
create.height = bo->height;
create.bpp = 32;
drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create); //1
bo->pitch = create.pitch;
bo->size = create.size;
bo->handle = create.handle; //2
drmModeAddFB(fd, bo->width, bo->height, 24, 32, bo->pitch, //3
bo->handle, &bo->fb_id);
map.handle = create.handle; //4
drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map); //5
bo->vaddr = mmap(0, create.size, PROT_READ | PROT_WRITE, //6
MAP_SHARED, fd, map.offset);
memset(bo->vaddr, 0xff, bo->size/2);
memset(bo->vaddr + bo->size/2, 0x88, bo->size/2);
return 0;
}
GEM has only one common userspace interface, for sharing and destroying objects. https://elixir.bootlin.com/linux/v5.10.13/source/include/drm/drm_gem.h#L184
The core idea of GEM is to identify graphics buffer objects with 32-bit ids. The reason was that "X runs out of open fds" (KDE easily reaches a few thousand). The core design principle behind GEM is that the kernel is in full control of allocating these buffer objects and is free to move them around however it sees fit; this is what makes concurrent rendering from multiple processes possible while userspace can still pretend it has the GPU to itself. GEM stands for "Graphics Execution Manager". https://blog.ffwll.ch/2011/05/gem-overview.html; https://blog.ffwll.ch/2013/01/i915gem-crashcourse-overview.html
What struct drm_gem_object ties in is a struct dma_buf *dma_buf;
On the idr mechanism: https://lwn.net/Articles/103209/
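The idr is what turns a pointer into a small integer id and back; a kernel-side sketch of the mechanism (this is exactly how a GEM handle resolves to its drm_gem_object; the function names here are made up):

#include <linux/idr.h>

static DEFINE_IDR(my_idr);

/* idr_alloc() stores the pointer and returns a fresh id >= 1: the "handle" */
static int store_object(void *obj)
{
	return idr_alloc(&my_idr, obj, 1, 0, GFP_KERNEL);
}

/* idr_find() turns the handle back into the pointer */
static void *lookup_object(int handle)
{
	return idr_find(&my_idr, handle);
}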
The KMS API does not standardize the creation of backing-storage objects; it leaves that to driver-specific ioctls. In practice, even creating buffer objects for GEM-based drivers is done through driver-specific ioctls; GEM only has a common userspace interface for sharing and destroying objects. While this is no problem for full-featured graphics stacks that include device-specific userspace components (such as libdrm), the limitation makes DRM-based early-boot graphics unnecessarily complex.
drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create); //1, returns through the create struct, filling in its handle, pitch and size
	drm_mode_create_dumb_ioctl
	return drm_mode_create_dumb(dev, data, file_priv);
	return dev->driver->dumb_create(file_priv, dev, args);
	msm_gem_dumb_create
	return msm_gem_new_handle(dev, file, args->size, MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
	obj = msm_gem_new(dev, size, flags); // allocates the drm_gem_object. This structure defines the generic parts of a GEM buffer object, mostly handling mmap and userspace handles. Note the dma_buf it ties in; see the kernel definition: https://elixir.bootlin.com/linux/v5.10.13/source/include/drm/drm_gem.h#L184
	ret = drm_gem_handle_create(file, obj, handle); // create a gem handle for an object; one gem object gets one handle, and this handle is the one drm_ioctl_mode_addfb consumes later
	drm_gem_handle_create_tail
	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); // returns a handle
	...
	handle = ret; // so the handle is generated by idr_alloc
	drm_gem_object_unreference_unlocked(obj);
Besides offset management, the vma offset manager also handles access management. For every open-file context that is allowed to access a given node, drm_vma_node_allow() must be called; otherwise, an mmap() call on this open file with the node's offset will fail with -EACCES. To revoke access again, use drm_vma_node_revoke(); the caller is responsible for destroying any existing mappings, if required. https://www.kernel.org/doc/html/v4.10/gpu/drm-mm.html
drmModeAddFB(fd, bo->width, bo->height, 24, 32, bo->pitch, bo->handle, &bo->fb_id); //3, Add a new FB to the specified CRTC, given a user request. This is the original addfb ioctl which only supported RGB formats.
	ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB, &f)
	drm_mode_addfb_ioctl
	return drm_mode_addfb(dev, data, file_priv);
	r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth);
	/* convert to new format and call new ioctl */
	r.fb_id = or->fb_id;
	r.width = or->width;
	r.height = or->height;
	r.pitches[0] = or->pitch;
	r.handles[0] = or->handle;
	ret = drm_mode_addfb2(dev, &r, file_priv); // so it boils down to drm_mode_addfb2 after all
	fb = drm_internal_framebuffer_create(dev, r, file_priv);
	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
	msm_framebuffer_create
	struct drm_gem_object *bos[4] = {0};
	bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]); // resolves r.handles through this file's handle table to the matching gem_objects, stored in bos
	fb = msm_framebuffer_init(dev, mode_cmd, bos); // sets up the drm_framebuffer: Allocates an ID for the framebuffer's parent mode object, sets its mode functions & device file and adds it to the master fd list.
	msm_fb->planes[i] = bos[i]; // the gem objects are stowed in msm_fb->planes
	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); // fill out framebuffer metadata; to be clear, this metadata describes the buffer: its dev, format, width/height, pitch, offsets and so on
	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); // registers this fb object with DRM's mode-object bookkeeping
	drm_mode_object_register(dev, &fb->base);
	or->fb_id = r.fb_id;
That was long; the overall logic is: hand over the handle of the buffer you created earlier, the kernel finds the matching gem object, creates a drm_framebuffer for it, and gives you back that drm_framebuffer's fb_id.
drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map); //5, a lot of plumbing, but in essence it produces the offset: only with this offset can mmap be matched to the right GEM object, and hence the right fb_id
	drm_mode_mmap_dumb_ioctl
	if (dev->driver->dumb_map_offset) return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
	msm_gem_dumb_map_offset
	obj = drm_gem_object_lookup(file, handle); // first resolve the handle to its gem object
	*offset = msm_gem_mmap_offset(obj);
	offset = mmap_offset(obj);
	ret = drm_gem_create_mmap_offset(obj); // GEM memory mapping works by handing a fake mmap offset back to userspace for use in a subsequent mmap(2) call. The DRM core then looks the object up from the offset and sets up the various memory-mapping structures.
	return drm_gem_create_mmap_offset_size(obj, obj->size);
	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, size / PAGE_SIZE);
	return drm_vma_node_offset_addr(&obj->vma_node);
	else return drm_gem_dumb_map_offset(file_priv, dev, args->handle, &args->offset);
	ret = drm_gem_create_mmap_offset(obj);
The rest is: take the offset, mmap it, then fill the buffer.
To be precise about when the buffer "exists": the GEM object, and the storage it owns, belongs to the kernel from CREATE_DUMB onwards (depending on the driver, the physical pages may be allocated eagerly at create time or faulted in lazily, but either way they belong to the object, not to the mapping). What the gem/framebuffer/vma-node setup above creates is only the fake offset that lets mmap find the object; mmap then gives us a CPU window onto it, and only then can we fill it. That is also why munmap does not make the buffer disappear: munmap tears down the CPU mapping only, while the GEM object keeps its pages until the handle (and any framebuffer wrapping it) is destroyed; nothing gets written out to disk in between.
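A sketch of the matching teardown order (reusing struct buffer_object from above): the mapping, the framebuffer and the GEM handle are three separate lifetimes and are released separately.

static void modeset_destroy_fb(int fd, struct buffer_object *bo)
{
	struct drm_mode_destroy_dumb destroy = {};

	drmModeRmFB(fd, bo->fb_id);  /* drop the drm_framebuffer wrapper */
	munmap(bo->vaddr, bo->size); /* drop only the CPU mapping */

	/* finally free the GEM object (and its pages) via the handle */
	destroy.handle = bo->handle;
	drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
}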
static int create_gbm_buffer(int fd, struct buffer_object *buf, int width, int height, int secure)
{
	uint32_t flags;
	int bo_fd = -1, meta_fd = -1;
	size_t size;
	int i = 0;
gbm_fmt = GBM_FORMAT_ABGR8888;
flags = GBM_BO_USE_SCANOUT | GBM_BO_USE_RENDERING;
gbm = gbm_create_device(fd); //1,Create a gbm device for allocating buffers
buf->gbmbo = gbm_bo_create(gbm, width, height, gbm_fmt, flags); //2
bo_fd = gbm_bo_get_fd(buf->gbmbo); //3
BUFFER_STRIDE = gbm_bo_get_stride(buf->gbmbo); //4
gbm_perform(GBM_PERFORM_GET_METADATA_ION_FD, buf->gbmbo, &meta_fd); //5
gbm_perform(GBM_PERFORM_GET_BO_SIZE, buf->gbmbo, &size); //6
buf->size = size;
buf->vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, bo_fd, 0); //7
/* paint the padding */
memset(buf->vaddr, 0xff, size);
	uint32_t *data = buf->vaddr;
for (i = 0; i < buf->size/12; i++)
*data++ = 0xffff0000;
for (i = 0; i < buf->size/12; i++)
*data++ = 0xff00ff00;
for (i = 0; i < buf->size/12; i++)
*data++ = 0xff0000ff;
buf->handle = gbm_bo_get_handle(buf->gbmbo).u32; //8
drmModeAddFB(fd, buf->width, buf->height, 32, 32, BUFFER_STRIDE, //9
buf->handle, &buf->fb_id);
	return 0;
}
https://github.com/randcd-APY/QuectelShare/blob/92fbb0f6117219f63b3a83887e16e4f8335ab493/display/libgbm/src/msmgbm.c
gbm = gbm_create_device(fd); //1
	msmgbm_device_create
	// open the ion device
	msm_gbmdevice->iondev_fd = ion_open(); // opens the /dev/ion node; the hook functions are set up right after
gbmdevice = &msm_gbmdevice->base;
gbmdevice->fd = fd;
gbmdevice->destroy = msmgbm_device_destroy;
gbmdevice->is_format_supported = msmgbm_device_is_format_supported;
gbmdevice->bo_create = msmgbm_bo_create;
gbmdevice->bo_import = msmgbm_bo_import;
gbmdevice->surface_create = msmgbm_surface_create;
msm_gbmdevice->fd = fd;
msm_gbmdevice->magic = QCMAGIC;
As you can see, this just fills in the structure; the real allocation goes through ion.
buf->gbmbo = gbm_bo_create(gbm, width, height, gbm_fmt, flags); //2
	msmgbm_bo_create
	msm_gbmbo = (struct msmgbm_bo *)calloc(1, sizeof(struct msmgbm_bo)); /* Create gbm bo object */
/* First we will get ion_fd and gem handle for the frame buffer
* Size of the ION buffer is in accordance to returned from the adreno helpers
* Alignment of the buffer is fixed to Page size
* ION Memory is from, the System heap
* We get the gem handle from the ion fd using PRIME ioctls
*/
	struct ion_allocation_data ionAllocData;
	...
	ionAllocData.heap_id_mask = GetIonHeapId(usage); // choose the ion heap
	ionAllocData.flags = GetIonAllocFlags(usage); // set the flags
	ionAllocData.len = size; // set the size
	ioctl(msm_dev->iondev_fd, ION_IOC_ALLOC, &ionAllocData) // allocate the buffer
	::https://github.com/ztc1997/android_kernel_xiaomi_sm6150/blob/e86156a252488d23bb454805364124da76042ce1/drivers/staging/android/ion/ion-ioctl.c#L86
	static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	ion_alloc_fd // produces the fd; fills in ionAllocData.fd
	struct dma_buf *dmabuf;
	dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
	buffer = ion_buffer_alloc(dev, len, heap_id_mask, flags); // walks the list of heaps available in the system in priority order; if the client supports the heap type and it matches the caller's request, allocate from it; repeat until an allocation succeeds or every heap has been tried
	buffer = ion_buffer_create(heap, dev, len, flags); // this fills in the ion_buffer
	ret = heap->ops->allocate(heap, buffer, len, flags); // each heap type lives in its own source file; take the system heap: https://github.com/knuxdroid/baffinlite-mainline/blob/7e263ddff4cab95e28d8a08c0d91aeadbfa8faca/drivers/staging/android/ion/ion_system_heap.c
	ion_system_heap_allocate // note that the allocate op differs per heap type
	alloc_largest_available
	page = alloc_buffer_page(heap, buffer, orders[i], &from_pool); // further down is where pages are actually allocated, which is also why the alignment is fixed to page size
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	...
	dmabuf = dma_buf_export(&exp_info); // creates a new dma_buf and associates an anon file with this buffer so it can be exported; also connects the allocator-specific data and ops to the buffer, and records an exporter name string, useful for debugging
	fd = dma_buf_fd(dmabuf, O_CLOEXEC); // returns a file descriptor for the given dma_buf; with the dma-buf created and the anon file tied to the buffer, the file's fd can be handed out
	fd = get_unused_fd_flags(flags);
	fd_install(fd, dmabuf->file);
	data_fd = ionAllocData.fd; // we have the fd. Summing up: ion creates the buffer (mind the heap type etc.), wraps it in a dma-buf so an anon file is associated with the buffer, and that file's fd is what gets passed up to userspace
	base = mmap(NULL,size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); // mmap the fd straight from userspace to get an address; here it is only a sanity check, not the real drawing pass
	drm_args.fd = fd;
	ioctl(msm_dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &drm_args) // ends up filling drm_args.handle
	drm_prime_fd_to_handle_ioctl
	return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); // resolve the fd to a handle
	drm_gem_prime_fd_to_handle // PRIME import function for GEM drivers
	dma_buf = dma_buf_get(prime_fd); // fd -> dma_buf
	ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle); // looks like a tree lookup: checks whether this dma_buf already has a handle; if it does, that handle is written out and we return right away
	If not, it takes the import path (https://blog.csdn.net/u012839187/article/details/113617601):
	dma_buf_attach(dma_buf, attach_dev);
	dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	drm_prime_add_buf_handle // gives the drm_gem_object its handle
	// and once you have the handle, can the fb_id be far behind? DRM_IOCTL_MODE_ADDFB turns it straight into an fb_id, and since mmap gave us the address, drawing works too
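The same import path has a one-call userspace wrapper, drmPrimeFDToHandle() (it issues DRM_IOCTL_PRIME_FD_TO_HANDLE). A sketch, where drm_fd/dmabuf_fd/width/height/pitch are assumed to exist:

	uint32_t handle, fb_id;
	if (drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle) == 0)
		drmModeAddFB(drm_fd, width, height, 24, 32, pitch, handle, &fb_id);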
/* To get ion_fd and gem handle for the metadata structure
* Alignment of the buffer is fixed to Page size
* ION Memory is from, the System heap
* We get the gem handle from the ion fd using PRIME ioctls
*/
ionAllocData.len = sizeof(struct meta_data_t);
ionAllocData.heap_id_mask= ION_HEAP(ION_SYSTEM_HEAP_ID); /* System Heap */
ionAllocData.flags |= ION_FLAG_CACHED;
mt_size = ionAllocData.len;
	ioctl(msm_dev->iondev_fd, ION_IOC_ALLOC, &ionAllocData) // the same dance allocates the meta_data buffer, which records some attributes of the data buffer
mt_data_fd = ionAllocData.fd;
gbmbo = &msm_gbmbo->base;
gbmbo->ion_fd = data_fd;
gbmbo->ion_metadata_fd = mt_data_fd;
gbmbo->handle.u32 = gem_handle;
gbmbo->metadata_handle.u32 = mt_gem_handle;
	gbmbo->fbid = 0; // filled in later via drmModeAddFB2?
gbmbo->format = format;
gbmbo->width = width; //BO width
gbmbo->height = height; //BO height
gbmbo->stride = aligned_width*Bpp;
gbmbo->size = size; // Calculated by qry_size
gbmbo->usage_flags = usage;
gbmbo->aligned_width = aligned_width;
gbmbo->aligned_height = aligned_height;
gbmbo->bo_destroy = msmgbm_bo_destroy;
gbmbo->bo_get_fd = msmgbm_bo_get_fd;
gbmbo->bo_get_device = msmgbm_bo_get_device;
gbmbo->bo_write = msmgbm_bo_write;
msm_gbmbo->device = msm_dev;
msm_gbmbo->cpuaddr = base;
msm_gbmbo->mt_cpuaddr = mt_base;
msm_gbmbo->current_state = GBM_BO_STATE_FREE;
msm_gbmbo->size = size;
msm_gbmbo->mt_size = mt_size;
msm_gbmbo->magic = QCMAGIC;
#ifndef TARGET_ION_ABI_VERSION
msm_gbmbo->ion_handle = handle_data.handle;
msm_gbmbo->ion_mt_handle = mt_handle_data.handle;
#endif
bo_handles[0] = gbmbo->handle.u32;
pitches[0] = gbmbo->stride;
The first ion buffer is the one handed to the drm_framebuffer; the second allocation is exactly sizeof(struct meta_data_t), the structure below:
struct meta_data_t {
uint32_t operation; /* specific operation or state*/
uint32_t interlaced; /* video buffer scan */
uint32_t s3d_format; /* 3D video format supported */
uint32_t linear_format; /* Producer output buffer is linear for UBWC Interlaced video */
	uint32_t color_space; /* color space specifics */
	uint32_t map_secure_buffer; /* Flag to represent SecureBuffer being used for GPU*/
	int is_buffer_secure; /* Flag to query if buffer is secure*/
	int is_buffer_ubwc; /* Flag to query if buffer is UBWC allocated */
	int igc; /* IGC value*/
	float refresh_rate; /* video refresh rate*/
	ColorMetaData color_info; /* Color Aspects + HDR info */
	uint64_t vt_timestamp; /* timestamp set by camera, intended for VT*/
	uint32_t isVideoPerfMode; /* set by camera; indicates buffer will be used for a
	                             high-performance video use case */
};
A fair guess: this metadata is there to track the attributes and features of the ion buffer allocated above.
bo_fd = gbm_bo_get_fd(buf->gbmbo); //3
	msmgbm_bo_get_fd
	return bo->ion_fd;
BUFFER_STRIDE = gbm_bo_get_stride(buf->gbmbo); //4
	presumably analogous to the above; I did not locate the implementation
buf->vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, bo_fd, 0); //7, from step 3 we know this bo_fd is the ion_fd; and the ion_fd is in fact the fd of the anon file tied to the dma-buf, i.e. the buffer actually drawn into
buf->handle = gbm_bo_get_handle(buf->gbmbo).u32; //8
	return bo->handle;
drmModeAddFB(fd, buf->width, buf->height, 32, 32, BUFFER_STRIDE, //9
	buf->handle, &buf->fb_id);
No different from the dumb-buffer case: a drm_framebuffer is created and the fb_id comes back.
In essence, libgbm here is just a wrapper around ion.
I won't dig into ion separately, since the gbm buffer is implemented on top of it anyway.
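For contrast, the same flow written against only the portable gbm API, with none of the msmgbm/ion extensions (error cleanup elided; the format and usage flags are illustrative choices):

#include <gbm.h>
#include <xf86drmMode.h>

static int gbm_buffer_to_fb(int fd, uint32_t w, uint32_t h, uint32_t *fb_id)
{
	struct gbm_device *gbm = gbm_create_device(fd);
	struct gbm_bo *bo = gbm_bo_create(gbm, w, h, GBM_FORMAT_XRGB8888,
					  GBM_BO_USE_SCANOUT);
	if (!bo)
		return -1;

	uint32_t stride = gbm_bo_get_stride(bo);
	uint32_t handle = gbm_bo_get_handle(bo).u32;

	/* same last step as the dumb buffer: handle -> drm_framebuffer -> fb_id */
	return drmModeAddFB(fd, w, h, 24, 32, stride, handle, fb_id);
}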
For reference:
modetest with dumb buffer: https://blog.csdn.net/u012839187/article/details/105833584
libgbm以及新的memory的api: https://lwn.net/Articles/734849/
libgbm的优缺点: https://lwn.net/Articles/703749/
private with libgbm and secure feature: https://blog.csdn.net/u012839187/article/details/113602453
dma_buf: https://www.kernel.org/doc/html/v5.6/driver-api/dma-buf.html#c.dma_buf
The dma-buf subsystem provides the framework for sharing buffers for hardware (DMA) access across multiple device drivers and subsystems, and for synchronizing asynchronous hardware access.
This is used, for example, by drm “prime” multi-GPU support, but is of course not limited to GPU use cases.
The three main components of this are: (1) dma-buf, representing a sg_table and exposed to userspace as a file descriptor to allow passing between devices, (2) fence, which provides a mechanism to signal when one device has finished access, and (3) reservation, which manages the shared or exclusive fence(s) associated with the buffer.
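Tying dma-buf back to the code above: a GEM handle can be exported as a dma-buf fd with drmPrimeHandleToFD(), the reverse of the import msmgbm uses. A sketch, where drm_fd and buf refer to the fd and buffer_object from the first example:

	int dmabuf_fd;
	if (drmPrimeHandleToFD(drm_fd, buf.handle, DRM_CLOEXEC, &dmabuf_fd) == 0) {
		/* dmabuf_fd can now travel over a unix socket and be imported
		 * on the other side with drmPrimeFDToHandle() */
	}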
Direct Memory Access (DMA)
This is the most complex of the data-transfer systems, as both the host and the slave can access each other's memory in any way they choose. As most CS or CprE people can quickly see, that only leads to more problems. So DMA programming is the most complex of the three normal types of memory access. With DMA, an external chip/processor (let's be honest, a 3D processor counts as an external processor in its own right) can see anything in our memory space, and we can see anything in its memory space, barring limitations from things like the GART. With DMA, a typical interaction with an external chip/processor is: "here's an address at which you can find a complex data structure in my memory space, in which you can find the commands and data describing the operation I wish you to perform. Have fun. Oh, tell me when you're done." DMA will normally allow the highest level of performance, but it does require some compromises that may sacrifice some latency for bandwidth.