Replies: 1 comment 2 replies
-
@laycookie not sure if you are still interested in this, but I managed to get something similar working, that being importing a dmabuf to a wgpu texture for both Vulkan and GLES. I create the buffers using the Smithay allocator. For the Vulkan backend: mod dmabuf {
use smithay::{
backend::allocator::{dmabuf::Dmabuf, format::get_bpp, Buffer},
reexports::{ash, gbm::Format},
};
use std::os::fd::IntoRawFd;
// Fixed image parameters for the imported dmabuf texture. Each tuple pairs the
// equivalent value across the APIs involved (ash/Vulkan, wgpu-hal, wgpu, DRM).
const MIP_LEVEL_COUNT: u32 = 1;
// (Vulkan sample-count flag, wgpu sample count) — no multisampling.
const SAMPLE_COUNT: (ash::vk::SampleCountFlags, u32) = (ash::vk::SampleCountFlags::TYPE_1, 1);
// (Vulkan image type, wgpu dimension) — plain 2D images.
const TEXTURE_DIMENSION: (ash::vk::ImageType, wgpu::TextureDimension) =
(ash::vk::ImageType::TYPE_2D, wgpu::TextureDimension::D2);
const ARRAY_LAYERS: u32 = 1;
// (Vulkan format, wgpu format, DRM fourcc). DRM fourccs are little-endian, so
// ABGR8888 (R in the lowest byte) corresponds to Vulkan/wgpu RGBA8 unorm.
pub const TEXTURE_FORMAT: (ash::vk::Format, wgpu::TextureFormat, Format) = (
ash::vk::Format::R8G8B8A8_UNORM,
wgpu::TextureFormat::Rgba8Unorm,
Format::Abgr8888,
);
// (Vulkan usage, wgpu-hal usage, wgpu usage) — the texture is only ever used
// as a color/render attachment.
const USAGE: (
ash::vk::ImageUsageFlags,
wgpu::hal::TextureUses,
wgpu::TextureUsages,
) = (
ash::vk::ImageUsageFlags::COLOR_ATTACHMENT,
wgpu::hal::TextureUses::COLOR_TARGET,
wgpu::TextureUsages::RENDER_ATTACHMENT,
);
/// Imports a dmabuf as a [`wgpu::Texture`] via the Vulkan HAL.
///
/// Panics if the device is not backed by Vulkan or the HAL import fails.
///
/// # Safety
/// The dmabuf must stay valid for the lifetime of the returned texture, and
/// `device` must actually be a Vulkan-backed device (see
/// `hal_texture_from_dmabuf` for the invariants of the raw import).
pub unsafe fn texture_from_dmabuf(device: &wgpu::Device, dmabuf: &Dmabuf) -> wgpu::Texture {
let (hal_texture, hal_descriptor) = device
.as_hal::<wgpu::hal::vulkan::Api, _, _>(|device| {
// `device` is None when the wgpu device is not Vulkan-backed.
device.map(|device| hal_texture_from_dmabuf(device, dmabuf))
})
.flatten()
.expect("Unable to create hal texture");
// Mirror the HAL descriptor into the public one; only `usage` differs in
// type (public TextureUsages vs HAL TextureUses).
device.create_texture_from_hal::<wgpu::hal::vulkan::Api>(
hal_texture,
&wgpu::TextureDescriptor {
label: hal_descriptor.label,
dimension: hal_descriptor.dimension,
size: hal_descriptor.size,
format: hal_descriptor.format,
usage: USAGE.2,
mip_level_count: hal_descriptor.mip_level_count,
sample_count: hal_descriptor.sample_count,
view_formats: hal_descriptor.view_formats.as_slice(),
},
)
}
/// Creates a raw `VkImage` bound to the dmabuf's imported memory and wraps it
/// as a wgpu-hal texture, returning it together with the descriptor used.
///
/// # Safety
/// - The dmabuf must describe a single-plane image matching `TEXTURE_FORMAT`
///   (only the last handle/offset is used).
/// - The returned texture must not outlive `device`.
/// - The imported `VkDeviceMemory` is never freed here (no drop callback is
///   installed), so the caller is responsible for cleanup or accepts the leak.
unsafe fn hal_texture_from_dmabuf(
device: &wgpu::hal::vulkan::Device,
dmabuf: &Dmabuf,
) -> (
wgpu::hal::vulkan::Texture,
wgpu::hal::TextureDescriptor<'static>,
) {
let vk_instance = device.shared_instance().raw_instance();
let vk_device = device.raw_device();
let vk_physical_device = device.raw_physical_device();
// Pick the first DEVICE_LOCAL memory type.
// NOTE(review): a strictly correct import should call
// vkGetMemoryFdPropertiesKHR and intersect its memoryTypeBits with this
// choice — confirm this works on the target driver.
let mem_properties = vk_instance.get_physical_device_memory_properties(vk_physical_device);
let memory_type_index = mem_properties
.memory_types
.into_iter()
.enumerate()
.find(|(_, mem)| {
mem.property_flags
.contains(ash::vk::MemoryPropertyFlags::DEVICE_LOCAL)
})
.expect("Unable to find memory type index")
.0;
// Duplicate the fd: on success vkAllocateMemory takes ownership of the fd
// passed in ImportMemoryFdInfoKHR, so we must not hand over the original.
let dma_fd = dmabuf
.handles()
.last()
.expect("No dmabuf fd")
.try_clone_to_owned()
.expect("Cant clone dmabuf fd");
let mut import_mem_fd_info = ash::vk::ImportMemoryFdInfoKHR::default()
.handle_type(ash::vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT)
.fd(dma_fd.into_raw_fd());
// Size estimate from width * height * bytes-per-pixel. NOTE(review): this
// ignores the plane's stride, so padded buffers are under-sized — prefer the
// plane stride * height (or the size reported by the allocator) if available.
let bytes_per_pixel = get_bpp(dmabuf.format().code).expect("Cant get bpp for dmabuf") / 8;
let size = dmabuf.width() * dmabuf.height() * bytes_per_pixel as u32;
let allocate_info = ash::vk::MemoryAllocateInfo::default()
.push_next(&mut import_mem_fd_info)
.allocation_size(size as u64)
.memory_type_index(memory_type_index as u32);
let memory = vk_device
.allocate_memory(&allocate_info, None)
.expect("Unable to import memory");
let image_info = ash::vk::ImageCreateInfo::default()
.flags(ash::vk::ImageCreateFlags::empty())
.image_type(TEXTURE_DIMENSION.0)
.format(TEXTURE_FORMAT.0)
.extent(ash::vk::Extent3D {
width: dmabuf.width(),
height: dmabuf.height(),
// Vulkan requires extent.depth >= 1 even for 2D images
// (VUID-VkImageCreateInfo-extent-00944); 0 was invalid here.
depth: 1,
})
.mip_levels(MIP_LEVEL_COUNT)
.array_layers(ARRAY_LAYERS)
.samples(SAMPLE_COUNT.0)
// LINEAR so the layout matches a plain (non-modifier) dmabuf.
.tiling(ash::vk::ImageTiling::LINEAR)
.usage(USAGE.0)
.sharing_mode(ash::vk::SharingMode::EXCLUSIVE)
.initial_layout(ash::vk::ImageLayout::UNDEFINED);
let image = vk_device
.create_image(&image_info, None)
.expect("Cant create image");
// Bind at the plane's offset within the imported memory.
vk_device
.bind_image_memory(
image,
memory,
dmabuf.offsets().last().expect("No offset") as u64,
)
.expect("Unable to bind memory to image");
let texture_descriptor = wgpu::hal::TextureDescriptor {
label: Some("Iced DMABUF imported texture"),
dimension: TEXTURE_DIMENSION.1,
size: wgpu::Extent3d {
width: dmabuf.width(),
height: dmabuf.height(),
depth_or_array_layers: ARRAY_LAYERS,
},
mip_level_count: MIP_LEVEL_COUNT,
sample_count: SAMPLE_COUNT.1,
format: TEXTURE_FORMAT.1,
usage: USAGE.1,
memory_flags: wgpu::hal::MemoryFlags::empty(),
view_formats: Vec::new(),
};
// No drop callback: wgpu will destroy the VkImage but not the memory.
let hal_texture =
wgpu::hal::vulkan::Device::texture_from_raw(image, &texture_descriptor, None);
(hal_texture, texture_descriptor)
}
} And for GLES (the wgpu version for this one is not the most recent, but it should still work; the GLES backend of the latest version didn't work for me, which is why I did the Vulkan thing in the first place): static TEXTURE_FORMAT: (wgpu::TextureFormat, Fourcc) =
(wgpu::TextureFormat::Rgba8Unorm, Fourcc::Abgr8888);
fn import_dmabuf(&self, dmabuf: &Dmabuf) -> wgpu::Texture {
let hal_instance = unsafe { self.wgpu.instance.as_hal::<wgpu::hal::gles::Api>() }.unwrap();
let egl_raw_context = unsafe {
self.wgpu
.adapter
.as_hal::<wgpu::hal::gles::Api, _, _>(|adapter| {
adapter.map(|adapter| adapter.adapter_context().raw_context())
})
.unwrap()
};
let egl_raw_display = hal_instance.raw_display().as_ptr();
let egl_raw_config = hal_instance.egl_config().as_ptr();
let egl_context = unsafe {
smithay::backend::egl::EGLContext::from_raw(
egl_raw_display,
egl_raw_config,
egl_raw_context,
)
.unwrap()
};
// Create a smithay glesrenderer with the same context as wgpu lol
let mut gles_renderer =
unsafe { smithay::backend::renderer::gles::GlesRenderer::new(egl_context).unwrap() };
let texture = gles_renderer.import_dmabuf(dmabuf, None).unwrap();
let hal_texture_descriptor = wgpu::hal::TextureDescriptor {
label: Some("Iced render dmabuf hal import"),
format: TEXTURE_FORMAT.0,
dimension: wgpu::TextureDimension::D2,
size: wgpu::Extent3d {
width: texture.size().w as u32,
height: texture.size().h as u32,
depth_or_array_layers: 1,
},
usage: wgpu::hal::TextureUses::PRESENT,
memory_flags: wgpu::hal::MemoryFlags::empty(),
mip_level_count: 1,
sample_count: 1,
view_formats: vec![],
};
let hal_texture = unsafe {
self.wgpu
.device
.as_hal::<wgpu::hal::gles::Api, _, _>(|device| {
device.map(|device| {
device.texture_from_raw(
NonZero::new(texture.tex_id()).unwrap(),
&hal_texture_descriptor,
Some(Box::new(texture)),
)
})
})
.unwrap()
.unwrap()
};
let wgpu_texture = unsafe {
self.wgpu
.device
.create_texture_from_hal::<wgpu::hal::gles::Api>(
hal_texture,
&wgpu::TextureDescriptor {
label: Some("Iced render dmabuf import"),
format: hal_texture_descriptor.format,
dimension: hal_texture_descriptor.dimension,
size: hal_texture_descriptor.size,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
mip_level_count: hal_texture_descriptor.mip_level_count,
sample_count: hal_texture_descriptor.sample_count,
view_formats: &[],
},
)
};
return wgpu_texture;
} |
Beta Was this translation helpful? Give feedback.
2 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
-
Hello, I'm having a difficult time trying to efficiently exchange data between wgpu and wayland. In order to display an image on wayland I need to attach a wl_buffer that contains the image to a surface, on their end. My issue is that I'm not exactly sure how I can create a buffer on a GPU that will be accessible both by wayland through wl_buffer, and by wgpu through wgpu::Buffer.
In Smithay/client-toolkit there is a good example that creates a wgpu::Surface and uses it to configure and render the image, however I don't believe this is an option for me because I have to copy some data from another wl_buffer provided to me by the system. This is easily achieved by just using
wl_buffer.copy(&wl_buffer)
or I'm getting back to square one where I need to find a way to get data from wl_buffer to wgpu::Buffer. Wayland allows me to create a wl_buffer from a dmabuf fd, so if I am able to allocate data on my GPU and get a dmabuf fd my issue will essentially be solved — but I haven't found a way to export a dmabuf fd from a wgpu::Buffer, or even just import data into a wgpu::Buffer from a dmabuf fd, so I'm at a loss right now.
Beta Was this translation helpful? Give feedback.
All reactions