Skip to content

Commit

Permalink
Merge pull request #245 from linebender/reuse_buf
Browse files Browse the repository at this point in the history
Prototype of buffer reuse
  • Loading branch information
raphlinus authored Jan 13, 2023
2 parents 96bf4f3 + ed437f7 commit 02d8b28
Show file tree
Hide file tree
Showing 2 changed files with 79 additions and 36 deletions.
4 changes: 2 additions & 2 deletions shader/coarse.wgsl
Original file line number Diff line number Diff line change
Expand Up @@ -399,8 +399,8 @@ fn main(
}
workgroupBarrier();
}
if bin_tile_x < config.width_in_tiles && bin_tile_y < config.height_in_tiles {
//ptcl[cmd_offset] = CMD_END;
if bin_tile_x + tile_x < config.width_in_tiles && bin_tile_y + tile_y < config.height_in_tiles {
ptcl[cmd_offset] = CMD_END;
// TODO: blend stack allocation
}
}
111 changes: 77 additions & 34 deletions src/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@ use std::{
use futures_intrusive::channel::shared::GenericOneshotReceiver;
use parking_lot::RawMutex;
use wgpu::{
util::DeviceExt, BindGroup, BindGroupLayout, Buffer, BufferAsyncError, BufferSlice, BufferView,
ComputePipeline, Device, Queue, Texture, TextureAspect, TextureFormat, TextureUsages,
TextureView, TextureViewDimension,
util::DeviceExt, BindGroup, BindGroupLayout, Buffer, BufferAsyncError, BufferSlice,
BufferUsages, BufferView, ComputePipeline, Device, Queue, Texture, TextureAspect,
TextureFormat, TextureUsages, TextureView, TextureViewDimension,
};

/// Crate-wide error alias: a boxed trait object rather than a concrete
/// error enum, so any `std::error::Error` can be propagated with `?`.
pub type Error = Box<dyn std::error::Error>;
Expand All @@ -41,6 +41,7 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0);

/// GPU execution engine: owns the registered shaders and a pool of
/// reusable GPU buffers.
pub struct Engine {
    // Shaders registered via the engine's add-shader path; presumably
    // addressed by index/id elsewhere — TODO confirm against callers.
    shaders: Vec<Shader>,
    // Recycled buffers, reaped after each run and reused to avoid
    // reallocating on every frame.
    pool: ResourcePool,
}

struct Shader {
Expand Down Expand Up @@ -123,9 +124,17 @@ struct BindMap {
image_map: HashMap<Id, (Texture, TextureView)>,
}

/// Pool of idle GPU buffers available for reuse.
#[derive(Default)]
struct ResourcePool {
    // Free buffers grouped by (rounded-up size class in bytes, usage flags);
    // `get_buf` pops from a bucket, `reap_bindmap` pushes back into it.
    bufs: HashMap<(u64, BufferUsages), Vec<Buffer>>,
}

impl Engine {
/// Create an engine with no shaders registered and an empty buffer pool.
pub fn new() -> Engine {
    Engine {
        shaders: vec![],
        pool: Default::default(),
    }
}

/// Add a shader.
Expand Down Expand Up @@ -233,21 +242,19 @@ impl Engine {
for command in &recording.commands {
match command {
Command::Upload(buf_proxy, bytes) => {
let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: &bytes,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
});
let usage =
BufferUsages::COPY_SRC | BufferUsages::COPY_DST | BufferUsages::STORAGE;
let buf = self.pool.get_buf(bytes.len() as u64, usage, device);
// TODO: if buffer is newly created, might be better to make it mapped at creation
// and copy. However, we expect reuse will be most common.
queue.write_buffer(&buf, 0, bytes);
bind_map.insert_buf(buf_proxy.id, buf);
}
Command::UploadUniform(buf_proxy, bytes) => {
let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: &bytes,
usage: wgpu::BufferUsages::UNIFORM,
});
let usage = BufferUsages::UNIFORM | BufferUsages::COPY_DST;
// Same consideration as above
let buf = self.pool.get_buf(bytes.len() as u64, usage, device);
queue.write_buffer(&buf, 0, bytes);
bind_map.insert_buf(buf_proxy.id, buf);
}
Command::UploadImage(image_proxy, bytes) => {
Expand Down Expand Up @@ -310,6 +317,7 @@ impl Engine {
&shader.bind_group_layout,
bindings,
external_resources,
&mut self.pool,
)?;
let mut cpass = encoder.begin_compute_pass(&Default::default());
cpass.set_pipeline(&shader.pipeline);
Expand All @@ -328,12 +336,13 @@ impl Engine {
downloads.buf_map.insert(proxy.id, buf);
}
Command::Clear(proxy, offset, size) => {
let buffer = bind_map.get_or_create(*proxy, device)?;
let buffer = bind_map.get_or_create(*proxy, device, &mut self.pool)?;
encoder.clear_buffer(buffer, *offset, *size);
}
}
}
queue.submit(Some(encoder.finish()));
self.pool.reap_bindmap(bind_map);
Ok(downloads)
}
}
Expand Down Expand Up @@ -481,6 +490,7 @@ impl BindMap {
layout: &BindGroupLayout,
bindings: &[ResourceProxy],
external_resources: &[ExternalResource],
pool: &mut ResourcePool,
) -> Result<BindGroup, Error> {
// These functions are ugly and linear, but the remap array should generally be
// small. Should find a better solution for this.
Expand Down Expand Up @@ -519,14 +529,9 @@ impl BindMap {
continue;
}
if let Entry::Vacant(v) = self.buf_map.entry(proxy.id) {
let buf = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: proxy.size,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let usage =
BufferUsages::COPY_SRC | BufferUsages::COPY_DST | BufferUsages::STORAGE;
let buf = pool.get_buf(proxy.size, usage, device);
v.insert(buf);
}
}
Expand Down Expand Up @@ -595,18 +600,17 @@ impl BindMap {
Ok(bind_group)
}

fn get_or_create(&mut self, proxy: BufProxy, device: &Device) -> Result<&Buffer, Error> {
fn get_or_create(
&mut self,
proxy: BufProxy,
device: &Device,
pool: &mut ResourcePool,
) -> Result<&Buffer, Error> {
match self.buf_map.entry(proxy.id) {
Entry::Occupied(occupied) => Ok(occupied.into_mut()),
Entry::Vacant(vacant) => {
let buf = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: proxy.size,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let usage = BufferUsages::COPY_SRC | BufferUsages::COPY_DST | BufferUsages::STORAGE;
let buf = pool.get_buf(proxy.size, usage, device);
Ok(vacant.insert(buf))
}
}
Expand Down Expand Up @@ -648,3 +652,42 @@ impl<'a> DownloadsMapped<'a> {
Ok(slice.get_mapped_range())
}
}

/// Number of high bits preserved when rounding a buffer size up to a size
/// class (see `ResourcePool::size_class`). With 1 bit the classes fall at
/// powers of two and 1.5x powers of two (2, 3, 4, 6, 8, 12, ...), so a
/// pooled buffer is never more than ~50% larger than the requested size.
const SIZE_CLASS_BITS: u32 = 1;

impl ResourcePool {
    /// Hand out a buffer of at least `size` bytes with exactly `usage`,
    /// recycling one from the pool when a matching bucket has a free buffer,
    /// otherwise creating a fresh one on the device.
    fn get_buf(&mut self, size: u64, usage: BufferUsages, device: &Device) -> Buffer {
        let rounded_size = Self::size_class(size, SIZE_CLASS_BITS);
        let recycled = self.bufs.get_mut(&(rounded_size, usage)).and_then(Vec::pop);
        recycled.unwrap_or_else(|| {
            device.create_buffer(&wgpu::BufferDescriptor {
                label: None,
                size: rounded_size,
                usage,
                mapped_at_creation: false,
            })
        })
    }

    /// Consume a spent bind map and return all of its buffers to the pool.
    /// Buffers were allocated at size-class granularity, so `buf.size()`
    /// already matches the bucket key used by `get_buf`.
    fn reap_bindmap(&mut self, bind_map: BindMap) {
        for buf in bind_map.buf_map.into_values() {
            let key = (buf.size(), buf.usage());
            self.bufs.entry(key).or_default().push(buf);
        }
    }

    /// Round `x` up to the nearest size class: the smallest value >= `x`
    /// representable using only its top `bits + 1` binary digits, floored
    /// at `1 << bits`. For bits = 1 the classes are 2, 3, 4, 6, 8, 12, ...
    fn size_class(x: u64, bits: u32) -> u64 {
        if x <= 1 << bits {
            return 1 << bits;
        }
        let y = x - 1;
        // Mask of every bit strictly below the top `bits + 1` bits of `y`;
        // OR-ing it in and adding one rounds up to the class boundary.
        let low_mask = ((u64::MAX / 2) >> bits) >> y.leading_zeros();
        (y | low_mask) + 1
    }
}

0 comments on commit 02d8b28

Please sign in to comment.