Implementing deferred shading is noticeably more involved than traditional rendering: we first have to build the G-buffer (i.e. render into a framebuffer with several color attachments) and then compute the lighting from it as a full-screen pass. This means we need an additional render pass (a Renderpass object), as well as separate pipeline objects and command buffers for the two passes.
In addition, we will use two new classes - Framebuffer and ScreenQuad. The first one wraps VkFramebuffer and represents a framebuffer with a set of attachments; the second one bundles the objects needed to perform a full-screen pass.
class Framebuffer
{
struct Attachment
{
Texture texture;
VkAttachmentDescription description;
};
Device * device = nullptr;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
uint32_t width = 0, height = 0;
Renderpass renderpass;
std::vector<Attachment *> attachments;
public:
Framebuffer () = default;
Framebuffer ( Framebuffer&& f )
{
std::swap ( device, f.device );
std::swap ( framebuffer, f.framebuffer );
std::swap ( width, f.width );
std::swap ( height, f.height );
std::swap ( attachments, f.attachments );
//renderpass = f.renderpass;
//f.renderpass = Renderpass ()
}
Framebuffer ( const Framebuffer& ) = delete;
~Framebuffer ()
{
clean ();
}
Framebuffer& operator = ( const Framebuffer& ) = delete;
VkFramebuffer getHandle () const
{
return framebuffer;
}
uint32_t getWidth () const
{
return width;
}
uint32_t getHeight () const
{
return height;
}
Renderpass& getRenderpass ()
{
return renderpass;
}
const Texture& getAttachment ( uint32_t index ) const
{
return attachments [index]->texture;
}
Texture& getAttachment ( uint32_t index )
{
return attachments [index]->texture;
}
void clean ()
{
for ( auto at : attachments )
delete at;
attachments.clear ();
if ( device != nullptr && framebuffer != VK_NULL_HANDLE )
vkDestroyFramebuffer ( device->getDevice (), framebuffer, nullptr );
framebuffer = VK_NULL_HANDLE;
}
Framebuffer& init ( Device& dev, uint32_t w, uint32_t h )
{
device = &dev;
width = w;
height = h;
return *this;
}
Framebuffer& addAttachment ( VkFormat format, VkImageUsageFlags usage, VkImageLayout finalLayout = VK_IMAGE_LAYOUT_UNDEFINED )
{
//VkImageAspectFlags aspectMask = 0;
VkImageLayout imageLayout = finalLayout;
if ( finalLayout == VK_IMAGE_LAYOUT_UNDEFINED )
{
if ( usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT )
{
//aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
if ( usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT )
{
//aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
}
}
//assert ( aspectMask > 0 );
Attachment * attachment = new Attachment;
attachment->texture.create ( *device, width, height, 1, 1, format, VK_IMAGE_TILING_OPTIMAL, usage | VK_IMAGE_USAGE_SAMPLED_BIT, 0 );
//attachment->texture.createImageView ( aspectMask );
attachment->description = {};
attachment->description.samples = VK_SAMPLE_COUNT_1_BIT;
attachment->description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attachment->description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
attachment->description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachment->description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachment->description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attachment->description.finalLayout = imageLayout;
attachment->description.format = format;
attachments.push_back ( attachment );
return *this;
}
Framebuffer& create ()
{
VkFramebufferCreateInfo createInfo = {};
std::vector<VkImageView> views;
for ( size_t i = 0; i < attachments.size (); i++ )
{
auto& desc = attachments [i]->description;
if ( attachments [i]->texture.getImage ().hasDepth () )
{
renderpass.addAttachment ( desc.format, desc.initialLayout, desc.finalLayout ); //VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL );
renderpass.addDepthSubpass ( (uint32_t) i );
}
else
{
renderpass.addAttachment ( desc.format, desc.initialLayout, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL );
renderpass.addSubpass ( (uint32_t) i );
}
views.push_back ( attachments [i]->texture.getImageView () );
}
renderpass.create ( *device ); // 2 dependencies ???
createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
createInfo.renderPass = renderpass.getHandle ();
createInfo.pAttachments = views.data();
createInfo.attachmentCount = (uint32_t) views.size ();
createInfo.width = width;
createInfo.height = height;
createInfo.layers = 1;
if ( vkCreateFramebuffer ( device->getDevice (), &createInfo, nullptr, &framebuffer ) != VK_SUCCESS )
fatal () << "Framebuffer: error creating framebuffer" << Log::endl;
return *this;
}
};
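With this builder-style interface, assembling a G-buffer takes just a few chained calls. Below is a minimal usage sketch (the formats, the window-sized dimensions and the particular set of attachments are illustrative assumptions; only init, addAttachment, create and getAttachment come from the class above, and a Device object is assumed to be available):
// illustrative usage only: two RGBA16F color attachments plus a depth/stencil attachment;
// attachment indices follow the order of the addAttachment calls (0, 1 - color, 2 - depth)
Framebuffer gbuffer;
gbuffer.init ( device, width, height )
       .addAttachment ( VK_FORMAT_R16G16B16A16_SFLOAT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT )
       .addAttachment ( VK_FORMAT_R16G16B16A16_SFLOAT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT )
       .addAttachment ( VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT )
       .create ();
Texture& normals = gbuffer.getAttachment ( 1 );   // color attachments can later be sampled in the lighting pass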
class ScreenQuad
{
Buffer buffer;
public:
ScreenQuad () = default;
ScreenQuad ( const ScreenQuad& ) = delete;
ScreenQuad& operator = ( const ScreenQuad& ) = delete;
bool isOk () const
{
return buffer.isOk ();
}
CommandBuffer& render ( CommandBuffer& commandBuffer )
{
assert ( buffer.getHandle () != VK_NULL_HANDLE );
return commandBuffer.bindVertexBuffers ( { {buffer, 0} } ).draw ( 6 );
}
// register vertex attributes in pipeline
GraphicsPipeline& setVertexAttrs ( GraphicsPipeline& pipeline )
{
pipeline.addVertexAttr ( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0 );
return pipeline;
}
// create buffer
void create ( Device& device )
{
static const float vertices [] =
{
-1, -1, 0, 0,
-1, 1, 0, 1,
1, 1, 1, 1,
-1, -1, 0, 0,
1, 1, 1, 1,
1, -1, 1, 0
};
uint32_t size = sizeof ( vertices );
Buffer stagingBuffer;
SingleTimeCommand cmd ( device );
// use staging buffer to copy data to GPU-local memory
stagingBuffer.create ( device, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT );
stagingBuffer.copy ( vertices, size );
buffer.create ( device, size, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT );
buffer.copyBuffer ( cmd, stagingBuffer, size );
}
};
Since we will have two render passes (building the G-buffer and computing the lighting from it), our application class contains the following fields:
struct UniformBufferObject
{
glm::mat4 mv;
glm::mat4 proj;
glm::mat4 nm;
glm::vec4 light;
};
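Note that this struct is copied verbatim into the uniform buffers, so it has to match the std140 layout of the corresponding uniform block in the shaders. Three mat4 members followed by a vec4 line up without any implicit padding; a compile-time check of that assumption (purely illustrative, not part of the original code) could look like this:
// illustrative sanity check: 3 * 64 + 16 = 208 bytes, no padding expected between members
static_assert ( sizeof ( UniformBufferObject ) == 3 * sizeof ( glm::mat4 ) + sizeof ( glm::vec4 ),
                "UniformBufferObject does not match the expected std140 layout" );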
class DeferredWindow : public VulkanWindow
{
std::vector<CommandBuffer> commandBuffers;
std::vector<Buffer> uniformBuffers;
std::vector<DescriptorSet> descriptorSets;
GraphicsPipeline pipeline;
Renderpass renderPass;
Sampler sampler;
Framebuffer fb; // G-buffer
CommandBuffer offscreenCmd;
DescriptorSet offscreenDescriptorSet;
Semaphore offscreenSemaphore;
GraphicsPipeline offscreenPipeline;
ScreenQuad screen; // class used to do screen processing
float zMin = 0.1f;
float zMax = 100.0f;
double time = 0;
DescriptorSet offscreenDescriptorSet1, offscreenDescriptorSet2, offscreenDescriptorSet3;
Mesh * box1 = nullptr; // decalMap, bump1 -> DS1
Mesh * box2 = nullptr; // stoneMap, bump2 -> DS2
Mesh * box3 = nullptr;
Mesh * box4 = nullptr;
Mesh * box5 = nullptr;
Mesh * knot = nullptr; // knotMap, bump2 -> DS3
Texture decalMap, stoneMap, knotMap;
Texture bump1, bump2;
As before, the constructor is responsible for creating and initializing all the objects we use, including creating/loading the geometry.
DeferredWindow ( int w, int h, const std::string& t ) : VulkanWindow ( w, h, t, true )
{
sampler.setMinFilter ( VK_FILTER_LINEAR ).setMagFilter ( VK_FILTER_LINEAR ).create ( device );
screen.create ( device );
decalMap.load ( device, "../../Textures/oak.jpg" );
stoneMap.load ( device, "../../Textures/block.jpg" );
knotMap.load ( device, "../../Textures/Oxidated.jpg" );
bump1.load ( device, "../../Textures/wood_normal.png" );
bump2.load ( device, "../../Textures/brick_nm.bmp" );
// create G-buffer with 2 RGBA16F color attachments and depth attachment
fb.init ( device, getWidth (), getHeight () )
.addAttachment ( VK_FORMAT_R16G16B16A16_SFLOAT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT )
.addAttachment ( VK_FORMAT_R16G16B16A16_SFLOAT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT )
.addAttachment ( VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL )
.create ();
// create meshes
box1 = createBox ( device, glm::vec3 ( -6, -0.1, -6 ), glm::vec3 ( 12, 3, 12 ), nullptr, true );
box2 = createBox ( device, glm::vec3 ( -1.5, 0, -0.5 ), glm::vec3 ( 1, 2, 2 ) );
box3 = createBox ( device, glm::vec3 ( 1.5, 0, -0.5 ), glm::vec3 ( 1, 1, 1 ) );
box4 = createBox ( device, glm::vec3 ( -4, 0, -0.5 ), glm::vec3 ( 1, 1, 1 ) );
box5 = createBox ( device, glm::vec3 ( -4, 0, -4 ), glm::vec3 ( 1, 1, 1 ) );
knot = createKnot ( device, 1, 4, 120, 30 );
// create all pipelines
createPipelines ();
setController ( new CameraController ( this ) );
}
~DeferredWindow ()
{
delete box1;
delete box2;
delete box3;
delete box4;
delete box5;
delete knot;
}
To create and destroy the uniform buffers we use the same methods as before - createUniformBuffers and freeUniformBuffers.
void createUniformBuffers ()
{
VkDeviceSize bufferSize = sizeof ( UniformBufferObject );
uniformBuffers.resize ( swapChain.imageCount() );
for ( size_t i = 0; i < swapChain.imageCount (); i++ )
uniformBuffers [i].create ( device, bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT );
}
void freeUniformBuffers ()
{
for ( size_t i = 0; i < swapChain.imageCount (); i++ )
uniformBuffers [i].clean ();
}
The createDescriptorSets method is more involved than before, since we now need a larger number of descriptor sets.
void createDescriptorSets ()
{
descriptorSets.resize ( swapChain.imageCount () );
// create descriptors for last pass
for ( uint32_t i = 0; i < swapChain.imageCount (); i++ )
{
descriptorSets [i]
.setLayout ( device, descAllocator, pipeline.getDescLayout () )
.addBuffer ( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniformBuffers [i], 0, sizeof ( UniformBufferObject ) )
.addImage ( 1, fb.getAttachment ( 0 ), sampler )
.create ();
}
// create descriptors for rendering boxes
offscreenDescriptorSet1
.setLayout ( device, descAllocator, offscreenPipeline.getDescLayout () )
.addBuffer ( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniformBuffers [0], 0, sizeof ( UniformBufferObject ) )
.addImage ( 1, decalMap, sampler )
.addImage ( 2, bump1, sampler )
.create ();
offscreenDescriptorSet2
.setLayout ( device, descAllocator, offscreenPipeline.getDescLayout () )
.addBuffer ( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniformBuffers [0], 0, sizeof ( UniformBufferObject ) )
.addImage ( 1, stoneMap, sampler )
.addImage ( 2, bump2, sampler )
.create ();
offscreenDescriptorSet3
.setLayout ( device, descAllocator, offscreenPipeline.getDescLayout () )
.addBuffer ( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniformBuffers [0], 0, sizeof ( UniformBufferObject ) )
.addImage ( 1, knotMap, sampler )
.addImage ( 2, bump2, sampler )
.create ();
}
Since we have two passes, creating the rendering pipelines also becomes more involved.
void createPipelines () override
{
createUniformBuffers ();
createDefaultRenderPass ( renderPass );
screen.setVertexAttrs ( pipeline )
.setDevice ( device )
.setVertexShader ( "shaders/ds-3-3.vert.spv" )
.setFragmentShader ( "shaders/ds-3-3.frag.spv" )
.setSize ( swapChain.getExtent ().width, swapChain.getExtent ().height )
.addVertexBinding ( sizeof ( float ) * 4, 0, VK_VERTEX_INPUT_RATE_VERTEX )
.addDescLayout ( 0, DescSetLayout ()
.add ( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT )
.add ( 1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT ) ) // color attachment 0
.setCullMode ( VK_CULL_MODE_NONE )
.setFrontFace ( VK_FRONT_FACE_COUNTER_CLOCKWISE )
.setDepthTest ( true )
.setDepthWrite ( true )
.create ( renderPass );
offscreenPipeline
.setDevice ( device )
.setVertexShader ( "shaders/depthpass.vert.spv" )
.setFragmentShader ( "shaders/depthpass.frag.spv" )
.setSize ( swapChain.getExtent ().width, swapChain.getExtent ().height )
.addVertexBinding ( sizeof ( BasicVertex ) )
.addVertexAttributes<BasicVertex> ()
.addDescLayout ( 0, DescSetLayout ()
.add ( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT )
.add ( 1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT ) // decal
.add ( 2, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT ) ) // bump
.setCullMode ( VK_CULL_MODE_NONE )
.setFrontFace ( VK_FRONT_FACE_COUNTER_CLOCKWISE )
.setDepthTest ( true )
.setDepthWrite ( true )
.create ( fb.getRenderpass () );
// framebuffers and descriptor sets must be created before the command buffers are recorded
swapChain.createFramebuffers ( renderPass, depthTexture.getImageView () );
createDescriptorSets ();
createCommandBuffers ( renderPass );
createOffscreenCommandBuffer ();
}
void freePipelines () override
{
commandBuffers.clear ();
pipeline.clean ();
renderPass.clean ();
freeUniformBuffers ();
descriptorSets.clear ();
descAllocator.clean ();
}
In addition, there are now two methods for creating command buffers - createCommandBuffers and createOffscreenCommandBuffer.
void createCommandBuffers ( Renderpass& renderPass )
{
auto framebuffers = swapChain.getFramebuffers ();
commandBuffers = device.allocCommandBuffers ( (uint32_t)framebuffers.size ());
for ( size_t i = 0; i < commandBuffers.size(); i++ )
{
commandBuffers [i].begin ()
.beginRenderPass ( RenderPassInfo ( renderPass )
.framebuffer ( framebuffers [i] )
.extent ( swapChain.getExtent ().width, swapChain.getExtent ().height )
.clearColor ()
.clearDepthStencil () )
.pipeline ( pipeline )
.addDescriptorSets ( { descriptorSets[i] } )
.render ( &screen )
.end ();
}
}
void createOffscreenCommandBuffer ()
{
offscreenSemaphore.create ( device );
offscreenCmd.create ( device );
offscreenCmd.begin ( true )
.beginRenderPass ( RenderPassInfo ( fb.getRenderpass () )
.clearColor ()
.clearColor ()
.clearDepthStencil ()
.framebuffer ( fb )
.extent ( fb.getWidth (), fb.getHeight () ) )
.pipeline ( offscreenPipeline )
.addDescriptorSets ( { offscreenDescriptorSet1 } ).render ( box1 )
.addDescriptorSets ( { offscreenDescriptorSet2 } ).render ( box2 ).render ( box3 ).render ( box4 ).render ( box5 )
.addDescriptorSets ( { offscreenDescriptorSet3 } ).render ( knot )
.end ();
}
The submit method also changes slightly because of the two render passes: the G-buffer pass is submitted first and signals a semaphore that the lighting pass then waits on.
void submit ( uint32_t imageIndex ) override
{
updateUniformBuffer ( imageIndex );
VkFence currentFence = swapChain.currentInFlightFence ();
vkResetFences ( device.getDevice (), 1, &currentFence );
SubmitInfo ()
.wait ( { { swapChain.currentAvailableSemaphore (), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT } } )
.buffers ( { offscreenCmd } )
.signal ( { offscreenSemaphore.getHandle () } )
.submit ( device.getGraphicsQueue (), swapChain.currentInFlightFence () );
SubmitInfo ()
.wait ( { { offscreenSemaphore.getHandle (), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT } } )
.buffers ( { commandBuffers [imageIndex] } )
.signal ( { swapChain.currentRenderFinishedSemaphore () } )
.submit ( device.getGraphicsQueue () );
}
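The updateUniformBuffer method called at the top of submit is not shown here; it fills a UniformBufferObject for the current frame and copies it into the corresponding uniform buffer. A possible sketch is given below (the fixed camera and light position are illustrative assumptions, not the author's exact code; the real example drives the view matrix from the CameraController):
// illustrative sketch; requires <glm/gtc/matrix_transform.hpp> and <glm/gtc/matrix_inverse.hpp>
void updateUniformBuffer ( uint32_t imageIndex )
{
	UniformBufferObject ubo    = {};
	float               aspect = (float) getWidth () / (float) getHeight ();
	// hypothetical fixed camera; the actual example takes this from the controller
	glm::mat4           view   = glm::lookAt ( glm::vec3 ( 5, 5, 5 ), glm::vec3 ( 0, 0, 0 ), glm::vec3 ( 0, 0, 1 ) );

	ubo.mv    = view;                                            // model-view matrix
	ubo.proj  = glm::perspective ( glm::radians ( 45.0f ), aspect, zMin, zMax );
	ubo.proj [1][1] *= -1;                                       // flip Y for Vulkan clip space
	ubo.nm    = glm::inverseTranspose ( ubo.mv );                // normal matrix
	ubo.light = glm::vec4 ( 4.0f, 4.0f, 4.0f, 1.0f );            // light position, illustrative

	uniformBuffers [imageIndex].copy ( &ubo, sizeof ( ubo ) );   // host-visible, coherent buffer
}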
The full source code for this example can be downloaded from the project repository - https://github.com/steps3d/vulkan-with-classes.