Divide-Framework

#include "stdafx.h"

#include "config.h"

#include "Headers/GFXDevice.h"

#include "GUI/Headers/GUI.h"
#include "GUI/Headers/GUIText.h"
#include "GUI/Headers/GUIFlash.h"

#include "Rendering/Headers/Renderer.h"

#include "Managers/Headers/SceneManager.h"
#include "Managers/Headers/RenderPassManager.h"

#include "Core/Headers/PlatformContext.h"
#include "Core/Time/Headers/ProfileTimer.h"

#include "Platform/Video/Headers/IMPrimitive.h"
#include "Platform/Video/Textures/Headers/Texture.h"
#include "Platform/Video/Shaders/Headers/ShaderProgram.h"
#include "Platform/Video/Buffers/ShaderBuffer/Headers/ShaderBuffer.h"

namespace Divide {

void GFXDevice::uploadGPUBlock() {
    if (_gpuBlock._needsUpload) {
        // We flush the entire buffer on update to inform the GPU that we don't
        // need the previous data. Might avoid some driver sync
        _gfxDataBuffer->writeData(&_gpuBlock._data);
        _gfxDataBuffer->bind(ShaderBufferLocation::GPU_BLOCK);
        _api->updateClipPlanes();
        _gpuBlock._needsUpload = false;
    }
}

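// Flush an (unlocked) render queue into a sub-pass command: batch compatible draw commands,
// switch them to indirect rendering and forward each package's buffer and texture bindings.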
void GFXDevice::renderQueueToSubPasses(RenderBinType queueType, RenderSubPassCmd& subPassCmd) {
    RenderPackageQueue& renderQueue = _renderQueues[queueType._to_integral()];

    assert(renderQueue.locked() == false);
    if (!renderQueue.empty()) {
        U32 queueSize = renderQueue.size();
        for (U32 idx = 0; idx < queueSize; ++idx) {
            RenderPackage& package = renderQueue.getPackage(idx);
            GenericDrawCommands& drawCommands = package._drawCommands;
            if (drawCommands.size() > 0 && batchCommands(drawCommands)) {
                std::for_each(std::begin(drawCommands),
                                std::end(drawCommands),
                                [&](GenericDrawCommand& cmd) -> void {
                                    cmd.enableOption(GenericDrawCommand::RenderOptions::RENDER_INDIRECT);
                                });

                for (ShaderBufferList::value_type& it : package._shaderBuffers) {
                    subPassCmd._shaderBuffers.emplace_back(it._buffer, it._slot, it._range);
                }

                subPassCmd._textures.set(package._textureData);
                subPassCmd._commands.insert(std::cbegin(subPassCmd._commands),
                                            std::cbegin(drawCommands),
                                            std::cend(drawCommands));
            }
        }
        renderQueue.clear();
    }
}

void GFXDevice::flushCommandBuffer(const CommandBuffer& commandBuffer) {
    uploadGPUBlock();
    _api->flushCommandBuffer(commandBuffer);
}

void GFXDevice::lockQueue(RenderBinType type) {
    _renderQueues[type._to_integral()].lock();
}

void GFXDevice::unlockQueue(RenderBinType type) {
    _renderQueues[type._to_integral()].unlock();
}

U32 GFXDevice::renderQueueSize(RenderBinType queueType) {
    U32 queueIndex = queueType._to_integral();
    assert(_renderQueues[queueIndex].locked() == false);
    const RenderPackageQueue& queue = _renderQueues[queueIndex];

    return queue.size();
}

void GFXDevice::addToRenderQueue(RenderBinType queueType, const RenderPackage& package) {
    U32 queueIndex = queueType._to_integral();

    assert(_renderQueues[queueIndex].locked() == true);

    if (!package.isRenderable()) {
        return;
    }

    RenderPackageQueue& queue = _renderQueues[queueIndex];

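    // If the incoming package is compatible with the last queued one, merge its draw commands
    // into that package instead of adding a new entry.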
    if (!queue.empty()) {
        RenderPackage& previous = queue.back();

        if (previous.isCompatible(package)) {
            previous._drawCommands.insert(std::cend(previous._drawCommands),
                                          std::cbegin(package._drawCommands),
                                          std::cend(package._drawCommands));
            return;
        }
    } else {
        queue.reserve(Config::MAX_VISIBLE_NODES);
    }

    queue.push_back(package);
}

/// Prepare the list of visible nodes for rendering
GFXDevice::NodeData& GFXDevice::processVisibleNode(const SceneGraphNode& node, U32 dataIndex) {
    NodeData& dataOut = _matricesData[dataIndex];

    RenderingComponent* const renderable = node.get<RenderingComponent>();
    AnimationComponent* const animComp = node.get<AnimationComponent>();
    PhysicsComponent* const transform = node.get<PhysicsComponent>();

    // Extract transform data (if available)
    // (Nodes without transforms are considered as using identity matrices)
    if (transform) {
        // ... get the node's world matrix properly interpolated
        dataOut._worldMatrix.set(transform->getWorldMatrix(getFrameInterpolationFactor()));

        dataOut._normalMatrixWV.set(dataOut._worldMatrix);
        if (!transform->isUniformScaled()) {
            // Non-uniform scaling requires an inverseTranspose to negate
            // scaling contribution but preserve rotation
            dataOut._normalMatrixWV.setRow(3, 0.0f, 0.0f, 0.0f, 1.0f);
            dataOut._normalMatrixWV.inverseTranspose();
            dataOut._normalMatrixWV.mat[15] = 0.0f;
        }
        dataOut._normalMatrixWV.setRow(3, 0.0f, 0.0f, 0.0f, 0.0f);

        // Calculate the normal matrix (world * view)
        dataOut._normalMatrixWV *= getMatrix(MATRIX::VIEW);
    }

    // Since the normal matrix is 3x3, we can use the extra row and column to store additional data
    dataOut._normalMatrixWV.element(0, 3) = to_F32(animComp ? animComp->boneCount() : 0);
    dataOut._normalMatrixWV.setRow(3, node.get<BoundsComponent>()->getBoundingSphere().asVec4());
    // Get the material property matrix (alpha test, texture count, texture operation, etc.)
    renderable->getRenderingProperties(dataOut._properties, dataOut._normalMatrixWV.element(1, 3), dataOut._normalMatrixWV.element(2, 3));
    // Get the colour matrix (diffuse, specular, etc.)
    renderable->getMaterialColourMatrix(dataOut._colourMatrix);

    return dataOut;
}

void GFXDevice::buildDrawCommands(const RenderQueue::SortedQueues& sortedNodes,
                                  SceneRenderState& sceneRenderState,
                                  RenderPass::BufferData& bufferData,
                                  bool refreshNodeData)
{
    Time::ScopedTimer timer(_commandBuildTimer);
    // If there aren't any nodes visible in the current pass, don't update anything (but clear the render queue)

    RenderStagePass currentStage = getRenderStage();
    if (refreshNodeData) {
        bufferData._lastCommandCount = 0;
        bufferData._lasNodeCount = 0;
    }

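    // For shadow passes, push the active shadow casting light's shadow map array offset and the
    // number of shadow passes (one per layer for directional lights, one otherwise) into the GPU block.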
    if (currentStage.stage() == RenderStage::SHADOW) {
        Light* shadowLight = LightPool::currentShadowCastingLight();
        assert(shadowLight != nullptr);
        if (!COMPARE(_gpuBlock._data._renderProperties.x, shadowLight->getShadowProperties()._arrayOffset.x)) {
            _gpuBlock._data._renderProperties.x = to_F32(shadowLight->getShadowProperties()._arrayOffset.x);
            _gpuBlock._needsUpload = true;
        }
        U8 shadowPasses = shadowLight->getLightType() == LightType::DIRECTIONAL
                                                       ? shadowLight->getShadowMapInfo()->numLayers()
                                                       : 1;
        if (!COMPARE(_gpuBlock._data._renderProperties.y, to_F32(shadowPasses))) {
            _gpuBlock._data._renderProperties.y = to_F32(shadowPasses);
            _gpuBlock._needsUpload = true;
        }
    }

    U32 nodeCount = 0;
    U32 cmdCount = 0;

    for (const vectorImpl<SceneGraphNode*>& queue : sortedNodes) {
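        // First pass: let every visible node in this queue prepare its draw package for the current stage.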
        std::for_each(std::begin(queue), std::end(queue),
            [&](SceneGraphNode* node) -> void
            {
                RenderingComponent* renderable = node->get<RenderingComponent>();
                Attorney::RenderingCompGFXDevice::prepareDrawPackage(*renderable, sceneRenderState, currentStage);
            });

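        // Second pass: fetch each node's draw package and, when refreshing node data, copy its
        // transform data and draw commands into the per-frame caches uploaded to the GPU below.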
        std::for_each(std::begin(queue), std::end(queue),
            [&](SceneGraphNode* node) -> void
            {
                RenderingComponent* renderable = node->get<RenderingComponent>();
                RenderPackage& pkg = 
                    Attorney::RenderingCompGFXDevice::getDrawPackage(*renderable,
                                                                     sceneRenderState,
                                                                     currentStage,
                                                                     refreshNodeData ? cmdCount
                                                                                     : renderable->commandOffset(),
                                                                     refreshNodeData ? nodeCount
                                                                                     : renderable->commandIndex());
                if (pkg.isRenderable()) {
                    if (refreshNodeData) {
                        NodeData& dataOut = processVisibleNode(*node, nodeCount);
                        // Set properties.w to -1 to skip occlusion culling for the node
                        dataOut._properties.w = pkg.isOcclusionCullable() ? 1.0f : -1.0f;
                        for (GenericDrawCommand& cmd : pkg._drawCommands) {
                            for (U32 i = 0; i < cmd.drawCount(); ++i) {
                                _drawCommandsCache[cmdCount++].set(cmd.cmd());
                            }
                        }
                    }
                    nodeCount++;
                }
            });
    }

    if (refreshNodeData) {
        bufferData._lastCommandCount = cmdCount;
        bufferData._lasNodeCount = nodeCount;

        assert(cmdCount >= nodeCount);
        // If the buffer update required is large enough, just replace the entire thing
        if (nodeCount > Config::MAX_VISIBLE_NODES / 2) {
            bufferData._renderData->writeData(_matricesData.data());
        } else {
            // Otherwise, just update the needed range to save bandwidth
            bufferData._renderData->writeData(0, nodeCount, _matricesData.data());
        }

        ShaderBuffer& cmdBuffer = *bufferData._cmdBuffer;
        cmdBuffer.writeData(_drawCommandsCache.data());
        _api->registerCommandBuffer(cmdBuffer);

        // This forces a sync for each buffer to make sure all data is properly uploaded in VRAM
        bufferData._renderData->bind(ShaderBufferLocation::NODE_INFO);
    }
}

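// GPU occlusion culling: test each entry of the indirect command buffer against the supplied
// hierarchical depth buffer; the cull count is read back through the command buffer's atomic
// counter (see getLastCullCount()).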
void GFXDevice::occlusionCull(const RenderPass::BufferData& bufferData, const Texture_ptr& depthBuffer) {
    static const U32 GROUP_SIZE_AABB = 64;
    uploadGPUBlock();

    bufferData._cmdBuffer->bind(ShaderBufferLocation::GPU_COMMANDS);
    bufferData._cmdBuffer->bindAtomicCounter();

    depthBuffer->bind(to_U8(ShaderProgram::TextureUsage::DEPTH));
    U32 cmdCount = bufferData._lastCommandCount;

    _HIZCullProgram->bind();
    _HIZCullProgram->Uniform("dvd_numEntities", cmdCount);
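    // Dispatch one thread per draw command, rounded up to a whole number of work groups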
    _HIZCullProgram->DispatchCompute((cmdCount + GROUP_SIZE_AABB - 1) / GROUP_SIZE_AABB, 1, 1);
    _HIZCullProgram->SetMemoryBarrier(ShaderProgram::MemoryBarrierType::COUNTER);
}

U32 GFXDevice::getLastCullCount() const {
    const RenderPass::BufferData& bufferData = parent().renderPassManager().getBufferData(RenderStage::DISPLAY, 0);

    U32 cullCount = bufferData._cmdBuffer->getAtomicCounter();
    if (cullCount > 0) {
        bufferData._cmdBuffer->resetAtomicCounter();
    }
    return cullCount;
}

bool GFXDevice::batchCommands(GenericDrawCommands& commands) const {
    auto batch = [](GenericDrawCommand& previousIDC,
                    GenericDrawCommand& currentIDC)  -> bool {
            if (previousIDC.compatible(currentIDC) &&
                // Batchable commands must share the same buffer
                previousIDC.sourceBuffer()->getGUID() == currentIDC.sourceBuffer()->getGUID())
            {
                U32 prevCount = previousIDC.drawCount();
                if (previousIDC.cmd().baseInstance + prevCount != currentIDC.cmd().baseInstance) {
                    return false;
                }
                // If the rendering commands are batchable, increase the draw count for the previous one
                previousIDC.drawCount(to_U16(prevCount + currentIDC.drawCount()));
                // And set the current command's draw count to zero so it gets removed from the list later on
                currentIDC.drawCount(0);

                return true;
            }

            return false;
    };


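    // Walk the command list with two cursors: keep merging commands into the last un-merged
    // ("previous") command while they remain batchable, otherwise advance the merge target.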
    vectorAlg::vecSize previousCommandIndex = 0;
    vectorAlg::vecSize currentCommandIndex = 1;
    const vectorAlg::vecSize commandCount = commands.size();
    for (; currentCommandIndex < commandCount; ++currentCommandIndex) {
        GenericDrawCommand& previousCommand = commands[previousCommandIndex];
        GenericDrawCommand& currentCommand = commands[currentCommandIndex];
        if (!batch(previousCommand, currentCommand))
        {
            previousCommandIndex = currentCommandIndex;
        }
    }

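    // Drop the commands whose draw count was zeroed out during batching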
    commands.erase(std::remove_if(std::begin(commands),
                   std::end(commands),
                   [](const GenericDrawCommand& cmd) -> bool {
                       return cmd.drawCount() == 0;
                   }),
                   std::end(commands));
    return true;
}

bool GFXDevice::draw(const GenericDrawCommand& cmd) {
    uploadGPUBlock();
    if (_api->draw(cmd)) {
        if (cmd.isEnabledOption(GenericDrawCommand::RenderOptions::RENDER_GEOMETRY)) {
            registerDrawCall();
        }
        if (cmd.isEnabledOption(GenericDrawCommand::RenderOptions::RENDER_WIREFRAME)) {
            registerDrawCall();
        }
        return true;
    }

    return false;
}


void GFXDevice::flushDisplay(const vec4<I32>& targetViewport) {
    PipelineDescriptor pipelineDescriptor;
    pipelineDescriptor._stateHash = getDefaultStateBlock(true);
    pipelineDescriptor._shaderProgram = _displayShader;

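    // A single triangle covering the whole viewport (its corners are presumably generated by the
    // display shader) is used to blit the screen render target, bound to UNIT0 below.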
    GenericDrawCommand triangleCmd;
    triangleCmd.primitiveType(PrimitiveType::TRIANGLES);
    triangleCmd.drawCount(1);
    triangleCmd.pipeline(newPipeline(pipelineDescriptor));

    RenderTarget& screen = renderTarget(RenderTargetID(RenderTargetUsage::SCREEN));
    screen.bind(to_U8(ShaderProgram::TextureUsage::UNIT0),
                RTAttachmentType::Colour,
                to_U8(ScreenTargets::ALBEDO));


    GFX::Scoped2DRendering scoped2D(*this);
    GFX::ScopedViewport targetArea(*this, targetViewport);

    // Blit render target to screen
    draw(triangleCmd);

    // Render all 2D debug info and call API specific flush function
    ReadLock r_lock(_2DRenderQueueLock);
    for (std::pair<U32, GUID2DCbk>& callbackFunction : _2dRenderQueue) {
        callbackFunction.second.second();
    }
}

};  // namespace Divide

Commits for Divide-Framework/trunk/Source Code/Platform/Video/GFXDeviceDraw.cpp

Revision  Author  Committed  Message
912  IonutCava  Sun 02 Jul, 2017 23:42:39 +0000

[Ionut]
- Add experimental Weighted Blended Order Independent Transparency (ref: http://casual-effects.blogspot.co.uk/2015/03/implemented-weighted-blended-order.html)
  -- Add per drawbuffer blend
  -- All translucent renderbin items go via the new OIT 2-step rendering: accumulation and composition
- Make sure we have proper blend enabled for text rendering
- Add a primitive form of PushConstants (unused yet. Emulated via Uniform calls)
- Fix bug with XMLParser not using case insensitive key lookups

909  IonutCava  Thu 29 Jun, 2017 23:57:18 +0000

[Ionut]
- Rework RenderTarget class
  -- RTAttachment now deals with textures directly
  -- RT size needs to be specified upfront
- Application class is no longer a Singleton but passed around in the PlatformContext

905  IonutCava  Mon 26 Jun, 2017 15:56:21 +0000

[Ionut]
Untested code

- Rework RenderBin/RenderPass/RenderQueue system to allow per-bin type rendering decision
  -- Added to allow implementation of Weighted, Blended Order-Independent Transparency later on
- Better detection of transparency and translucency in textures and materials. Used to separate Alpha-Discard based geometry from OIT translucent geometry

899  IonutCava  Fri 23 Jun, 2017 15:37:44 +0000

[IonutCava]
- Move sRGB conversion and fog calculation to post-processing stages (fog in postProcessing using the depth buffer and sRGB in the presentToScreen step)
- Split the bloom.glsl file into multiple shaders: bloom, tonemap and luminanceCalc for ease of use purposes
- Rework RenderBin sorting: sort translucent/transparent nodes by material in depth passes
- Sort order is no longer part of RenderBin for better control

896  IonutCava  Thu 22 Jun, 2017 16:13:10 +0000

[Ionut]
- Add vs140 platform support
- Remove GPU sync options for tasks. This should be an API-level capability.

895  IonutCava  Wed 21 Jun, 2017 21:10:26 +0000

[IonutCava]
- Reorder and cleanup OpenGL backend a bit.
- Small code cleanup
- Some small profile-guided optimizations

893  IonutCava  Sun 18 Jun, 2017 17:33:07 +0000

[Ionut]
- Initial implementation of a PipelineStateObject (holds shader program, rasterizer state, etc)
- Rework PCH implementation a bit because VS2017 no longer has a /ZM option

890  IonutCava  Sun 14 May, 2017 20:54:59 +0000

[Ionut]
- Add pre-compiled header support
- Initial code for per-stage tessellation computation

881  IonutCava  Tue 21 Mar, 2017 22:07:37 +0000

[IonutCava]
- Multi draw command bug fix: drawCount > 1 should properly register all commands in the command buffer
- More tessellated terrain updates and fixes (still Work In Progress)

877  IonutCava  Thu 16 Mar, 2017 21:55:59 +0000

[IonutCava]
- Reflection updates
- Sky parallax fixes
- Initial support for Tessellation patches (used by the new terrain system – W.I.P.)