
Here is an example render of the abouttime scene:
Currently it uses GLSL, because CUDA can't run on AMD hardware and Nvidia does not support OpenCL 2.0, so bindless textures are the only way I found to access all textures with hardware sampling.
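For anyone unfamiliar with the technique, here is a minimal sketch of what that bindless path looks like on the C++ side, assuming a loader such as GLEW that exposes GL_ARB_bindless_texture (the function name, width, height and pixels are my own placeholders, not part of the engine's API):
Code: Select all
#include <GL/glew.h>

// Sketch: create a texture, obtain a 64-bit bindless handle and make it
// resident so shaders may sample it without binding it to a texture unit.
GLuint64 CreateBindlessTexture(int width, int height, const void *pixels)
{
    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    glGenerateMipmap(GL_TEXTURE_2D);

    GLuint64 handle = glGetTextureHandleARB(tex);
    glMakeTextureHandleResidentARB(handle); // must be resident before use
    return handle;
}

// The handles are then stored in a buffer the shader indexes, e.g.:
//   #extension GL_ARB_bindless_texture : require
//   layout(std430, binding = 0) readonly buffer Textures {
//       sampler2D textures[];
//   };
//   vec4 c = texture(textures[texId], uv); // hardware-filtered sample

This way the shader can reach every texture in the scene through an ordinary buffer index while still getting full hardware filtering.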
Main API:
Code: Select all
#include <glm/glm.hpp>

// Platform/context settings passed to the device once at setup.
struct LGE_DevSettings
{
    void *m_HDC;         // Win32 device context handle
    void *m_HGLRC;       // OpenGL rendering context handle
    bool  m_threadedGL;  // run GL work on a dedicated thread
    bool  m_enableVSync;
};

// Per-frame parameters: camera basis, lighting and quality settings.
struct LGE_RuntimeParams
{
    glm::vec4 m_cameraRight;
    glm::vec4 m_cameraUp;
    glm::vec4 m_cameraForward;
    glm::vec4 m_cameraPosition;
    glm::vec4 m_sunDirection;
    glm::vec4 m_sunColor;
    unsigned int m_width;
    unsigned int m_height;
    unsigned int m_spp;        // samples per pixel
    unsigned int m_skyTexture; // handle returned by AddTexture()
    float m_time;
    float m_secondaryRaysMipmap; // mipmap selection for secondary rays
    float m_FOV;
    float m_denoiseAmount;
};

class LGE_Device
{
public:
    virtual ~LGE_Device() {}
    virtual bool SetSettings(LGE_DevSettings *pSettings) = 0;
    virtual void SetRuntimeParams(LGE_RuntimeParams *pParams) = 0;
    // Renders a band of `count` scanlines starting at `first_line`.
    virtual void Render(unsigned int first_line, unsigned int count) = 0;
    virtual void WaitSync() = 0;
    virtual void WaitRender() = 0;
    // Uploads an image and returns a texture handle.
    virtual unsigned int AddTexture(unsigned int width, unsigned int height, void *image) = 0;
    virtual void DeleteTexture(unsigned int tex) = 0;
    // Uploads BVH geometry and returns a geometry handle.
    virtual unsigned int AddBVHGeometry(void *pGeometry, unsigned int size) = 0;
    virtual void DeleteBVHGeometry(unsigned int handle) = 0;
    virtual void SetGeometryTransform(unsigned int geometry, glm::vec4 *pTransform) = 0;
    // Attaches shader source code to the given slot.
    virtual void AttachShader(const char *code, unsigned int slot) = 0;
};
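
To show how the pieces fit together, here is a hypothetical usage sketch of the API above; LGE_CreateDevice, hdc, hglrc, skyPixels, bvhData and bvhSize are placeholders I made up for illustration, not part of the actual interface:
Code: Select all
LGE_Device *dev = LGE_CreateDevice(); // assumed factory, not shown above

LGE_DevSettings settings = {};
settings.m_HDC = hdc;         // existing Win32 device context
settings.m_HGLRC = hglrc;     // GL context to share resources with
settings.m_threadedGL = true;
settings.m_enableVSync = false;
dev->SetSettings(&settings);

// Upload resources and keep the returned handles.
unsigned int sky  = dev->AddTexture(2048, 1024, skyPixels);
unsigned int mesh = dev->AddBVHGeometry(bvhData, bvhSize);

LGE_RuntimeParams params = {};
params.m_width = 1280;
params.m_height = 720;
params.m_spp = 4;
params.m_skyTexture = sky;
params.m_FOV = 75.0f;
// ...fill in camera basis vectors, sun direction/color, etc.
dev->SetRuntimeParams(&params);

// Render the frame as two bands of scanlines, then wait for completion.
dev->Render(0, 360);
dev->Render(360, 360);
dev->WaitRender();

Splitting Render() into line ranges like this presumably lets the caller spread a frame across several calls or devices; that is my reading of the signature rather than anything stated above.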