Compare commits
No commits in common. "b46b4bddba6c609307ca198438c60d617e20c752" and "74b6f587941cd322428a3eeccd0a4f67304a3749" have entirely different histories.
b46b4bddba...74b6f58794
@@ -380,9 +380,8 @@ _create_vertices.argtypes = (
     ctypes.c_void_p) # meshes


 def create_vertices(format, nvertices, vertices, indices, meshes):
-    assert len(meshes) % 2 == 0
     return _create_vertices(format,
-        nvertices, _ubyte_addr(vertices), len(indices), _ushort_addr(indices), len(meshes) // 2, _uint_addr(meshes))
+        nvertices, _ubyte_addr(vertices), len(indices), _ushort_addr(indices), len(meshes), _uint_addr(meshes))


 create_batch = _engine.rk_create_batch
 create_batch.restype = _handle
@@ -18,12 +18,15 @@

 #include "types.hpp"

-template<typename _small>
+template<typename _type>
 bool _rk_cmp_memcpy_small(
-    _small * __restrict dst,
-    _small const * __restrict src,
-    unsigned count) {
-    _small cmp = 0;
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
+    unsigned count = (size / sizeof(_type));
+    _type * dst = reinterpret_cast<_type *>(_dst);
+    _type const * src = reinterpret_cast<_type const *>(_src);
+    _type cmp = 0;
     do {
         cmp |= *dst ^ *src;
         *dst++ = *src++;
@@ -33,67 +36,134 @@ bool _rk_cmp_memcpy_small(

 template<typename _big, typename _small>
 bool _rk_cmp_memcpy_big(
-    _small * const __restrict _dst,
-    _small const * const __restrict _src,
-    unsigned const _count) {
-    unsigned const ratio = sizeof(_big) / sizeof(_small);
-    unsigned big_count = _count / ratio;
-    unsigned const small_count = _count % ratio;
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
+    unsigned count = size / sizeof(_big);
+    unsigned const remain = size % sizeof(_big);
     _big * dst = reinterpret_cast<_big *>(_dst);
     _big const * src = reinterpret_cast<_big const *>(_src);
     _big cmp = 0;
     do {
         cmp |= *dst ^ *src;
         *dst++ = *src++;
-    } while(--big_count > 0);
+    } while(--count > 0);
     bool modified = (cmp != 0);
-    if (small_count) {
-        modified |= _rk_cmp_memcpy_small<_small>(
-            reinterpret_cast<_small *>(dst), reinterpret_cast<_small const *>(src), small_count);
+    if (remain) {
+        modified |= _rk_cmp_memcpy_small<_small>(dst, src, remain);
     }
     return modified;
 }

 #ifdef RK_CMP_MEMCPY_UNALIGNED
-#define _rk_count_and_alignment(_t) (count >= (sizeof(_t) / sizeof(_small)))
+#define _rk_size_and_alignment(_t) (size >= sizeof(_t))
 #else
-#define _rk_count_and_alignment(_t) ((count >= (sizeof(_t) / sizeof(_small))) && !(alignment % sizeof(_t)))
+#define _rk_size_and_alignment(_t) (size >= sizeof(_t) && !(alignment & (sizeof(_t) - 1)))
 #endif

-template<typename _small>
+template<unsigned _s>
 bool rk_cmp_memcpy(
-    _small * const __restrict _dst,
-    _small const * const __restrict _src,
-    unsigned const count) {
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size);
+
+template<>
+bool rk_cmp_memcpy<sizeof(rk_ubyte)>(
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
 #ifndef RK_CMP_MEMCPY_UNALIGNED
     unsigned const alignment = reinterpret_cast<uintptr_t>(_dst) | reinterpret_cast<uintptr_t const>(_src);
 #endif
-    if (sizeof(_small) < sizeof(rk_ullong)) {
-        if (_rk_count_and_alignment(rk_ullong)) {
-            return _rk_cmp_memcpy_big<rk_ullong, _small>(_dst, _src, count);
-        }
+    if (_rk_size_and_alignment(rk_ullong)) {
+        return _rk_cmp_memcpy_big<rk_ullong, rk_ubyte>(_dst, _src, size);
     }
-    if (sizeof(_small) < sizeof(rk_ulong)) {
-        if (_rk_count_and_alignment(rk_ulong)) {
-            return _rk_cmp_memcpy_big<rk_ulong, _small>(_dst, _src, count);
-        }
+    if (_rk_size_and_alignment(rk_ulong)) {
+        return _rk_cmp_memcpy_big<rk_ulong, rk_ubyte>(_dst, _src, size);
     }
-    if (sizeof(_small) < sizeof(rk_uint)) {
-        if (_rk_count_and_alignment(rk_uint)) {
-            return _rk_cmp_memcpy_big<rk_uint, _small>(_dst, _src, count);
-        }
+    if (_rk_size_and_alignment(rk_uint)) {
+        return _rk_cmp_memcpy_big<rk_uint, rk_ubyte>(_dst, _src, size);
     }
-    if (sizeof(_small) < sizeof(rk_ushort)) {
-        if (_rk_count_and_alignment(rk_ushort)) {
-            return _rk_cmp_memcpy_big<rk_ushort, _small>(_dst, _src, count);
-        }
+    if (_rk_size_and_alignment(rk_ushort)) {
+        return _rk_cmp_memcpy_big<rk_ushort, rk_ubyte>(_dst, _src, size);
     }
-    if (count) {
-        return _rk_cmp_memcpy_small<_small>(_dst, _src, count);
+    if (size > 0) {
+        return _rk_cmp_memcpy_small<rk_ubyte>(_dst, _src, size);
     }
     return false;
 }

-#undef _rk_count_and_alignment
+template<>
+bool rk_cmp_memcpy<sizeof(rk_ushort)>(
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
+#ifndef RK_CMP_MEMCPY_UNALIGNED
+    unsigned const alignment = reinterpret_cast<uintptr_t>(_dst) | reinterpret_cast<uintptr_t const>(_src);
+#endif
+    if (_rk_size_and_alignment(rk_ullong)) {
+        return _rk_cmp_memcpy_big<rk_ullong, rk_ushort>(_dst, _src, size);
+    }
+    if (_rk_size_and_alignment(rk_ulong)) {
+        return _rk_cmp_memcpy_big<rk_ulong, rk_ushort>(_dst, _src, size);
+    }
+    if (_rk_size_and_alignment(rk_uint)) {
+        return _rk_cmp_memcpy_big<rk_uint, rk_ushort>(_dst, _src, size);
+    }
+    if (size > 0) {
+        return _rk_cmp_memcpy_small<rk_ushort>(_dst, _src, size);
+    }
+    return false;
+}
+
+template<>
+bool rk_cmp_memcpy<sizeof(rk_uint)>(
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
+#ifndef RK_CMP_MEMCPY_UNALIGNED
+    unsigned const alignment = reinterpret_cast<uintptr_t>(_dst) | reinterpret_cast<uintptr_t const>(_src);
+#endif
+    if (_rk_size_and_alignment(rk_ullong)) {
+        return _rk_cmp_memcpy_big<rk_ullong, rk_uint>(_dst, _src, size);
+    }
+    if (_rk_size_and_alignment(rk_ulong)) {
+        return _rk_cmp_memcpy_big<rk_ulong, rk_uint>(_dst, _src, size);
+    }
+    if (size > 0) {
+        return _rk_cmp_memcpy_small<rk_uint>(_dst, _src, size);
+    }
+    return false;
+}
+
+template<>
+bool rk_cmp_memcpy<sizeof(rk_ulong)>(
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
+#ifndef RK_CMP_MEMCPY_UNALIGNED
+    unsigned const alignment = reinterpret_cast<uintptr_t>(_dst) | reinterpret_cast<uintptr_t const>(_src);
+#endif
+    if (_rk_size_and_alignment(rk_ullong)) {
+        return _rk_cmp_memcpy_big<rk_ullong, rk_ulong>(_dst, _src, size);
+    }
+    if (size > 0) {
+        return _rk_cmp_memcpy_small<rk_ulong>(_dst, _src, size);
+    }
+    return false;
+}
+
+template<>
+bool rk_cmp_memcpy<sizeof(rk_ullong)>(
+    void * const __restrict _dst,
+    void const * const __restrict _src,
+    unsigned const size) {
+    if (size > 0) {
+        return _rk_cmp_memcpy_small<rk_ullong>(_dst, _src, size);
+    }
+    return false;
+}
+
+#undef _rk_size_and_alignment

 #endif // RK_ENGINE_CMP_MEMCPY_H
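Note (reviewer sketch, not part of the diff): the rewritten rk_cmp_memcpy above copies a raw byte span while OR-ing together the XOR of every word it overwrites, so its return value reports whether the destination actually changed. A minimal standalone sketch of that compare-and-copy idea, using plain standard types instead of the engine's rk_* typedefs:

// Standalone sketch, not the engine's header: copy `size` bytes from src to dst and
// report whether any byte actually changed, the same change-detection idea the
// rk_cmp_memcpy specializations implement with wider machine words.
#include <cstddef>
#include <cstdio>

static bool cmp_memcpy_bytes(void * dst_, void const * src_, std::size_t size) {
    unsigned char * dst = static_cast<unsigned char *>(dst_);
    unsigned char const * src = static_cast<unsigned char const *>(src_);
    unsigned char diff = 0;
    for (std::size_t i = 0; i < size; ++i) {
        diff |= static_cast<unsigned char>(dst[i] ^ src[i]);  // record any difference
        dst[i] = src[i];                                      // then overwrite
    }
    return diff != 0;  // true if the copy modified dst
}

int main() {
    unsigned short staged[4]   = {1, 2, 3, 4};
    unsigned short incoming[4] = {1, 2, 9, 4};
    std::printf("dirty = %d\n", cmp_memcpy_bytes(staged, incoming, sizeof staged));  // dirty = 1
    return 0;
}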
@@ -26,14 +26,6 @@ typedef glm::vec4 rk_vec4;
 typedef glm::mat3 rk_mat3;
 typedef glm::mat4 rk_mat4;

-#define RK_CHECK_MATH_TYPE(_t, _e, _c) static_assert(sizeof(_t) == sizeof(_e) * (_c))
-
-RK_CHECK_MATH_TYPE(rk_vec2, float, 2);
-RK_CHECK_MATH_TYPE(rk_vec3, float, 3);
-RK_CHECK_MATH_TYPE(rk_vec4, float, 4);
-RK_CHECK_MATH_TYPE(rk_mat3, rk_vec3, 3);
-RK_CHECK_MATH_TYPE(rk_mat4, rk_vec4, 4);
-
 #define vec3_right (rk_vec3(1.f, 0.f, 0.f))
 #define vec3_forward (rk_vec3(0.f, 1.f, 0.f))
 #define vec3_up (rk_vec3(0.f, 0.f, 1.f))
@@ -27,18 +27,14 @@ typedef rk_handle_t rk_triangles_t;
 typedef rk_handle_t rk_vertices_t;
 typedef rk_handle_t rk_batch_t;

-typedef rk_uint rk_texture_format;
-
-enum : rk_uint {
+enum rk_texture_format : rk_uint {
     RK_TEXTURE_FORMAT_SRGB8_A8 = 0,
     RK_TEXTURE_FORMAT_RGBA8 = 1,
     RK_TEXTURE_FORMAT_RGB10_A2 = 2,
     RK_TEXTURE_FORMAT_FLOAT_32 = 3
 };

-typedef rk_uint rk_texture_flags;
-
-enum : rk_uint {
+enum rk_texture_flags : rk_uint {
     RK_TEXTURE_FLAG_3D = RK_FLAG(0),
     RK_TEXTURE_FLAG_MIPMAPS = RK_FLAG(1),
     RK_TEXTURE_FLAG_MIN_NEAREST = 0,
@@ -47,53 +43,41 @@ enum : rk_uint {
     RK_TEXTURE_FLAG_MAG_LINEAR = RK_FLAG(3),
 };

-typedef rk_ubyte rk_vertex_format;
-
-enum : rk_ubyte {
+enum rk_vertex_format : rk_ubyte {
     RK_VERTEX_FORMAT_VEC3_FLOAT = 1,
     RK_VERTEX_FORMAT_VEC3_INT10 = 2,
-    RK_VERTEX_FORMAT_VEC3_UINT10 = 3,
-    RK_VERTEX_FORMAT_NORMALIZE = RK_FLAG(7),
-    RK_VERTEX_FORMAT_MASK = RK_VERTEX_FORMAT_NORMALIZE - 1
+    RK_VERTEX_FORMAT_VEC3_UINT10 = 3
 };

-typedef rk_ubyte rk_param_format;
+enum : rk_ubyte { RK_VERTEX_FORMAT_NORMALIZE = RK_FLAG(7) };
+enum : rk_ubyte { RK_VERTEX_FORMAT_MASK = RK_VERTEX_FORMAT_NORMALIZE - 1 };

-enum : rk_ubyte {
+enum rk_param_format : rk_ubyte {
     RK_PARAM_FORMAT_VEC3_FLOAT = 1,
     RK_PARAM_FORMAT_VEC3_SHORT = 2,
     RK_PARAM_FORMAT_VEC3_INT10 = 3,
     RK_PARAM_FORMAT_MAT3_FLOAT = 4,
-    RK_PARAM_FORMAT_MAT3_INT10 = 5,
-    RK_PARAM_FORMAT_NORMALIZE = RK_FLAG(7),
-    RK_PARAM_FORMAT_MASK = RK_PARAM_FORMAT_NORMALIZE - 1
+    RK_PARAM_FORMAT_MAT3_INT10 = 5
 };

-typedef rk_ubyte rk_instance_flags;
+enum : rk_ubyte { RK_PARAM_FORMAT_NORMALIZE = RK_FLAG(7) };
+enum : rk_ubyte { RK_PARAM_FORMAT_MASK = RK_PARAM_FORMAT_NORMALIZE - 1 };

-enum : rk_ubyte {
+enum rk_instance_flags : rk_ubyte {
     RK_INSTANCE_FLAG_SPAWNED = RK_FLAG(0),
-    RK_INSTANCE_FLAG_VISIBLE = RK_FLAG(1),
-    RK_INSTANCE_FLAGS_SPAWNED_VISIBLE = RK_INSTANCE_FLAG_SPAWNED | RK_INSTANCE_FLAG_VISIBLE
+    RK_INSTANCE_FLAG_VISIBLE = RK_FLAG(1)
 };

-typedef rk_ushort rk_instance_index;
+enum : rk_ubyte { RK_INSTANCE_FLAGS_SPAWNED_VISIBLE = RK_INSTANCE_FLAG_SPAWNED | RK_INSTANCE_FLAG_VISIBLE };

-enum : rk_uint {
-    RK_BATCH_MAX_SIZE = 1 << (sizeof(rk_instance_index) * 8)
-};
+enum : rk_uint { RK_BATCH_MAX_SIZE = 65536 };

-typedef rk_ushort rk_vertex_index;
-typedef rk_ushort rk_mesh_index;
-
-// param input types must be size compatible with an array of rk_param_input
-typedef rk_uint rk_param_input;
-
-#define RK_CHECK_PARAM_INPUT_TYPE(_t) static_assert(!(sizeof(_t) % sizeof(rk_param_input)))
-
-struct rk_mesh {
-    rk_uint base_index;
-    rk_uint ntriangles;
+union rk_mesh {
+    rk_uint packed;
+    struct {
+        rk_ushort base_index;
+        rk_ushort ntriangles;
+    };
 };

 RK_EXPORT void rk_render_initialize(
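Note (reviewer sketch, not part of the diff): rk_mesh changes from two 32-bit fields to a union that overlays 16-bit base_index and ntriangles on a single 32-bit word, which is what lets the Python binding pass one packed integer per mesh instead of two. A standalone illustration of that layout, assuming rk_ushort/rk_uint are 16/32-bit unsigned integers:

// Standalone illustration of the packed mesh record. The anonymous struct in a union
// mirrors the header above; which half of `packed` holds base_index depends on the
// platform's endianness, and reading `packed` after writing the fields is the usual
// (implementation-defined but universally supported) type-punning idiom.
#include <cstdint>
#include <cstdio>

union mesh_record {
    std::uint32_t packed;
    struct {
        std::uint16_t base_index;   // first index of the mesh in the shared index buffer
        std::uint16_t ntriangles;   // triangles to draw for this mesh
    };
};

int main() {
    mesh_record m{};
    m.base_index = 120;
    m.ntriangles = 36;
    std::printf("packed = 0x%08X\n", static_cast<unsigned>(m.packed));  // 0x00240078 on little-endian
    return 0;
}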
@@ -132,7 +116,7 @@ RK_EXPORT rk_vertices_t rk_create_vertices(
     rk_uint nvertices,
     rk_ubyte const * vertices,
     rk_uint nindices,
-    rk_vertex_index const * indices,
+    rk_ushort const * indices,
     rk_uint nmeshes,
     rk_mesh const * meshes);

@@ -145,8 +129,8 @@ RK_EXPORT void rk_fill_batch(
     rk_batch_t batch,
     rk_uint count,
     rk_instance_flags const * flags,
-    rk_mesh_index const * meshes,
-    rk_param_input const * const * params);
+    rk_ushort const * meshes,
+    rk_ubyte const * const * params);

 RK_EXPORT void rk_clear_buffer(
     rk_bool pixels,
@@ -27,6 +27,12 @@ typedef void (*rk_MultiDrawElementsIndirectFunc)(rk_uint, rk_uint, const void *,
 static rk_DrawElementsInstancedBaseInstanceFunc rk_DrawElementsInstancedBaseInstance = nullptr;
 static rk_MultiDrawElementsIndirectFunc rk_MultiDrawElementsIndirect = nullptr;

+struct rk_bucket {
+    unsigned size;
+    unsigned count;
+    rk_ushort * indices;
+};
+
 static unsigned rk_nbuckets = 0;
 static rk_bucket * rk_buckets = nullptr;

@@ -319,7 +325,7 @@ rk_vertices_t rk_create_vertices(
     rk_uint nvertices,
     rk_ubyte const * _vertices,
     rk_uint nindices,
-    rk_vertex_index const * indices,
+    rk_ushort const * indices,
     rk_uint nmeshes,
     rk_mesh const * meshes) {
     if (!format || !nvertices || !_vertices || !nindices || !indices) {
@@ -357,8 +363,8 @@ rk_vertices_t rk_create_vertices(
     memcpy(vertices->format, format, (format_size + 1) * sizeof(rk_vertex_format));
     vertices->vertices = new rk_ubyte[nvertices * vertex_size];
     memcpy(vertices->vertices, _vertices, nvertices * vertex_size);
-    vertices->indices = new rk_vertex_index[nindices];
-    memcpy(vertices->indices, indices, nindices * sizeof(rk_vertex_index));
+    vertices->indices = new rk_ushort[nindices];
+    memcpy(vertices->indices, indices, nindices * sizeof(rk_ushort));
     vertices->meshes = new rk_mesh[nmeshes];
     memcpy(vertices->meshes, meshes, nmeshes * sizeof(rk_mesh));
     vertices->vertices_buffer = 0;
@@ -377,7 +383,7 @@ static void rk_buckets_alloc(
         for (unsigned index = 0; index < count; ++index) {
             rk_bucket & bucket = rk_buckets[index];
             bucket.size = size;
-            bucket.indices = reinterpret_cast<rk_instance_index *>(malloc(size * sizeof(rk_instance_index)));
+            bucket.indices = reinterpret_cast<rk_ushort *>(malloc(size * sizeof(rk_ushort)));
         }
         reallocated = true;
     }
@@ -386,8 +392,7 @@ static void rk_buckets_alloc(
             rk_bucket & bucket = rk_buckets[index];
             if (bucket.size < size) {
                 bucket.size = size;
-                bucket.indices = reinterpret_cast<rk_instance_index *>(
-                    realloc(bucket.indices, size * sizeof(rk_instance_index)));
+                bucket.indices = reinterpret_cast<rk_ushort *>(realloc(bucket.indices, size * sizeof(rk_ushort)));
                 reallocated = true;
             }
         }
@@ -398,15 +403,13 @@ static void rk_buckets_alloc(
             rk_bucket & bucket = rk_buckets[index];
             if (bucket.size < size) {
                 bucket.size = size;
-                bucket.indices = reinterpret_cast<rk_instance_index *>(
-                    realloc(bucket.indices, size * sizeof(rk_instance_index)));
+                bucket.indices = reinterpret_cast<rk_ushort *>(realloc(bucket.indices, size * sizeof(rk_ushort)));
             }
         }
         for (unsigned index = rk_nbuckets; index < count; ++index) {
             rk_bucket & bucket = rk_buckets[index];
             bucket.size = size;
-            bucket.indices = reinterpret_cast<rk_instance_index *>(
-                malloc(size * sizeof(rk_instance_index)));
+            bucket.indices = reinterpret_cast<rk_ushort *>(malloc(size * sizeof(rk_ushort)));
         }
         rk_nbuckets = count;
         reallocated = true;
@@ -415,7 +418,7 @@ static void rk_buckets_alloc(
     unsigned total_size = rk_nbuckets * sizeof(rk_bucket);
     for (unsigned index = 0; index < rk_nbuckets; ++index) {
         rk_bucket const & bucket = rk_buckets[index];
-        total_size += bucket.size * sizeof(rk_instance_index);
+        total_size += bucket.size * sizeof(rk_ushort);
     }
     printf("[RK] rk_buckets_alloc() -> %d KiB\n", total_size / 1024);
 }
@@ -423,26 +426,26 @@ static void rk_buckets_alloc(

 static void rk_pack_vec3_float(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_vec3_float * __restrict dst = reinterpret_cast<rk_vec3_float *>(_dst);
     rk_vec3_float const * const __restrict src = reinterpret_cast<rk_vec3_float const *>(_src);
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         *dst = src[*index];
     }
 }

 static void rk_pack_vec3_short(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_vec3_short * __restrict dst = reinterpret_cast<rk_vec3_short *>(_dst);
     rk_vec3_float const * const __restrict src = reinterpret_cast<rk_vec3_float const *>(_src);
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         rk_vec3_float const & input = src[*index];
         dst->x = static_cast<rk_short>(input.x);
         dst->y = static_cast<rk_short>(input.y);
@@ -453,14 +456,14 @@ static void rk_pack_vec3_short(

 static void rk_pack_vec3_short_norm(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_vec3_short * __restrict dst = reinterpret_cast<rk_vec3_short *>(_dst);
     rk_vec3_float const * const __restrict src = reinterpret_cast<rk_vec3_float const *>(_src);
 #define _convert(s) (static_cast<rk_short>((s) * ((s) < 0.f ? 32768.f : 32767.f)))
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         rk_vec3_float const & input = src[*index];
         dst->x = _convert(input.x);
         dst->y = _convert(input.y);
@@ -472,14 +475,14 @@ static void rk_pack_vec3_short_norm(

 static void rk_pack_vec3_int10(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_vec3_int10 * __restrict dst = reinterpret_cast<rk_vec3_int10 *>(_dst);
     rk_vec3_float const * const __restrict src = reinterpret_cast<rk_vec3_float const *>(_src);
 #define _convert(s) (static_cast<rk_int>((s)) & 1023)
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         rk_vec3_float const & input = src[*index];
         *dst = _convert(input.x) | (_convert(input.y) << 10) | (_convert(input.z) << 20);
     }
@@ -488,14 +491,14 @@ static void rk_pack_vec3_int10(

 static void rk_pack_vec3_int10_norm(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_vec3_int10 * __restrict dst = reinterpret_cast<rk_vec3_int10 *>(_dst);
     rk_vec3_float const * const __restrict src = reinterpret_cast<rk_vec3_float const *>(_src);
 #define _convert(s) (static_cast<rk_int>((s) * ((s) < 0.f ? 512.f : 511.f)) & 1023)
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         rk_vec3_float const & input = src[*index];
         *dst = _convert(input.x) | (_convert(input.y) << 10) | (_convert(input.z) << 20);
     }
@@ -504,13 +507,13 @@ static void rk_pack_vec3_int10_norm(

 static void rk_pack_mat3_float(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_mat3_float * __restrict dst = reinterpret_cast<rk_mat3_float *>(_dst);
     rk_mat3_float const * const __restrict src = reinterpret_cast<rk_mat3_float const *>(_src);
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         *dst = src[*index];
     }
 #undef _convert
@@ -518,14 +521,14 @@ static void rk_pack_mat3_float(

 static void rk_pack_mat3_int10(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_mat3_int10 * __restrict dst = reinterpret_cast<rk_mat3_int10 *>(_dst);
     rk_mat3_float const * const __restrict src = reinterpret_cast<rk_mat3_float const *>(_src);
 #define _convert(s) (static_cast<rk_int>((s)) & 1023)
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         rk_mat3_float const & input = src[*index];
         dst->x = _convert(input.x.x) | (_convert(input.x.y) << 10) | (_convert(input.x.z) << 20);
         dst->y = _convert(input.y.x) | (_convert(input.y.y) << 10) | (_convert(input.y.z) << 20);
@@ -536,14 +539,14 @@ static void rk_pack_mat3_int10(

 static void rk_pack_mat3_int10_norm(
     unsigned const count,
-    rk_instance_index const * const __restrict indices,
-    rk_param_output * __restrict _dst,
-    rk_param_input const * const __restrict _src) {
-    rk_instance_index const * const last_index = indices + count;
+    rk_ushort const * const __restrict indices,
+    rk_ubyte * __restrict _dst,
+    rk_ubyte const * const __restrict _src) {
+    rk_ushort const * const last_index = indices + count;
     rk_mat3_int10 * __restrict dst = reinterpret_cast<rk_mat3_int10 *>(_dst);
     rk_mat3_float const * const __restrict src = reinterpret_cast<rk_mat3_float const *>(_src);
 #define _convert(s) (static_cast<rk_int>((s) * ((s) < 0.f ? 512.f : 511.f)) & 1023)
-    for (rk_instance_index const * __restrict index = indices; index < last_index; ++index, ++dst) {
+    for (rk_ushort const * __restrict index = indices; index < last_index; ++index, ++dst) {
         rk_mat3_float const & input = src[*index];
         dst->x = _convert(input.x.x) | (_convert(input.x.y) << 10) | (_convert(input.x.z) << 20);
         dst->y = _convert(input.y.x) | (_convert(input.y.y) << 10) | (_convert(input.y.z) << 20);
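Note (reviewer sketch, not part of the diff): the *_int10 and *_int10_norm packers above squeeze each float component into a signed 10-bit field, three components per 32-bit word. A worked standalone example of that _convert packing:

// Standalone example of the signed 10-bit packing used by the *_int10_norm packers:
// each component in [-1, 1] is scaled to 10 bits and the three components occupy the
// low 30 bits of one 32-bit word.
#include <cstdint>
#include <cstdio>

static std::uint32_t convert10(float s) {
    // mirrors the diff's _convert: (static_cast<rk_int>((s) * ((s) < 0.f ? 512.f : 511.f)) & 1023)
    return static_cast<std::uint32_t>(static_cast<std::int32_t>(s * (s < 0.f ? 512.f : 511.f)) & 1023);
}

int main() {
    std::uint32_t const packed = convert10(1.f) | (convert10(-1.f) << 10) | (convert10(0.f) << 20);
    std::printf("packed = 0x%08X\n", static_cast<unsigned>(packed));  // 0x000801FF: x=+511, y=-512, z=0
    return 0;
}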
@@ -611,11 +614,9 @@ rk_batch_t rk_create_batch(
     batch->nparams = nparams;
     batch->vertices = vertices;
     batch->flags = new rk_instance_flags[max_size];
-    memset(batch->flags, 0xFF, max_size * sizeof(rk_instance_flags));
-    batch->meshes = new rk_mesh_index[max_size];
-    memset(batch->meshes, 0xFF, max_size * sizeof(rk_mesh_index));
-    batch->indices = new rk_instance_index[max_size];
-    memset(batch->indices, 0, max_size * sizeof(rk_instance_index));
+    batch->meshes = new rk_ushort[max_size];
+    batch->indices = new rk_ushort[max_size];
+    memset(batch->indices, 0xFF, max_size * sizeof(rk_ushort));
     batch->commands = new rk_command[vertices->nmeshes];
     memset(batch->commands, 0, vertices->nmeshes * sizeof(rk_command));
     if (nparams) {
@@ -636,8 +637,7 @@ rk_batch_t rk_create_batch(
     } else {
         glGenBuffers(1, &vertices->indices_buffer);
         glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vertices->indices_buffer);
-        glBufferData(GL_ELEMENT_ARRAY_BUFFER,
-            vertices->nindices * sizeof(rk_vertex_index), vertices->indices, GL_STATIC_DRAW);
+        glBufferData(GL_ELEMENT_ARRAY_BUFFER, vertices->nindices * sizeof(rk_ushort), vertices->indices, GL_STATIC_DRAW);
     }
     if (rk_MultiDrawElementsIndirect) {
         glGenBuffers(1, &batch->commands_buffer);
@@ -747,10 +747,7 @@ rk_batch_t rk_create_batch(
                 break;
             }
             glVertexBindingDivisor(binding, 1);
-            param->src_len = param->src_size / sizeof(rk_param_input);
-            param->dst_len = param->dst_size / sizeof(rk_param_output);
-            param->source = new rk_param_input[max_size * param->src_len];
-            memset(param->source, 0xFF, max_size * param->src_size);
+            param->source = new rk_ubyte[max_size * param->src_size];
             offset += max_size * param->dst_size;
         }
     }
@@ -766,20 +763,20 @@ static void rk_sort_batch(
         bucket->count = 0;
     }
     rk_instance_flags const * __restrict flags = batch.flags;
-    rk_mesh_index const * __restrict mesh_index = batch.meshes;
+    rk_ushort const * __restrict mesh_index = batch.meshes;
     for (unsigned index = 0; index < batch.count; ++index, ++flags, ++mesh_index) {
         if ((*flags & RK_INSTANCE_FLAGS_SPAWNED_VISIBLE) == RK_INSTANCE_FLAGS_SPAWNED_VISIBLE) {
             rk_bucket & __restrict bucket = rk_buckets[*mesh_index];
             bucket.indices[bucket.count++] = index;
         }
     }
-    rk_instance_index * __restrict indices = batch.indices;
+    rk_ushort * __restrict indices = batch.indices;
     rk_command * __restrict command = batch.commands;
     rk_mesh const * __restrict mesh = batch.vertices->meshes;
     for (rk_bucket const * __restrict bucket = rk_buckets; bucket < last_bucket; ++bucket, ++mesh) {
         if (bucket->count) {
-            memcpy(indices, bucket->indices, bucket->count * sizeof(rk_instance_index));
-            command->nvertices = mesh->ntriangles * 3;
+            memcpy(indices, bucket->indices, bucket->count * sizeof(rk_ushort));
+            command->nvertices = static_cast<GLuint>(mesh->ntriangles) * 3;
             command->ninstances = bucket->count;
             command->base_index = mesh->base_index;
             command->base_instance = indices - batch.indices;
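Note (reviewer sketch, not part of the diff): rk_sort_batch above groups the indices of spawned-and-visible instances into one bucket per mesh, then emits one instanced draw command per non-empty bucket over a contiguous range of the flattened index array. A simplified sketch of that grouping step (illustrative only; the engine uses its preallocated rk_bucket pool rather than std::vector):

// Simplified sketch of the per-mesh bucketing: visible instances are grouped by mesh
// index so each mesh becomes one contiguous, instanced draw range.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    std::vector<std::uint16_t> instance_mesh = {2, 0, 2, 1, 0, 2};   // mesh index per instance
    std::vector<std::vector<std::uint16_t>> buckets(3);              // one bucket per mesh
    for (std::size_t i = 0; i < instance_mesh.size(); ++i) {
        buckets[instance_mesh[i]].push_back(static_cast<std::uint16_t>(i));  // store instance index
    }
    for (std::size_t mesh = 0; mesh < buckets.size(); ++mesh) {
        std::printf("mesh %zu: %zu instances\n", mesh, buckets[mesh].size());
    }
    return 0;
}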
@@ -803,7 +800,7 @@ static void rk_pack_batch(
         if (param->dirty) {
             param->dirty = false;
             if (batch.ninstances) {
-                rk_param_output * const dst = reinterpret_cast<rk_param_output *>(
+                rk_ubyte * const dst = reinterpret_cast<rk_ubyte *>(
                     glMapBufferRange(GL_ARRAY_BUFFER, param->offset, batch.ninstances * param->dst_size,
                         GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_UNSYNCHRONIZED_BIT));
                 if (dst) {
@@ -822,8 +819,8 @@ void rk_fill_batch(
     rk_batch_t _batch,
     rk_uint count,
     rk_instance_flags const * flags,
-    rk_mesh_index const * meshes,
-    rk_param_input const * const * params) {
+    rk_ushort const * meshes,
+    rk_ubyte const * const * params) {
     rk_batch const * const batch = reinterpret_cast<rk_batch const *>(_batch);
     if (!batch || !count || count > batch->max_size) {
         rk_printf("rk_fill_batch(): invalid params.");
@@ -834,7 +831,7 @@ void rk_fill_batch(
     if (batch->nparams) {
         got_all_params = (params != nullptr);
         if (params) {
-            for (rk_param_input const * const * param = params; param < params + batch->nparams; ++param) {
+            for (rk_ubyte const * const * param = params; param < params + batch->nparams; ++param) {
                 bool const got_param = (*param != nullptr);
                 got_any_params |= got_param;
                 got_all_params &= got_param;
@@ -852,20 +849,23 @@ void rk_fill_batch(
         return;
     }
     batch->count = count;
-    bool const cmp_flags = (flags && rk_cmp_memcpy(batch->flags, flags, batch->count));
-    bool const cmp_meshes = (meshes && rk_cmp_memcpy(batch->meshes, meshes, batch->count));
+    bool const cmp_flags = (flags &&
+        rk_cmp_memcpy<sizeof(rk_ubyte)>(batch->flags, flags, batch->count * sizeof(rk_instance_flags)));
+    bool const cmp_meshes = (meshes &&
+        rk_cmp_memcpy<sizeof(rk_ushort)>(batch->meshes, meshes, batch->count * sizeof(rk_mesh)));
     bool const need_sorting = (cmp_flags || cmp_meshes || resized);
     if (batch->nparams) {
         rk_parameter const * const last_param = batch->params + batch->nparams;
         if (got_any_params) {
-            rk_param_input const * const * src = params;
-            for (rk_parameter const * param = batch->params; param < last_param; ++param, ++src) {
-                param->dirty =
-                    ((*src && rk_cmp_memcpy(param->source, *src, batch->count * param->src_len)) || need_sorting);
+            rk_ubyte const * const * src = params;
+            for (rk_parameter const * dst = batch->params; dst < last_param; ++dst, ++src) {
+                dst->dirty = ((*src &&
+                    rk_cmp_memcpy<sizeof(rk_uint)>(dst->source, *src, batch->count * dst->src_size))
+                    || need_sorting);
             }
         } else if (need_sorting) {
-            for (rk_parameter const * param = batch->params; param < last_param; ++param) {
-                param->dirty = true;
+            for (rk_parameter const * dst = batch->params; dst < last_param; ++dst) {
+                dst->dirty = true;
             }
         }
     }
@@ -17,7 +17,6 @@
 #define _RK_ENGINE_RENDER_OPENGLES_H

 #include "../types.hpp"
-#include "../math.hpp"
 #include <GLES3/gl32.h>
 #include <GLES3/gl3ext.h>
 #include <GLES3/gl3platform.h>
@@ -45,7 +44,7 @@ struct rk_vertices {
     unsigned nmeshes;
     rk_vertex_format * format;
     rk_ubyte * vertices;
-    rk_vertex_index * indices;
+    rk_ushort * indices;
     rk_mesh * meshes;
     GLuint vertices_buffer;
     GLuint indices_buffer;
@@ -59,19 +58,12 @@ struct rk_command {
     GLuint base_instance;
 };

-// param output types must be size compatible with an array of rk_param_output
-typedef rk_uint rk_param_output;
-
-#define RK_CHECK_PARAM_OUTPUT_TYPE(_t) static_assert(!(sizeof(_t) % sizeof(rk_param_output)))
-
 struct rk_vec3_float {
     float x;
     float y;
     float z;
 };

-static_assert(sizeof(rk_vec3_float) == sizeof(rk_vec3));
-
 struct rk_vec3_short {
     rk_short x;
     rk_short y;
@@ -88,29 +80,17 @@ struct rk_mat3_float {
     rk_vec3_float z;
 };

-static_assert(sizeof(rk_mat3_float) == sizeof(rk_mat3));
-
 struct rk_mat3_int10 {
     rk_vec3_int10 x;
     rk_vec3_int10 y;
     rk_vec3_int10 z;
 };

-RK_CHECK_PARAM_INPUT_TYPE(rk_vec3_float);
-RK_CHECK_PARAM_INPUT_TYPE(rk_mat3_float);
-
-RK_CHECK_PARAM_OUTPUT_TYPE(rk_vec3_float);
-RK_CHECK_PARAM_OUTPUT_TYPE(rk_vec3_short);
-RK_CHECK_PARAM_OUTPUT_TYPE(rk_vec3_int10);
-RK_CHECK_PARAM_OUTPUT_TYPE(rk_vec3_uint10);
-RK_CHECK_PARAM_OUTPUT_TYPE(rk_mat3_float);
-RK_CHECK_PARAM_OUTPUT_TYPE(rk_mat3_int10);
-
 typedef void (*rk_packer)(
     unsigned const, // count
-    rk_instance_index const * const, // indices
-    rk_param_output *, // dst
-    rk_param_input const * const); // src
+    rk_ushort const * const, // indices
+    rk_ubyte *, // dst
+    rk_ubyte const * const); // src

 struct rk_parameter {
     mutable bool dirty;
@@ -118,18 +98,10 @@ struct rk_parameter {
     unsigned offset;
     unsigned src_size;
     unsigned dst_size;
-    unsigned src_len;
-    unsigned dst_len;
-    rk_param_input * source;
+    rk_ubyte * source;
     rk_packer packer;
 };

-struct rk_bucket {
-    unsigned size;
-    unsigned count;
-    rk_instance_index * indices;
-};
-
 enum rk_batch_state {
     RK_BATCH_STATE_EMPTY = 0,
     RK_BATCH_STATE_FILLED = 1,
@@ -146,8 +118,8 @@ struct rk_batch {
     unsigned nparams;
     rk_vertices const * vertices;
     rk_instance_flags * flags;
-    rk_mesh_index * meshes;
-    rk_instance_index * indices;
+    rk_ushort * meshes;
+    rk_ushort * indices;
     rk_command * commands;
     rk_parameter * params;
     GLuint vertex_array;