1 | #if defined(SOKOL_IMPL) && !defined(SOKOL_GFX_IMPL) |
2 | #define SOKOL_GFX_IMPL |
3 | #endif |
4 | #ifndef SOKOL_GFX_INCLUDED |
5 | /* |
6 | sokol_gfx.h -- simple 3D API wrapper |
7 | |
8 | Project URL: https://github.com/floooh/sokol |
9 | |
10 | Example code: https://github.com/floooh/sokol-samples |
11 | |
12 | Do this: |
13 | #define SOKOL_IMPL or |
14 | #define SOKOL_GFX_IMPL |
15 | before you include this file in *one* C or C++ file to create the |
16 | implementation. |
17 | |
18 | In the same place define one of the following to select the rendering |
19 | backend: |
20 | #define SOKOL_GLCORE33 |
21 | #define SOKOL_GLES2 |
22 | #define SOKOL_GLES3 |
23 | #define SOKOL_D3D11 |
24 | #define SOKOL_METAL |
25 | #define SOKOL_WGPU |
26 | #define SOKOL_DUMMY_BACKEND |
27 | |
28 | I.e. for the GL 3.3 Core Profile it should look like this: |
29 | |
30 | #include ... |
31 | #include ... |
32 | #define SOKOL_IMPL |
33 | #define SOKOL_GLCORE33 |
34 | #include "sokol_gfx.h" |
35 | |
36 | The dummy backend replaces the platform-specific backend code with empty |
37 | stub functions. This is useful for writing tests that need to run on the |
38 | command line. |
39 | |
40 | Optionally provide the following defines with your own implementations: |
41 | |
42 | SOKOL_ASSERT(c) - your own assert macro (default: assert(c)) |
43 | SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false)) |
44 | SOKOL_GFX_API_DECL - public function declaration prefix (default: extern) |
45 | SOKOL_API_DECL - same as SOKOL_GFX_API_DECL |
46 | SOKOL_API_IMPL - public function implementation prefix (default: -) |
47 | SOKOL_TRACE_HOOKS - enable trace hook callbacks (search below for TRACE HOOKS) |
48 | SOKOL_EXTERNAL_GL_LOADER - indicates that you're using your own GL loader, in this case |
49 | sokol_gfx.h will not include any platform GL headers and disable |
50 | the integrated Win32 GL loader |
51 | |
52 | If sokol_gfx.h is compiled as a DLL, define the following before |
53 | including the declaration or implementation: |
54 | |
55 | SOKOL_DLL |
56 | |
57 | On Windows, SOKOL_DLL will define SOKOL_GFX_API_DECL as __declspec(dllexport) |
58 | or __declspec(dllimport) as needed. |
59 | |
60 | If you want to compile without deprecated structs and functions, |
61 | define: |
62 | |
63 | SOKOL_NO_DEPRECATED |
64 | |
65 | Optionally define the following to force debug checks and validations |
66 | even in release mode: |
67 | |
68 | SOKOL_DEBUG - by default this is defined if _DEBUG is defined |
69 | |
70 | sokol_gfx DOES NOT: |
71 | =================== |
72 | - create a window or the 3D-API context/device, you must do this |
73 | before sokol_gfx is initialized, and pass any required information |
74 | (like 3D device pointers) to the sokol_gfx initialization call |
75 | |
76 | - present the rendered frame, how this is done exactly usually depends |
77 | on how the window and 3D-API context/device was created |
78 | |
79 | - provide a unified shader language, instead 3D-API-specific shader |
80 | source-code or shader-bytecode must be provided (for the "official" |
81 | offline shader cross-compiler, see here: |
82 | https://github.com/floooh/sokol-tools/blob/master/docs/sokol-shdc.md) |
83 | |
84 | |
85 | STEP BY STEP |
86 | ============ |
87 | --- to initialize sokol_gfx, after creating a window and a 3D-API |
88 | context/device, call: |
89 | |
90 | sg_setup(const sg_desc*) |
91 | |
92 | --- create resource objects (at least buffers, shaders and pipelines, |
93 | and optionally images and passes): |
94 | |
95 | sg_buffer sg_make_buffer(const sg_buffer_desc*) |
96 | sg_image sg_make_image(const sg_image_desc*) |
97 | sg_shader sg_make_shader(const sg_shader_desc*) |
98 | sg_pipeline sg_make_pipeline(const sg_pipeline_desc*) |
99 | sg_pass sg_make_pass(const sg_pass_desc*) |
100 | |
101 | --- start rendering to the default frame buffer with: |
102 | |
103 | sg_begin_default_pass(const sg_pass_action* action, int width, int height) |
104 | |
105 | ...or alternatively with: |
106 | |
107 | sg_begin_default_passf(const sg_pass_action* action, float width, float height) |
108 | |
109 | ...which takes the framebuffer width and height as float values. |
110 | |
111 | --- or start rendering to an offscreen framebuffer with: |
112 | |
113 | sg_begin_pass(sg_pass pass, const sg_pass_action* action) |
114 | |
115 | --- set the pipeline state for the next draw call with: |
116 | |
117 | sg_apply_pipeline(sg_pipeline pip) |
118 | |
119 | --- fill an sg_bindings struct with the resource bindings for the next |
120 | draw call (1..N vertex buffers, 0 or 1 index buffer, 0..N image objects |
121 | to use as textures each on the vertex-shader- and fragment-shader-stage) |
122 | and then call |
123 | |
124 | sg_apply_bindings(const sg_bindings* bindings) |
125 | |
126 | to update the resource bindings |
127 | |
128 | --- optionally update shader uniform data with: |
129 | |
130 | sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data) |
131 | |
132 | Read the section 'UNIFORM DATA LAYOUT' to learn about the expected memory layout |
133 | of the uniform data passed into sg_apply_uniforms(). |
134 | |
135 | --- kick off a draw call with: |
136 | |
137 | sg_draw(int base_element, int num_elements, int num_instances) |
138 | |
139 | The sg_draw() function unifies all the different ways to render primitives |
140 | in a single call (indexed vs non-indexed rendering, and instanced vs non-instanced |
141 | rendering). In case of indexed rendering, base_element and num_elements specify |
142 | indices in the currently bound index buffer. In case of non-indexed rendering |
143 | base_element and num_elements specify vertices in the currently bound |
144 | vertex-buffer(s). To perform instanced rendering, the rendering pipeline |
145 | must be setup for instancing (see sg_pipeline_desc below), a separate vertex buffer |
146 | containing per-instance data must be bound, and the num_instances parameter |
147 | must be > 1. |
148 | |
149 | --- finish the current rendering pass with: |
150 | |
151 | sg_end_pass() |
152 | |
153 | --- when done with the current frame, call |
154 | |
155 | sg_commit() |
156 | |
157 | --- at the end of your program, shutdown sokol_gfx with: |
158 | |
159 | sg_shutdown() |
160 | |
161 | --- if you need to destroy resources before sg_shutdown(), call: |
162 | |
163 | sg_destroy_buffer(sg_buffer buf) |
164 | sg_destroy_image(sg_image img) |
165 | sg_destroy_shader(sg_shader shd) |
166 | sg_destroy_pipeline(sg_pipeline pip) |
167 | sg_destroy_pass(sg_pass pass) |
168 | |
169 | --- to set a new viewport rectangle, call |
170 | |
171 | sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) |
172 | |
173 | ...or if you want to specify the viewport rectangle with float values: |
174 | |
175 | sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left) |
176 | |
177 | --- to set a new scissor rect, call: |
178 | |
179 | sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) |
180 | |
181 | ...or with float values: |
182 | |
183 | sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left) |
184 | |
185 | Both sg_apply_viewport() and sg_apply_scissor_rect() must be called |
186 | inside a rendering pass |
187 | |
188 | Note that sg_begin_default_pass() and sg_begin_pass() will reset both the |
189 | viewport and scissor rectangles to cover the entire framebuffer. |
190 | |
191 | --- to update (overwrite) the content of buffer and image resources, call: |
192 | |
193 | sg_update_buffer(sg_buffer buf, const sg_range* data) |
194 | sg_update_image(sg_image img, const sg_image_data* data) |
195 | |
196 | Buffers and images to be updated must have been created with |
197 | SG_USAGE_DYNAMIC or SG_USAGE_STREAM |
198 | |
199 | Only one update per frame is allowed for buffer and image resources when |
200 | using the sg_update_*() functions. The rationale is to have a simple |
201 | countermeasure to avoid the CPU scribbling over data the GPU is currently |
202 | using, or the CPU having to wait for the GPU |
203 | |
204 | Buffer and image updates can be partial, as long as a rendering |
205 | operation only references the valid (updated) data in the |
206 | buffer or image. |
207 | |
208 | --- to append a chunk of data to a buffer resource, call: |
209 | |
210 | int sg_append_buffer(sg_buffer buf, const sg_range* data) |
211 | |
212 | The difference to sg_update_buffer() is that sg_append_buffer() |
213 | can be called multiple times per frame to append new data to the |
214 | buffer piece by piece, optionally interleaved with draw calls referencing |
215 | the previously written data. |
216 | |
217 | sg_append_buffer() returns a byte offset to the start of the |
218 | written data, this offset can be assigned to |
219 | sg_bindings.vertex_buffer_offsets[n] or |
220 | sg_bindings.index_buffer_offset |
221 | |
222 | Code example: |
223 | |
224 | for (...) { |
225 | const void* data = ...; |
226 | const int num_bytes = ...; |
227 | int offset = sg_append_buffer(buf, &(sg_range) { .ptr=data, .size=num_bytes }); |
228 | bindings.vertex_buffer_offsets[0] = offset; |
229 | sg_apply_pipeline(pip); |
230 | sg_apply_bindings(&bindings); |
231 | sg_apply_uniforms(...); |
232 | sg_draw(...); |
233 | } |
234 | |
235 | A buffer to be used with sg_append_buffer() must have been created |
236 | with SG_USAGE_DYNAMIC or SG_USAGE_STREAM. |
237 | |
238 | If the application appends more data to the buffer than fits into |
239 | the buffer, the buffer will go into the "overflow" state for the |
240 | rest of the frame. |
241 | |
242 | Any draw calls attempting to render an overflown buffer will be |
243 | silently dropped (in debug mode this will also result in a |
244 | validation error). |
245 | |
246 | You can also check manually if a buffer is in overflow-state by calling |
247 | |
248 | bool sg_query_buffer_overflow(sg_buffer buf) |
249 | |
250 | You can manually check to see if an overflow would occur before adding |
251 | any data to a buffer by calling |
252 | |
253 | bool sg_query_buffer_will_overflow(sg_buffer buf, size_t size) |
254 | |
255 | NOTE: Due to restrictions in underlying 3D-APIs, appended chunks of |
256 | data will be 4-byte aligned in the destination buffer. This means |
257 | that there will be gaps in index buffers containing 16-bit indices |
258 | when the number of indices in a call to sg_append_buffer() is |
259 | odd. This isn't a problem when each call to sg_append_buffer() |
260 | is associated with one draw call, but will be problematic when |
261 | a single indexed draw call spans several appended chunks of indices. |
262 | |
263 | --- to check at runtime for optional features, limits and pixelformat support, |
264 | call: |
265 | |
266 | sg_features sg_query_features() |
267 | sg_limits sg_query_limits() |
268 | sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt) |
269 | |
270 | --- if you need to call into the underlying 3D-API directly, you must call: |
271 | |
272 | sg_reset_state_cache() |
273 | |
274 | ...before calling sokol_gfx functions again |
275 | |
276 | --- you can inspect the original sg_desc structure handed to sg_setup() |
277 | by calling sg_query_desc(). This will return an sg_desc struct with |
278 | the default values patched in instead of any zero-initialized values |
279 | |
280 | --- you can inspect various internal resource attributes via: |
281 | |
282 | sg_buffer_info sg_query_buffer_info(sg_buffer buf) |
283 | sg_image_info sg_query_image_info(sg_image img) |
284 | sg_shader_info sg_query_shader_info(sg_shader shd) |
285 | sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip) |
286 | sg_pass_info sg_query_pass_info(sg_pass pass) |
287 | |
288 | ...please note that the returned info-structs are tied quite closely |
289 | to sokol_gfx.h internals, and may change more often than other |
290 | public API functions and structs. |
291 | |
292 | --- you can ask at runtime what backend sokol_gfx.h has been compiled |
293 | for, or whether the GLES3 backend had to fall back to GLES2 with: |
294 | |
295 | sg_backend sg_query_backend(void) |
296 | |
297 | --- you can query the default resource creation parameters through the functions |
298 | |
299 | sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc) |
300 | sg_image_desc sg_query_image_defaults(const sg_image_desc* desc) |
301 | sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc) |
302 | sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc) |
303 | sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc) |
304 | |
305 | These functions take a pointer to a desc structure which may contain |
306 | zero-initialized items for default values. These zero-init values |
307 | will be replaced with their concrete values in the returned desc |
308 | struct. |
309 | |
310 | |
311 | ON INITIALIZATION: |
312 | ================== |
313 | When calling sg_setup(), a pointer to an sg_desc struct must be provided |
314 | which contains initialization options. These options provide two types |
315 | of information to sokol-gfx: |
316 | |
317 | (1) upper bounds and limits needed to allocate various internal |
318 | data structures: |
319 | - the max number of resources of each type that can |
320 | be alive at the same time, this is used for allocating |
321 | internal pools |
322 | - the max overall size of uniform data that can be |
323 | updated per frame, including a worst-case alignment |
324 | per uniform update (this worst-case alignment is 256 bytes) |
325 | - the max size of all dynamic resource updates (sg_update_buffer, |
326 | sg_append_buffer and sg_update_image) per frame |
327 | - the max number of entries in the texture sampler cache |
328 | (how many unique texture samplers can exist at the same time) |
329 | Not all of those limit values are used by all backends, but it is |
330 | good practice to provide them nonetheless. |
331 | |
332 | (2) 3D-API "context information" (sometimes also called "bindings"): |
333 | sokol_gfx.h doesn't create or initialize 3D API objects which are |
334 | closely related to the presentation layer (this includes the "rendering |
335 | device", the swapchain, and any objects which depend on the |
336 | swapchain). These API objects (or callback functions to obtain |
337 | them, if those objects might change between frames), must |
338 | be provided in a nested sg_context_desc struct inside the |
339 | sg_desc struct. If sokol_gfx.h is used together with |
340 | sokol_app.h, have a look at the sokol_glue.h header which provides |
341 | a convenience function to get a sg_context_desc struct filled out |
342 | with context information provided by sokol_app.h |
343 | |
344 | See the documentation block of the sg_desc struct below for more information. |
345 | |
346 | |
347 | UNIFORM DATA LAYOUT: |
348 | ==================== |
349 | NOTE: if you use the sokol-shdc shader compiler tool, you don't need to worry |
350 | about the following details. |
351 | |
352 | The data that's passed into the sg_apply_uniforms() function must adhere to |
353 | specific layout rules so that the GPU shader finds the uniform block |
354 | items at the right offset. |
355 | |
356 | For the D3D11 and Metal backends, sokol-gfx only cares about the size of uniform |
357 | blocks, but not about the internal layout. The data will just be copied into |
358 | a uniform/constant buffer in a single operation and it's up to you to arrange the |
359 | CPU-side layout so that it matches the GPU side layout. This also means that with |
360 | the D3D11 and Metal backends you are not limited to a 'cross-platform' subset |
361 | of uniform variable types. |
362 | |
363 | If you ever only use one of the D3D11, Metal *or* WebGPU backend, you can stop reading here. |
364 | |
365 | For the GL backends, the internal layout of uniform blocks matters though, |
366 | and you are limited to a small number of uniform variable types. This is |
367 | because sokol-gfx must be able to locate the uniform block members in order |
368 | to upload them to the GPU with glUniformXXX() calls. |
369 | |
370 | To describe the uniform block layout to sokol-gfx, the following information |
371 | must be passed to the sg_make_shader() call in the sg_shader_desc struct: |
372 | |
373 | - a hint about the used packing rule (either SG_UNIFORMLAYOUT_NATIVE or |
374 | SG_UNIFORMLAYOUT_STD140) |
375 | - a list of the uniform block members types in the correct order they |
376 | appear on the CPU side |
377 | |
378 | For example if the GLSL shader has the following uniform declarations: |
379 | |
380 | uniform mat4 mvp; |
381 | uniform vec2 offset0; |
382 | uniform vec2 offset1; |
383 | uniform vec2 offset2; |
384 | |
385 | ...and on the CPU side, there's a similar C struct: |
386 | |
387 | typedef struct { |
388 | float mvp[16]; |
389 | float offset0[2]; |
390 | float offset1[2]; |
391 | float offset2[2]; |
392 | } params_t; |
393 | |
394 | ...the uniform block description in the sg_shader_desc must look like this: |
395 | |
396 | sg_shader_desc desc = { |
397 | .vs.uniform_blocks[0] = { |
398 | .size = sizeof(params_t), |
399 | .layout = SG_UNIFORMLAYOUT_NATIVE, // this is the default and can be omitted |
400 | .uniforms = { |
401 | // order must be the same as in 'params_t': |
402 | [0] = { .name = "mvp", .type = SG_UNIFORMTYPE_MAT4 }, |
403 | [1] = { .name = "offset0", .type = SG_UNIFORMTYPE_VEC2 }, |
404 | [2] = { .name = "offset1", .type = SG_UNIFORMTYPE_VEC2 }, |
405 | [3] = { .name = "offset2", .type = SG_UNIFORMTYPE_VEC2 }, |
406 | } |
407 | } |
408 | }; |
409 | |
410 | With this information sokol-gfx can now compute the correct offsets of the data items |
411 | within the uniform block struct. |
412 | |
413 | The SG_UNIFORMLAYOUT_NATIVE packing rule works fine if only the GL backends are used, |
414 | but for proper D3D11/Metal/GL a subset of the std140 layout must be used which is |
415 | described in the next section: |
416 | |
417 | |
418 | CROSS-BACKEND COMMON UNIFORM DATA LAYOUT |
419 | ======================================== |
420 | For cross-platform / cross-3D-backend code it is important that the same uniform block |
421 | layout on the CPU side can be used for all sokol-gfx backends. To achieve this, |
422 | a common subset of the std140 layout must be used: |
423 | |
424 | - The uniform block layout hint in sg_shader_desc must be explicitly set to |
425 | SG_UNIFORMLAYOUT_STD140. |
426 | - Only the following GLSL uniform types can be used (with their associated sokol-gfx enums): |
427 | - float => SG_UNIFORMTYPE_FLOAT |
428 | - vec2 => SG_UNIFORMTYPE_FLOAT2 |
429 | - vec3 => SG_UNIFORMTYPE_FLOAT3 |
430 | - vec4 => SG_UNIFORMTYPE_FLOAT4 |
431 | - int => SG_UNIFORMTYPE_INT |
432 | - ivec2 => SG_UNIFORMTYPE_INT2 |
433 | - ivec3 => SG_UNIFORMTYPE_INT3 |
434 | - ivec4 => SG_UNIFORMTYPE_INT4 |
435 | - mat4 => SG_UNIFORMTYPE_MAT4 |
436 | - Alignment for those types must be as follows (in bytes): |
437 | - float => 4 |
438 | - vec2 => 8 |
439 | - vec3 => 16 |
440 | - vec4 => 16 |
441 | - int => 4 |
442 | - ivec2 => 8 |
443 | - ivec3 => 16 |
444 | - ivec4 => 16 |
445 | - mat4 => 16 |
446 | - Arrays are only allowed for the following types: vec4, ivec4, mat4. |
447 | |
448 | Note that the HLSL cbuffer layout rules are slightly different from the |
449 | std140 layout rules, this means that the cbuffer declarations in HLSL code |
450 | must be tweaked so that the layout is compatible with std140. |
451 | |
452 | By far the easiest way to tackle the common uniform block layout problem is |
453 | to use the sokol-shdc shader cross-compiler tool! |
454 | |
455 | |
456 | BACKEND-SPECIFIC TOPICS: |
457 | ======================== |
458 | --- The GL backends need to know about the internal structure of uniform |
459 | blocks, and the texture sampler-name and -type. The uniform layout details |
460 | are described in the UNIFORM DATA LAYOUT section above. |
461 | |
462 | // uniform block structure and texture image definition in sg_shader_desc: |
463 | sg_shader_desc desc = { |
464 | // uniform block description (size and internal structure) |
465 | .vs.uniform_blocks[0] = { |
466 | ... |
467 | }, |
468 | // one texture on the fragment-shader-stage, GLES2/WebGL needs name and image type |
469 | .fs.images[0] = { .name="tex", .type=SG_IMAGETYPE_ARRAY } |
470 | ... |
471 | }; |
472 | |
473 | --- the Metal and D3D11 backends only need to know the size of uniform blocks, |
474 | not their internal member structure, and they only need to know |
475 | the type of a texture sampler, not its name: |
476 | |
477 | sg_shader_desc desc = { |
478 | .vs.uniform_blocks[0].size = sizeof(params_t), |
479 | .fs.images[0].type = SG_IMAGETYPE_ARRAY, |
480 | ... |
481 | }; |
482 | |
483 | --- when creating a shader object, GLES2/WebGL need to know the vertex |
484 | attribute names as used in the vertex shader: |
485 | |
486 | sg_shader_desc desc = { |
487 | .attrs = { |
488 | [0] = { .name="position" }, |
489 | [1] = { .name="color1" } |
490 | } |
491 | }; |
492 | |
493 | The vertex attribute names provided when creating a shader will be |
494 | used later in sg_create_pipeline() for matching the vertex layout |
495 | to vertex shader inputs. |
496 | |
497 | --- on D3D11 you need to provide a semantic name and semantic index in the |
498 | shader description struct instead (see the D3D11 documentation on |
499 | D3D11_INPUT_ELEMENT_DESC for details): |
500 | |
501 | sg_shader_desc desc = { |
502 | .attrs = { |
503 | [0] = { .sem_name="POSITION", .sem_index=0 }, |
504 | [1] = { .sem_name="COLOR", .sem_index=1 } |
505 | } |
506 | }; |
507 | |
508 | The provided semantic information will be used later in sg_create_pipeline() |
509 | to match the vertex layout to vertex shader inputs. |
510 | |
511 | --- on D3D11, and when passing HLSL source code (instead of byte code) to shader |
512 | creation, you can optionally define the shader model targets on the vertex |
513 | stage: |
514 | |
515 | sg_shader_desc desc = { |
516 | .vs = { |
517 | ... |
518 | .d3d11_target = "vs_5_0" |
519 | }, |
520 | .fs = { |
521 | ... |
522 | .d3d11_target = "ps_5_0" |
523 | } |
524 | }; |
525 | |
526 | The default targets are "vs_4_0" and "ps_4_0". Note that those target names |
527 | are only used when compiling shaders from source. They are ignored when |
528 | creating a shader from bytecode. |
529 | |
530 | --- on Metal, GL 3.3 or GLES3/WebGL2, you don't need to provide an attribute |
531 | name or semantic name, since vertex attributes can be bound by their slot index |
532 | (this is mandatory in Metal, and optional in GL): |
533 | |
534 | sg_pipeline_desc desc = { |
535 | .layout = { |
536 | .attrs = { |
537 | [0] = { .format=SG_VERTEXFORMAT_FLOAT3 }, |
538 | [1] = { .format=SG_VERTEXFORMAT_FLOAT4 } |
539 | } |
540 | } |
541 | }; |
542 | |
543 | |
544 | WORKING WITH CONTEXTS |
545 | ===================== |
546 | sokol-gfx allows to switch between different rendering contexts and |
547 | associate resource objects with contexts. This is useful to |
548 | create GL applications that render into multiple windows. |
549 | |
550 | A rendering context keeps track of all resources created while |
551 | the context is active. When the context is destroyed, all resources |
552 | "belonging to the context" are destroyed as well. |
553 | |
554 | A default context will be created and activated implicitly in |
555 | sg_setup(), and destroyed in sg_shutdown(). So for a typical application |
556 | which *doesn't* use multiple contexts, nothing changes, and calling |
557 | the context functions isn't necessary. |
558 | |
559 | Three functions have been added to work with contexts: |
560 | |
561 | --- sg_context sg_setup_context(): |
562 | This must be called once after a GL context has been created and |
563 | made active. |
564 | |
565 | --- void sg_activate_context(sg_context ctx) |
566 | This must be called after making a different GL context active. |
567 | Apart from 3D-API-specific actions, the call to sg_activate_context() |
568 | will internally call sg_reset_state_cache(). |
569 | |
570 | --- void sg_discard_context(sg_context ctx) |
571 | This must be called right before a GL context is destroyed and |
572 | will destroy all resources associated with the context (that |
573 | have been created while the context was active). The GL context must be |
574 | active at the time sg_discard_context(sg_context ctx) is called. |
575 | |
576 | Also note that resources (buffers, images, shaders and pipelines) must |
577 | only be used or destroyed while the same GL context is active that |
578 | was also active while the resource was created (an exception is |
579 | resource sharing on GL, such resources can be used while |
580 | another context is active, but must still be destroyed under |
581 | the same context that was active during creation). |
582 | |
583 | For more information, check out the multiwindow-glfw sample: |
584 | |
585 | https://github.com/floooh/sokol-samples/blob/master/glfw/multiwindow-glfw.c |
586 | |
587 | |
588 | TRACE HOOKS: |
589 | ============ |
590 | sokol_gfx.h optionally allows to install "trace hook" callbacks for |
591 | each public API function. When a public API function is called, and |
592 | a trace hook callback has been installed for this function, the |
593 | callback will be invoked with the parameters and result of the function. |
594 | This is useful for things like debugging- and profiling-tools, or |
595 | keeping track of resource creation and destruction. |
596 | |
597 | To use the trace hook feature: |
598 | |
599 | --- Define SOKOL_TRACE_HOOKS before including the implementation. |
600 | |
601 | --- Setup an sg_trace_hooks structure with your callback function |
602 | pointers (keep all function pointers you're not interested |
603 | in zero-initialized), optionally set the user_data member |
604 | in the sg_trace_hooks struct. |
605 | |
606 | --- Install the trace hooks by calling sg_install_trace_hooks(), |
607 | the return value of this function is another sg_trace_hooks |
608 | struct which contains the previous set of trace hooks. |
609 | You should keep this struct around, and call those previous |
610 | functions pointers from your own trace callbacks for proper |
611 | chaining. |
612 | |
613 | As an example of how trace hooks are used, have a look at the |
614 | imgui/sokol_gfx_imgui.h header which implements a realtime |
615 | debugging UI for sokol_gfx.h on top of Dear ImGui. |
616 | |
617 | |
618 | A NOTE ON PORTABLE PACKED VERTEX FORMATS: |
619 | ========================================= |
620 | There are two things to consider when using packed |
621 | vertex formats like UBYTE4, SHORT2, etc which need to work |
622 | across all backends: |
623 | |
624 | - D3D11 can only convert *normalized* vertex formats to |
625 | floating point during vertex fetch, normalized formats |
626 | have a trailing 'N', and are "normalized" to a range |
627 | -1.0..+1.0 (for the signed formats) or 0.0..1.0 (for the |
628 | unsigned formats): |
629 | |
630 | - SG_VERTEXFORMAT_BYTE4N |
631 | - SG_VERTEXFORMAT_UBYTE4N |
632 | - SG_VERTEXFORMAT_SHORT2N |
633 | - SG_VERTEXFORMAT_USHORT2N |
634 | - SG_VERTEXFORMAT_SHORT4N |
635 | - SG_VERTEXFORMAT_USHORT4N |
636 | |
637 | D3D11 will not convert *non-normalized* vertex formats to floating point |
638 | vertex shader inputs, those can only be used with the *ivecn* vertex shader |
639 | input types when D3D11 is used as backend (GL and Metal can use both formats) |
640 | |
641 | - SG_VERTEXFORMAT_BYTE4, |
642 | - SG_VERTEXFORMAT_UBYTE4 |
643 | - SG_VERTEXFORMAT_SHORT2 |
644 | - SG_VERTEXFORMAT_SHORT4 |
645 | |
646 | - WebGL/GLES2 cannot use integer vertex shader inputs (int or ivecn) |
647 | |
648 | - SG_VERTEXFORMAT_UINT10_N2 is not supported on WebGL/GLES2 |
649 | |
650 | So for a vertex input layout which works on all platforms, only use the following |
651 | vertex formats, and if needed "expand" the normalized vertex shader |
652 | inputs in the vertex shader by multiplying with 127.0, 255.0, 32767.0 or |
653 | 65535.0: |
654 | |
655 | - SG_VERTEXFORMAT_FLOAT, |
656 | - SG_VERTEXFORMAT_FLOAT2, |
657 | - SG_VERTEXFORMAT_FLOAT3, |
658 | - SG_VERTEXFORMAT_FLOAT4, |
659 | - SG_VERTEXFORMAT_BYTE4N, |
660 | - SG_VERTEXFORMAT_UBYTE4N, |
661 | - SG_VERTEXFORMAT_SHORT2N, |
662 | - SG_VERTEXFORMAT_USHORT2N |
663 | - SG_VERTEXFORMAT_SHORT4N, |
664 | - SG_VERTEXFORMAT_USHORT4N |
665 | |
666 | |
667 | MEMORY ALLOCATION OVERRIDE |
668 | ========================== |
669 | You can override the memory allocation functions at initialization time |
670 | like this: |
671 | |
672 | void* my_alloc(size_t size, void* user_data) { |
673 | return malloc(size); |
674 | } |
675 | |
676 | void my_free(void* ptr, void* user_data) { |
677 | free(ptr); |
678 | } |
679 | |
680 | ... |
681 | sg_setup(&(sg_desc){ |
682 | // ... |
683 | .allocator = { |
684 | .alloc = my_alloc, |
685 | .free = my_free, |
686 | .user_data = ..., |
687 | } |
688 | }); |
689 | ... |
690 | |
691 | If no overrides are provided, malloc and free will be used. |
692 | |
693 | This only affects memory allocation calls done by sokol_gfx.h |
694 | itself though, not any allocations in OS libraries. |
695 | |
696 | |
697 | LOG FUNCTION OVERRIDE |
698 | ===================== |
699 | You can override the log function at initialization time like this: |
700 | |
701 | void my_log(const char* message, void* user_data) { |
702 | printf("sg says: %s\n", message); |
703 | } |
704 | |
705 | ... |
706 | sg_setup(&(sg_desc){ |
707 | // ... |
708 | .logger = { |
709 | .log_cb = my_log, |
710 | .user_data = ..., |
711 | } |
712 | }); |
713 | ... |
714 | |
715 | If no overrides are provided, puts will be used on most platforms. |
716 | On Android, __android_log_write will be used instead. |
717 | |
718 | |
719 | COMMIT LISTENERS |
720 | ================ |
721 | It's possible to hook callback functions into sokol-gfx which are called from |
722 | inside sg_commit() in unspecified order. This is mainly useful for libraries |
723 | that build on top of sokol_gfx.h to be notified about the end/start of a frame. |
724 | |
725 | To add a commit listener, call: |
726 | |
727 | static void my_commit_listener(void* user_data) { |
728 | ... |
729 | } |
730 | |
731 | bool success = sg_add_commit_listener((sg_commit_listener){ |
732 | .func = my_commit_listener, |
733 | .user_data = ..., |
734 | }); |
735 | |
736 | The function returns false if the internal array of commit listeners is full, |
737 | or the same commit listener had already been added. |
738 | |
739 | If the function returns true, my_commit_listener() will be called each frame |
740 | from inside sg_commit(). |
741 | |
742 | By default, 1024 distinct commit listeners can be added, but this number |
743 | can be tweaked in the sg_setup() call: |
744 | |
745 | sg_setup(&(sg_desc){ |
746 | .max_commit_listeners = 2048, |
747 | }); |
748 | |
749 | An sg_commit_listener item is equal to another if both the function |
750 | pointer and user_data field are equal. |
751 | |
752 | To remove a commit listener: |
753 | |
754 | bool success = sg_remove_commit_listener((sg_commit_listener){ |
755 | .func = my_commit_listener, |
756 | .user_data = ..., |
757 | }); |
758 | |
759 | ...where the .func and .user_data field are equal to a previous |
760 | sg_add_commit_listener() call. The function returns true if the commit |
761 | listener item was found and removed, and false otherwise. |
762 | |
763 | |
764 | RESOURCE CREATION AND DESTRUCTION IN DETAIL |
765 | =========================================== |
766 | The 'vanilla' way to create resource objects is with the 'make functions': |
767 | |
768 | sg_buffer sg_make_buffer(const sg_buffer_desc* desc) |
769 | sg_image sg_make_image(const sg_image_desc* desc) |
770 | sg_shader sg_make_shader(const sg_shader_desc* desc) |
771 | sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc) |
772 | sg_pass sg_make_pass(const sg_pass_desc* desc) |
773 | |
774 | This will result in one of three cases: |
775 | |
776 | 1. The returned handle is invalid. This happens when there are no more |
777 | free slots in the resource pool for this resource type. An invalid |
778 | handle is associated with the INVALID resource state, for instance: |
779 | |
780 | sg_buffer buf = sg_make_buffer(...) |
781 | if (sg_query_buffer_state(buf) == SG_RESOURCESTATE_INVALID) { |
782 | // buffer pool is exhausted |
783 | } |
784 | |
785 | 2. The returned handle is valid, but creating the underlying resource |
786 | has failed for some reason. This results in a resource object in the |
       FAILED state. The reason *why* resource creation has failed differs
       by resource type. Look for log messages with more details. A failed
789 | resource state can be checked with: |
790 | |
791 | sg_buffer buf = sg_make_buffer(...) |
792 | if (sg_query_buffer_state(buf) == SG_RESOURCESTATE_FAILED) { |
793 | // creating the resource has failed |
794 | } |
795 | |
796 | 3. And finally, if everything goes right, the returned resource is |
797 | in resource state VALID and ready to use. This can be checked |
798 | with: |
799 | |
800 | sg_buffer buf = sg_make_buffer(...) |
801 | if (sg_query_buffer_state(buf) == SG_RESOURCESTATE_VALID) { |
            // the resource is valid and ready for use
803 | } |
804 | |
805 | When calling the 'make functions', the created resource goes through a number |
806 | of states: |
807 | |
808 | - INITIAL: the resource slot associated with the new resource is currently |
809 | free (technically, there is no resource yet, just an empty pool slot) |
810 | - ALLOC: a handle for the new resource has been allocated, this just means |
811 | a pool slot has been reserved. |
812 | - VALID or FAILED: in VALID state any 3D API backend resource objects have |
813 | been successfully created, otherwise if anything went wrong, the resource |
814 | will be in FAILED state. |
815 | |
816 | Sometimes it makes sense to first grab a handle, but initialize the |
817 | underlying resource at a later time. For instance when loading data |
818 | asynchronously from a slow data source, you may know what buffers and |
819 | textures are needed at an early stage of the loading process, but actually |
820 | loading the buffer or texture content can only be completed at a later time. |
821 | |
822 | For such situations, sokol-gfx resource objects can be created in two steps. |
823 | You can allocate a handle upfront with one of the 'alloc functions': |
824 | |
825 | sg_buffer sg_alloc_buffer(void) |
826 | sg_image sg_alloc_image(void) |
827 | sg_shader sg_alloc_shader(void) |
828 | sg_pipeline sg_alloc_pipeline(void) |
829 | sg_pass sg_alloc_pass(void) |
830 | |
831 | This will return a handle with the underlying resource object in the |
832 | ALLOC state: |
833 | |
834 | sg_image img = sg_alloc_image(); |
835 | if (sg_query_image_state(img) == SG_RESOURCESTATE_ALLOC) { |
836 | // allocating an image handle has succeeded, otherwise |
837 | // the image pool is full |
838 | } |
839 | |
840 | Such an 'incomplete' handle can be used in most sokol-gfx rendering functions |
    without doing any harm, sokol-gfx will simply skip any rendering operations
    that involve resources which are not in VALID state.
843 | |
844 | At a later time (for instance once the texture has completed loading |
845 | asynchronously), the resource creation can be completed by calling one of |
846 | the 'init functions', those functions take an existing resource handle and |
847 | 'desc struct': |
848 | |
849 | void sg_init_buffer(sg_buffer buf, const sg_buffer_desc* desc) |
850 | void sg_init_image(sg_image img, const sg_image_desc* desc) |
851 | void sg_init_shader(sg_shader shd, const sg_shader_desc* desc) |
852 | void sg_init_pipeline(sg_pipeline pip, const sg_pipeline_desc* desc) |
853 | void sg_init_pass(sg_pass pass, const sg_pass_desc* desc) |
854 | |
855 | The init functions expect a resource in ALLOC state, and after the function |
856 | returns, the resource will be either in VALID or FAILED state. Calling |
857 | an 'alloc function' followed by the matching 'init function' is fully |
858 | equivalent with calling the 'make function' alone. |
859 | |
860 | Destruction can also happen as a two-step process. The 'uninit functions' |
861 | will put a resource object from the VALID or FAILED state back into the |
862 | ALLOC state: |
863 | |
864 | void sg_uninit_buffer(sg_buffer buf) |
865 | void sg_uninit_image(sg_image img) |
866 | void sg_uninit_shader(sg_shader shd) |
867 | void sg_uninit_pipeline(sg_pipeline pip) |
868 | void sg_uninit_pass(sg_pass pass) |
869 | |
870 | Calling the 'uninit functions' with a resource that is not in the VALID or |
871 | FAILED state is a no-op. |
872 | |
873 | To finally free the pool slot for recycling call the 'dealloc functions': |
874 | |
875 | void sg_dealloc_buffer(sg_buffer buf) |
876 | void sg_dealloc_image(sg_image img) |
877 | void sg_dealloc_shader(sg_shader shd) |
878 | void sg_dealloc_pipeline(sg_pipeline pip) |
879 | void sg_dealloc_pass(sg_pass pass) |
880 | |
881 | Calling the 'dealloc functions' on a resource that's not in ALLOC state is |
882 | a no-op, but will generate a warning log message. |
883 | |
884 | Calling an 'uninit function' and 'dealloc function' in sequence is equivalent |
885 | with calling the associated 'destroy function': |
886 | |
887 | void sg_destroy_buffer(sg_buffer buf) |
888 | void sg_destroy_image(sg_image img) |
889 | void sg_destroy_shader(sg_shader shd) |
890 | void sg_destroy_pipeline(sg_pipeline pip) |
891 | void sg_destroy_pass(sg_pass pass) |
892 | |
893 | The 'destroy functions' can be called on resources in any state and generally |
894 | do the right thing (for instance if the resource is in ALLOC state, the destroy |
895 | function will be equivalent to the 'dealloc function' and skip the 'uninit part'). |
896 | |
897 | And finally to close the circle, the 'fail functions' can be called to manually |
898 | put a resource in ALLOC state into the FAILED state: |
899 | |
900 | sg_fail_buffer(sg_buffer buf) |
901 | sg_fail_image(sg_image img) |
902 | sg_fail_shader(sg_shader shd) |
903 | sg_fail_pipeline(sg_pipeline pip) |
904 | sg_fail_pass(sg_pass pass) |
905 | |
906 | This is recommended if anything went wrong outside of sokol-gfx during asynchronous |
907 | resource creation (for instance the file loading operation failed). In this case, |
908 | the 'fail function' should be called instead of the 'init function'. |
909 | |
910 | Calling a 'fail function' on a resource that's not in ALLOC state is a no-op, |
911 | but will generate a warning log message. |
912 | |
    NOTE: two-step resource creation usually only makes sense for buffers
914 | and images, but not for shaders, pipelines or passes. Most notably, trying |
915 | to create a pipeline object with a shader that's not in VALID state will |
916 | trigger a validation layer error, or if the validation layer is disabled, |
    result in a pipeline object in FAILED state. The same applies when trying
    to create a pass object with invalid image objects.
919 | |
920 | LICENSE |
921 | ======= |
922 | zlib/libpng license |
923 | |
924 | Copyright (c) 2018 Andre Weissflog |
925 | |
926 | This software is provided 'as-is', without any express or implied warranty. |
927 | In no event will the authors be held liable for any damages arising from the |
928 | use of this software. |
929 | |
930 | Permission is granted to anyone to use this software for any purpose, |
931 | including commercial applications, and to alter it and redistribute it |
932 | freely, subject to the following restrictions: |
933 | |
934 | 1. The origin of this software must not be misrepresented; you must not |
935 | claim that you wrote the original software. If you use this software in a |
936 | product, an acknowledgment in the product documentation would be |
937 | appreciated but is not required. |
938 | |
939 | 2. Altered source versions must be plainly marked as such, and must not |
940 | be misrepresented as being the original software. |
941 | |
942 | 3. This notice may not be removed or altered from any source |
943 | distribution. |
944 | */ |
945 | #define SOKOL_GFX_INCLUDED (1) |
946 | #include <stddef.h> // size_t |
947 | #include <stdint.h> |
948 | #include <stdbool.h> |
949 | |
// allow the generic SOKOL_API_DECL override to provide SOKOL_GFX_API_DECL
#if defined(SOKOL_API_DECL) && !defined(SOKOL_GFX_API_DECL)
#define SOKOL_GFX_API_DECL SOKOL_API_DECL
#endif
#ifndef SOKOL_GFX_API_DECL
// Windows DLL build: export symbols from the implementation translation unit,
// import them in all other translation units; otherwise plain 'extern' linkage
#if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_GFX_IMPL)
#define SOKOL_GFX_API_DECL __declspec(dllexport)
#elif defined(_WIN32) && defined(SOKOL_DLL)
#define SOKOL_GFX_API_DECL __declspec(dllimport)
#else
#define SOKOL_GFX_API_DECL extern
#endif
#endif
962 | |
963 | #ifdef __cplusplus |
964 | extern "C" { |
965 | #endif |
966 | |
967 | /* |
968 | Resource id typedefs: |
969 | |
970 | sg_buffer: vertex- and index-buffers |
971 | sg_image: textures and render targets |
972 | sg_shader: vertex- and fragment-shaders, uniform blocks |
973 | sg_pipeline: associated shader and vertex-layouts, and render states |
974 | sg_pass: a bundle of render targets and actions on them |
975 | sg_context: a 'context handle' for switching between 3D-API contexts |
976 | |
977 | Instead of pointers, resource creation functions return a 32-bit |
978 | number which uniquely identifies the resource object. |
979 | |
980 | The 32-bit resource id is split into a 16-bit pool index in the lower bits, |
981 | and a 16-bit 'unique counter' in the upper bits. The index allows fast |
982 | pool lookups, and combined with the unique-mask it allows to detect |
983 | 'dangling accesses' (trying to use an object which no longer exists, and |
984 | its pool slot has been reused for a new object) |
985 | |
986 | The resource ids are wrapped into a struct so that the compiler |
987 | can complain when the wrong resource type is used. |
988 | */ |
typedef struct sg_buffer { uint32_t id; } sg_buffer;        // handle for vertex- and index-buffers
typedef struct sg_image { uint32_t id; } sg_image;          // handle for textures and render targets
typedef struct sg_shader { uint32_t id; } sg_shader;        // handle for shaders (vs/fs + uniform blocks)
typedef struct sg_pipeline { uint32_t id; } sg_pipeline;    // handle for pipeline objects (shader + layout + render states)
typedef struct sg_pass { uint32_t id; } sg_pass;            // handle for render passes (bundle of render targets)
typedef struct sg_context { uint32_t id; } sg_context;      // handle for switching between 3D-API contexts
995 | |
996 | /* |
997 | sg_range is a pointer-size-pair struct used to pass memory blobs into |
998 | sokol-gfx. When initialized from a value type (array or struct), you can |
999 | use the SG_RANGE() macro to build an sg_range struct. For functions which |
1000 | take either a sg_range pointer, or a (C++) sg_range reference, use the |
1001 | SG_RANGE_REF macro as a solution which compiles both in C and C++. |
1002 | */ |
typedef struct sg_range {
    const void* ptr;    // pointer to the start of the memory blob
    size_t size;        // size of the memory blob in bytes
} sg_range;
1007 | |
1008 | // disabling this for every includer isn't great, but the warnings are also quite pointless |
1009 | #if defined(_MSC_VER) |
1010 | #pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ |
1011 | #pragma warning(disable:4204) /* VS2015: nonstandard extension used: non-constant aggregate initializer */ |
1012 | #endif |
#if defined(__cplusplus)
// C++: aggregate-initialize an sg_range temporary directly
#define SG_RANGE(x) sg_range{ &x, sizeof(x) }
#define SG_RANGE_REF(x) sg_range{ &x, sizeof(x) }
#else
// C: use compound literals; SG_RANGE_REF yields the address of the temporary
#define SG_RANGE(x) (sg_range){ &x, sizeof(x) }
#define SG_RANGE_REF(x) &(sg_range){ &x, sizeof(x) }
#endif
1020 | |
1021 | // various compile-time constants |
enum {
    SG_INVALID_ID = 0,                  // the invalid resource id (matches a zero-initialized handle)
    SG_NUM_SHADER_STAGES = 2,           // vertex- and fragment-shader-stage (see sg_shader_stage)
    SG_NUM_INFLIGHT_FRAMES = 2,         // max number of frames 'in flight' on the GPU
    SG_MAX_COLOR_ATTACHMENTS = 4,       // max color attachments per render pass
    SG_MAX_SHADERSTAGE_BUFFERS = 8,     // max buffer bind slots (presumably vertex buffers — see sg_bindings)
    SG_MAX_SHADERSTAGE_IMAGES = 12,     // max image bind slots per shader stage
    SG_MAX_SHADERSTAGE_UBS = 4,         // max uniform-block slots per shader stage
    SG_MAX_UB_MEMBERS = 16,             // max members per uniform block
    SG_MAX_VERTEX_ATTRIBUTES = 16,      /* NOTE: actual max vertex attrs can be less on GLES2, see sg_limits! */
    SG_MAX_MIPMAPS = 16,                // max mipmap levels per image
    SG_MAX_TEXTUREARRAY_LAYERS = 128    // max layers in an array texture
};
1035 | |
1036 | /* |
1037 | sg_color |
1038 | |
1039 | An RGBA color value. |
1040 | */ |
1041 | typedef struct sg_color { float r, g, b, a; } sg_color; |
1042 | |
1043 | /* |
1044 | sg_backend |
1045 | |
1046 | The active 3D-API backend, use the function sg_query_backend() |
1047 | to get the currently active backend. |
1048 | |
1049 | NOTE that SG_BACKEND_GLES2 will be returned if sokol-gfx was |
1050 | compiled with SOKOL_GLES3, but the runtime platform doesn't support |
1051 | GLES3/WebGL2 and sokol-gfx had to fallback to GLES2/WebGL. |
1052 | */ |
typedef enum sg_backend {
    SG_BACKEND_GLCORE33,            // desktop OpenGL 3.3 Core Profile
    SG_BACKEND_GLES2,               // OpenGL ES2 / WebGL (also the GLES3 runtime fallback, see note above)
    SG_BACKEND_GLES3,               // OpenGL ES3 / WebGL2
    SG_BACKEND_D3D11,               // Direct3D 11
    SG_BACKEND_METAL_IOS,           // Metal on iOS devices
    SG_BACKEND_METAL_MACOS,         // Metal on macOS
    SG_BACKEND_METAL_SIMULATOR,     // Metal in the iOS simulator
    SG_BACKEND_WGPU,                // WebGPU
    SG_BACKEND_DUMMY,               // stub backend without 3D-API calls (SOKOL_DUMMY_BACKEND)
} sg_backend;
1064 | |
1065 | /* |
1066 | sg_pixel_format |
1067 | |
1068 | sokol_gfx.h basically uses the same pixel formats as WebGPU, since these |
    are supported on most newer GPUs. GLES2 and WebGL only support a much
1070 | smaller subset of actually available pixel formats. Call |
1071 | sg_query_pixelformat() to check at runtime if a pixel format supports the |
1072 | desired features. |
1073 | |
    A pixelformat name consists of three parts:
1075 | |
1076 | - components (R, RG, RGB or RGBA) |
1077 | - bit width per component (8, 16 or 32) |
1078 | - component data type: |
1079 | - unsigned normalized (no postfix) |
1080 | - signed normalized (SN postfix) |
1081 | - unsigned integer (UI postfix) |
1082 | - signed integer (SI postfix) |
1083 | - float (F postfix) |
1084 | |
1085 | Not all pixel formats can be used for everything, call sg_query_pixelformat() |
1086 | to inspect the capabilities of a given pixelformat. The function returns |
1087 | an sg_pixelformat_info struct with the following bool members: |
1088 | |
1089 | - sample: the pixelformat can be sampled as texture at least with |
1090 | nearest filtering |
        - filter: the pixelformat can be sampled as texture with linear
1092 | filtering |
1093 | - render: the pixelformat can be used for render targets |
1094 | - blend: blending is supported when using the pixelformat for |
1095 | render targets |
1096 | - msaa: multisample-antialiasing is supported when using the |
1097 | pixelformat for render targets |
1098 | - depth: the pixelformat can be used for depth-stencil attachments |
1099 | |
1100 | When targeting GLES2/WebGL, the only safe formats to use |
1101 | as texture are SG_PIXELFORMAT_R8 and SG_PIXELFORMAT_RGBA8. For rendering |
1102 | in GLES2/WebGL, only SG_PIXELFORMAT_RGBA8 is safe. All other formats |
    must be checked via sg_query_pixelformat().
1104 | |
1105 | The default pixel format for texture images is SG_PIXELFORMAT_RGBA8. |
1106 | |
1107 | The default pixel format for render target images is platform-dependent: |
1108 | - for Metal and D3D11 it is SG_PIXELFORMAT_BGRA8 |
1109 | - for GL backends it is SG_PIXELFORMAT_RGBA8 |
1110 | |
1111 | This is mainly because of the default framebuffer which is setup outside |
1112 | of sokol_gfx.h. On some backends, using BGRA for the default frame buffer |
1113 | allows more efficient frame flips. For your own offscreen-render-targets, |
1114 | use whatever renderable pixel format is convenient for you. |
1115 | */ |
typedef enum sg_pixel_format {
    _SG_PIXELFORMAT_DEFAULT, /* value 0 reserved for default-init */
    SG_PIXELFORMAT_NONE,

    // 8 bits per pixel
    SG_PIXELFORMAT_R8,
    SG_PIXELFORMAT_R8SN,
    SG_PIXELFORMAT_R8UI,
    SG_PIXELFORMAT_R8SI,

    // 16 bits per pixel
    SG_PIXELFORMAT_R16,
    SG_PIXELFORMAT_R16SN,
    SG_PIXELFORMAT_R16UI,
    SG_PIXELFORMAT_R16SI,
    SG_PIXELFORMAT_R16F,
    SG_PIXELFORMAT_RG8,
    SG_PIXELFORMAT_RG8SN,
    SG_PIXELFORMAT_RG8UI,
    SG_PIXELFORMAT_RG8SI,

    // 32 bits per pixel
    SG_PIXELFORMAT_R32UI,
    SG_PIXELFORMAT_R32SI,
    SG_PIXELFORMAT_R32F,
    SG_PIXELFORMAT_RG16,
    SG_PIXELFORMAT_RG16SN,
    SG_PIXELFORMAT_RG16UI,
    SG_PIXELFORMAT_RG16SI,
    SG_PIXELFORMAT_RG16F,
    SG_PIXELFORMAT_RGBA8,
    SG_PIXELFORMAT_RGBA8SN,
    SG_PIXELFORMAT_RGBA8UI,
    SG_PIXELFORMAT_RGBA8SI,
    SG_PIXELFORMAT_BGRA8,
    SG_PIXELFORMAT_RGB10A2,     // 10-10-10-2 packed
    SG_PIXELFORMAT_RG11B10F,    // 11-11-10 packed float

    // 64 bits per pixel
    SG_PIXELFORMAT_RG32UI,
    SG_PIXELFORMAT_RG32SI,
    SG_PIXELFORMAT_RG32F,
    SG_PIXELFORMAT_RGBA16,
    SG_PIXELFORMAT_RGBA16SN,
    SG_PIXELFORMAT_RGBA16UI,
    SG_PIXELFORMAT_RGBA16SI,
    SG_PIXELFORMAT_RGBA16F,

    // 128 bits per pixel
    SG_PIXELFORMAT_RGBA32UI,
    SG_PIXELFORMAT_RGBA32SI,
    SG_PIXELFORMAT_RGBA32F,

    // depth and depth-stencil formats
    SG_PIXELFORMAT_DEPTH,
    SG_PIXELFORMAT_DEPTH_STENCIL,

    // block-compressed formats (BC1..BC7, PVRTC, ETC2)
    SG_PIXELFORMAT_BC1_RGBA,
    SG_PIXELFORMAT_BC2_RGBA,
    SG_PIXELFORMAT_BC3_RGBA,
    SG_PIXELFORMAT_BC4_R,
    SG_PIXELFORMAT_BC4_RSN,
    SG_PIXELFORMAT_BC5_RG,
    SG_PIXELFORMAT_BC5_RGSN,
    SG_PIXELFORMAT_BC6H_RGBF,
    SG_PIXELFORMAT_BC6H_RGBUF,
    SG_PIXELFORMAT_BC7_RGBA,
    SG_PIXELFORMAT_PVRTC_RGB_2BPP,
    SG_PIXELFORMAT_PVRTC_RGB_4BPP,
    SG_PIXELFORMAT_PVRTC_RGBA_2BPP,
    SG_PIXELFORMAT_PVRTC_RGBA_4BPP,
    SG_PIXELFORMAT_ETC2_RGB8,
    SG_PIXELFORMAT_ETC2_RGB8A1,
    SG_PIXELFORMAT_ETC2_RGBA8,
    SG_PIXELFORMAT_ETC2_RG11,
    SG_PIXELFORMAT_ETC2_RG11SN,

    SG_PIXELFORMAT_RGB9E5,      // packed shared-exponent float format

    _SG_PIXELFORMAT_NUM,                        // number of pixel formats (internal)
    _SG_PIXELFORMAT_FORCE_U32 = 0x7FFFFFFF      // force the enum to 32-bit size
} sg_pixel_format;
1192 | |
1193 | /* |
1194 | Runtime information about a pixel format, returned |
1195 | by sg_query_pixelformat(). |
1196 | */ |
typedef struct sg_pixelformat_info {
    bool sample;        // pixel format can be sampled in shaders
    bool filter;        // pixel format can be sampled with filtering
    bool render;        // pixel format can be used as render target
    bool blend;         // alpha-blending is supported
    bool msaa;          // pixel format can be used as MSAA render target
    bool depth;         // pixel format is a depth format
#if defined(SOKOL_ZIG_BINDINGS)
    uint32_t __pad[3];  // explicit padding to keep the Zig bindings' struct layout in sync
#endif
} sg_pixelformat_info;
1208 | |
1209 | /* |
1210 | Runtime information about available optional features, |
1211 | returned by sg_query_features() |
1212 | */ |
typedef struct sg_features {
    bool instancing;                  // hardware instancing supported
    bool origin_top_left;             // framebuffer and texture origin is in top left corner
    bool multiple_render_targets;     // offscreen render passes can have multiple render targets attached
    bool msaa_render_targets;         // offscreen render passes support MSAA antialiasing
    bool imagetype_3d;                // creation of SG_IMAGETYPE_3D images is supported
    bool imagetype_array;             // creation of SG_IMAGETYPE_ARRAY images is supported
    bool image_clamp_to_border;       // border color and clamp-to-border UV-wrap mode is supported
    bool mrt_independent_blend_state; // multiple-render-target rendering can use per-render-target blend state
    bool mrt_independent_write_mask;  // multiple-render-target rendering can use per-render-target color write masks
#if defined(SOKOL_ZIG_BINDINGS)
    uint32_t __pad[3];                // explicit padding to keep the Zig bindings' struct layout in sync
#endif
} sg_features;
1227 | |
1228 | /* |
    Runtime information about resource limits, returned by sg_query_limits()
1230 | */ |
typedef struct sg_limits {
    // runtime limits of the active backend, see sg_query_limits()
    int max_image_size_2d;      // max width/height of SG_IMAGETYPE_2D images
    int max_image_size_cube;    // max width/height of SG_IMAGETYPE_CUBE images
    int max_image_size_3d;      // max width/height/depth of SG_IMAGETYPE_3D images
    int max_image_size_array;   // max width/height of SG_IMAGETYPE_ARRAY images
    int max_image_array_layers; // max number of layers in SG_IMAGETYPE_ARRAY images
    int max_vertex_attrs;       // <= SG_MAX_VERTEX_ATTRIBUTES or less (on some GLES2 impls)
    int gl_max_vertex_uniform_vectors;  // <= GL_MAX_VERTEX_UNIFORM_VECTORS (only on GL backends)
} sg_limits;
1240 | |
1241 | /* |
1242 | sg_resource_state |
1243 | |
1244 | The current state of a resource in its resource pool. |
1245 | Resources start in the INITIAL state, which means the |
1246 | pool slot is unoccupied and can be allocated. When a resource is |
1247 | created, first an id is allocated, and the resource pool slot |
1248 | is set to state ALLOC. After allocation, the resource is |
1249 | initialized, which may result in the VALID or FAILED state. The |
1250 | reason why allocation and initialization are separate is because |
1251 | some resource types (e.g. buffers and images) might be asynchronously |
1252 | initialized by the user application. If a resource which is not |
1253 | in the VALID state is attempted to be used for rendering, rendering |
1254 | operations will silently be dropped. |
1255 | |
1256 | The special INVALID state is returned in sg_query_xxx_state() if no |
1257 | resource object exists for the provided resource id. |
1258 | */ |
typedef enum sg_resource_state {
    SG_RESOURCESTATE_INITIAL,                   // pool slot is unoccupied and can be allocated
    SG_RESOURCESTATE_ALLOC,                     // an id/pool-slot has been allocated, resource not initialized yet
    SG_RESOURCESTATE_VALID,                     // resource is initialized and ready for use
    SG_RESOURCESTATE_FAILED,                    // resource initialization has failed
    SG_RESOURCESTATE_INVALID,                   // no resource object exists for the provided resource id
    _SG_RESOURCESTATE_FORCE_U32 = 0x7FFFFFFF    // force the enum to 32-bit size
} sg_resource_state;
1267 | |
1268 | /* |
1269 | sg_usage |
1270 | |
1271 | A resource usage hint describing the update strategy of |
1272 | buffers and images. This is used in the sg_buffer_desc.usage |
1273 | and sg_image_desc.usage members when creating buffers |
1274 | and images: |
1275 | |
1276 | SG_USAGE_IMMUTABLE: the resource will never be updated with |
1277 | new data, instead the content of the |
1278 | resource must be provided on creation |
1279 | SG_USAGE_DYNAMIC: the resource will be updated infrequently |
1280 | with new data (this could range from "once |
1281 | after creation", to "quite often but not |
1282 | every frame") |
1283 | SG_USAGE_STREAM: the resource will be updated each frame |
1284 | with new content |
1285 | |
1286 | The rendering backends use this hint to prevent that the |
1287 | CPU needs to wait for the GPU when attempting to update |
1288 | a resource that might be currently accessed by the GPU. |
1289 | |
1290 | Resource content is updated with the functions sg_update_buffer() or |
1291 | sg_append_buffer() for buffer objects, and sg_update_image() for image |
1292 | objects. For the sg_update_*() functions, only one update is allowed per |
1293 | frame and resource object, while sg_append_buffer() can be called |
1294 | multiple times per frame on the same buffer. The application must update |
1295 | all data required for rendering (this means that the update data can be |
1296 | smaller than the resource size, if only a part of the overall resource |
1297 | size is used for rendering, you only need to make sure that the data that |
1298 | *is* used is valid). |
1299 | |
1300 | The default usage is SG_USAGE_IMMUTABLE. |
1301 | */ |
typedef enum sg_usage {
    _SG_USAGE_DEFAULT, /* value 0 reserved for default-init */
    SG_USAGE_IMMUTABLE,     // content provided on creation, never updated (the default)
    SG_USAGE_DYNAMIC,       // content updated infrequently with new data
    SG_USAGE_STREAM,        // content updated each frame
    _SG_USAGE_NUM,
    _SG_USAGE_FORCE_U32 = 0x7FFFFFFF
} sg_usage;
1310 | |
1311 | /* |
1312 | sg_buffer_type |
1313 | |
1314 | This indicates whether a buffer contains vertex- or index-data, |
1315 | used in the sg_buffer_desc.type member when creating a buffer. |
1316 | |
1317 | The default value is SG_BUFFERTYPE_VERTEXBUFFER. |
1318 | */ |
typedef enum sg_buffer_type {
    _SG_BUFFERTYPE_DEFAULT, /* value 0 reserved for default-init */
    SG_BUFFERTYPE_VERTEXBUFFER,     // buffer contains vertex data (the default)
    SG_BUFFERTYPE_INDEXBUFFER,      // buffer contains index data
    _SG_BUFFERTYPE_NUM,
    _SG_BUFFERTYPE_FORCE_U32 = 0x7FFFFFFF
} sg_buffer_type;
1326 | |
1327 | /* |
1328 | sg_index_type |
1329 | |
1330 | Indicates whether indexed rendering (fetching vertex-indices from an |
1331 | index buffer) is used, and if yes, the index data type (16- or 32-bits). |
1332 | This is used in the sg_pipeline_desc.index_type member when creating a |
1333 | pipeline object. |
1334 | |
1335 | The default index type is SG_INDEXTYPE_NONE. |
1336 | */ |
typedef enum sg_index_type {
    _SG_INDEXTYPE_DEFAULT, /* value 0 reserved for default-init */
    SG_INDEXTYPE_NONE,      // non-indexed rendering (the default)
    SG_INDEXTYPE_UINT16,    // 16-bit indices
    SG_INDEXTYPE_UINT32,    // 32-bit indices
    _SG_INDEXTYPE_NUM,
    _SG_INDEXTYPE_FORCE_U32 = 0x7FFFFFFF
} sg_index_type;
1345 | |
1346 | /* |
1347 | sg_image_type |
1348 | |
1349 | Indicates the basic type of an image object (2D-texture, cubemap, |
1350 | 3D-texture or 2D-array-texture). 3D- and array-textures are not supported |
1351 | on the GLES2/WebGL backend (use sg_query_features().imagetype_3d and |
1352 | sg_query_features().imagetype_array to check for support). The image type |
1353 | is used in the sg_image_desc.type member when creating an image, and |
1354 | in sg_shader_image_desc when describing a shader's texture sampler binding. |
1355 | |
1356 | The default image type when creating an image is SG_IMAGETYPE_2D. |
1357 | */ |
typedef enum sg_image_type {
    _SG_IMAGETYPE_DEFAULT, /* value 0 reserved for default-init */
    SG_IMAGETYPE_2D,        // regular 2D texture (the default)
    SG_IMAGETYPE_CUBE,      // cubemap texture
    SG_IMAGETYPE_3D,        // 3D texture (not supported on GLES2/WebGL, see sg_features.imagetype_3d)
    SG_IMAGETYPE_ARRAY,     // 2D-array texture (not supported on GLES2/WebGL, see sg_features.imagetype_array)
    _SG_IMAGETYPE_NUM,
    _SG_IMAGETYPE_FORCE_U32 = 0x7FFFFFFF
} sg_image_type;
1367 | |
1368 | /* |
1369 | sg_sampler_type |
1370 | |
1371 | Indicates the basic data type of a shader's texture sampler which |
    can be float, unsigned integer or signed integer. The sampler
1373 | type is used in the sg_shader_image_desc to describe the |
1374 | sampler type of a shader's texture sampler binding. |
1375 | |
1376 | The default sampler type is SG_SAMPLERTYPE_FLOAT. |
1377 | */ |
typedef enum sg_sampler_type {
    _SG_SAMPLERTYPE_DEFAULT,  /* value 0 reserved for default-init */
    SG_SAMPLERTYPE_FLOAT,   // sampler returns floats (the default)
    SG_SAMPLERTYPE_SINT,    // sampler returns signed integers
    SG_SAMPLERTYPE_UINT,    // sampler returns unsigned integers
    // NOTE(review): unlike the other enums, no _NUM/_FORCE_U32 sentinels here — confirm intentional
} sg_sampler_type;
1384 | |
1385 | /* |
1386 | sg_cube_face |
1387 | |
1388 | The cubemap faces. Use these as indices in the sg_image_desc.content |
1389 | array. |
1390 | */ |
typedef enum sg_cube_face {
    SG_CUBEFACE_POS_X,      // +X face
    SG_CUBEFACE_NEG_X,      // -X face
    SG_CUBEFACE_POS_Y,      // +Y face
    SG_CUBEFACE_NEG_Y,      // -Y face
    SG_CUBEFACE_POS_Z,      // +Z face
    SG_CUBEFACE_NEG_Z,      // -Z face
    SG_CUBEFACE_NUM,        // number of cubemap faces (6)
    _SG_CUBEFACE_FORCE_U32 = 0x7FFFFFFF
} sg_cube_face;
1401 | |
1402 | /* |
1403 | sg_shader_stage |
1404 | |
1405 | There are 2 shader stages: vertex- and fragment-shader-stage. |
1406 | Each shader stage consists of: |
1407 | |
1408 | - one slot for a shader function (provided as source- or byte-code) |
1409 | - SG_MAX_SHADERSTAGE_UBS slots for uniform blocks |
1410 | - SG_MAX_SHADERSTAGE_IMAGES slots for images used as textures by |
1411 | the shader function |
1412 | */ |
typedef enum sg_shader_stage {
    SG_SHADERSTAGE_VS,      // vertex-shader stage
    SG_SHADERSTAGE_FS,      // fragment-shader stage
    _SG_SHADERSTAGE_FORCE_U32 = 0x7FFFFFFF
} sg_shader_stage;
1418 | |
1419 | /* |
1420 | sg_primitive_type |
1421 | |
1422 | This is the common subset of 3D primitive types supported across all 3D |
1423 | APIs. This is used in the sg_pipeline_desc.primitive_type member when |
1424 | creating a pipeline object. |
1425 | |
1426 | The default primitive type is SG_PRIMITIVETYPE_TRIANGLES. |
1427 | */ |
typedef enum sg_primitive_type {
    _SG_PRIMITIVETYPE_DEFAULT, /* value 0 reserved for default-init */
    SG_PRIMITIVETYPE_POINTS,
    SG_PRIMITIVETYPE_LINES,
    SG_PRIMITIVETYPE_LINE_STRIP,
    SG_PRIMITIVETYPE_TRIANGLES,         // the default primitive type
    SG_PRIMITIVETYPE_TRIANGLE_STRIP,
    _SG_PRIMITIVETYPE_NUM,
    _SG_PRIMITIVETYPE_FORCE_U32 = 0x7FFFFFFF
} sg_primitive_type;
1438 | |
1439 | /* |
1440 | sg_filter |
1441 | |
1442 | The filtering mode when sampling a texture image. This is |
1443 | used in the sg_image_desc.min_filter and sg_image_desc.mag_filter |
1444 | members when creating an image object. |
1445 | |
1446 | The default filter mode is SG_FILTER_NEAREST. |
1447 | */ |
typedef enum sg_filter {
    _SG_FILTER_DEFAULT, /* value 0 reserved for default-init */
    SG_FILTER_NEAREST,      // nearest filtering (the default)
    SG_FILTER_LINEAR,       // linear filtering
    SG_FILTER_NEAREST_MIPMAP_NEAREST,   // nearest within mip level, nearest mip level
    SG_FILTER_NEAREST_MIPMAP_LINEAR,    // nearest within mip level, blend between mip levels
    SG_FILTER_LINEAR_MIPMAP_NEAREST,    // linear within mip level, nearest mip level
    SG_FILTER_LINEAR_MIPMAP_LINEAR,     // linear within mip level, blend between mip levels
    _SG_FILTER_NUM,
    _SG_FILTER_FORCE_U32 = 0x7FFFFFFF
} sg_filter;
1459 | |
1460 | /* |
1461 | sg_wrap |
1462 | |
1463 | The texture coordinates wrapping mode when sampling a texture |
1464 | image. This is used in the sg_image_desc.wrap_u, .wrap_v |
1465 | and .wrap_w members when creating an image. |
1466 | |
1467 | The default wrap mode is SG_WRAP_REPEAT. |
1468 | |
1469 | NOTE: SG_WRAP_CLAMP_TO_BORDER is not supported on all backends |
1470 | and platforms. To check for support, call sg_query_features() |
1471 | and check the "clamp_to_border" boolean in the returned |
1472 | sg_features struct. |
1473 | |
1474 | Platforms which don't support SG_WRAP_CLAMP_TO_BORDER will silently fall back |
1475 | to SG_WRAP_CLAMP_TO_EDGE without a validation error. |
1476 | |
1477 | Platforms which support clamp-to-border are: |
1478 | |
1479 | - all desktop GL platforms |
1480 | - Metal on macOS |
1481 | - D3D11 |
1482 | |
1483 | Platforms which do not support clamp-to-border: |
1484 | |
1485 | - GLES2/3 and WebGL/WebGL2 |
1486 | - Metal on iOS |
1487 | */ |
typedef enum sg_wrap {
    _SG_WRAP_DEFAULT, /* value 0 reserved for default-init */
    SG_WRAP_REPEAT,             // tile the texture (the default)
    SG_WRAP_CLAMP_TO_EDGE,      // clamp UVs to the texture edge
    SG_WRAP_CLAMP_TO_BORDER,    // sample the border color outside [0..1] (not supported on all platforms, see note above)
    SG_WRAP_MIRRORED_REPEAT,    // tile the texture, mirroring every other repetition
    _SG_WRAP_NUM,
    _SG_WRAP_FORCE_U32 = 0x7FFFFFFF
} sg_wrap;
1497 | |
1498 | /* |
1499 | sg_border_color |
1500 | |
1501 | The border color to use when sampling a texture, and the UV wrap |
1502 | mode is SG_WRAP_CLAMP_TO_BORDER. |
1503 | |
1504 | The default border color is SG_BORDERCOLOR_OPAQUE_BLACK |
1505 | */ |
typedef enum sg_border_color {
    _SG_BORDERCOLOR_DEFAULT, /* value 0 reserved for default-init */
    SG_BORDERCOLOR_TRANSPARENT_BLACK,
    SG_BORDERCOLOR_OPAQUE_BLACK,        // the default border color
    SG_BORDERCOLOR_OPAQUE_WHITE,
    _SG_BORDERCOLOR_NUM,
    _SG_BORDERCOLOR_FORCE_U32 = 0x7FFFFFFF
} sg_border_color;
1514 | |
1515 | /* |
1516 | sg_vertex_format |
1517 | |
1518 | The data type of a vertex component. This is used to describe |
1519 | the layout of vertex data when creating a pipeline object. |
1520 | */ |
typedef enum sg_vertex_format {
    SG_VERTEXFORMAT_INVALID,    // marks an unused vertex attribute slot
    SG_VERTEXFORMAT_FLOAT,      // 1x 32-bit float
    SG_VERTEXFORMAT_FLOAT2,     // 2x 32-bit float
    SG_VERTEXFORMAT_FLOAT3,     // 3x 32-bit float
    SG_VERTEXFORMAT_FLOAT4,     // 4x 32-bit float
    SG_VERTEXFORMAT_BYTE4,      // 4x int8
    SG_VERTEXFORMAT_BYTE4N,     // 4x int8, normalized
    SG_VERTEXFORMAT_UBYTE4,     // 4x uint8
    SG_VERTEXFORMAT_UBYTE4N,    // 4x uint8, normalized
    SG_VERTEXFORMAT_SHORT2,     // 2x int16
    SG_VERTEXFORMAT_SHORT2N,    // 2x int16, normalized
    SG_VERTEXFORMAT_USHORT2N,   // 2x uint16, normalized
    SG_VERTEXFORMAT_SHORT4,     // 4x int16
    SG_VERTEXFORMAT_SHORT4N,    // 4x int16, normalized
    SG_VERTEXFORMAT_USHORT4N,   // 4x uint16, normalized
    SG_VERTEXFORMAT_UINT10_N2,  // 10-10-10-2 packed, normalized
    _SG_VERTEXFORMAT_NUM,
    _SG_VERTEXFORMAT_FORCE_U32 = 0x7FFFFFFF
} sg_vertex_format;
1541 | |
1542 | /* |
1543 | sg_vertex_step |
1544 | |
1545 | Defines whether the input pointer of a vertex input stream is advanced |
1546 | 'per vertex' or 'per instance'. The default step-func is |
1547 | SG_VERTEXSTEP_PER_VERTEX. SG_VERTEXSTEP_PER_INSTANCE is used with |
1548 | instanced-rendering. |
1549 | |
1550 | The vertex-step is part of the vertex-layout definition |
1551 | when creating pipeline objects. |
1552 | */ |
typedef enum sg_vertex_step {
    _SG_VERTEXSTEP_DEFAULT,     /* value 0 reserved for default-init */
    SG_VERTEXSTEP_PER_VERTEX,   /* the default: advance input pointer per vertex */
    SG_VERTEXSTEP_PER_INSTANCE, /* advance input pointer per instance (instanced rendering) */
    _SG_VERTEXSTEP_NUM,         /* number of valid step functions (internal) */
    _SG_VERTEXSTEP_FORCE_U32 = 0x7FFFFFFF   /* force the enum to 32-bit size */
} sg_vertex_step;
1560 | |
1561 | /* |
1562 | sg_uniform_type |
1563 | |
1564 | The data type of a uniform block member. This is used to |
1565 | describe the internal layout of uniform blocks when creating |
1566 | a shader object. |
1567 | */ |
typedef enum sg_uniform_type {
    SG_UNIFORMTYPE_INVALID,     /* value 0: the type must be explicitly initialized */
    SG_UNIFORMTYPE_FLOAT,       /* one 32-bit float */
    SG_UNIFORMTYPE_FLOAT2,      /* two 32-bit floats */
    SG_UNIFORMTYPE_FLOAT3,      /* three 32-bit floats */
    SG_UNIFORMTYPE_FLOAT4,      /* four 32-bit floats */
    SG_UNIFORMTYPE_INT,         /* one 32-bit integer */
    SG_UNIFORMTYPE_INT2,        /* two 32-bit integers */
    SG_UNIFORMTYPE_INT3,        /* three 32-bit integers */
    SG_UNIFORMTYPE_INT4,        /* four 32-bit integers */
    SG_UNIFORMTYPE_MAT4,        /* 4x4 matrix of 32-bit floats */
    _SG_UNIFORMTYPE_NUM,        /* number of valid uniform types (internal) */
    _SG_UNIFORMTYPE_FORCE_U32 = 0x7FFFFFFF  /* force the enum to 32-bit size */
} sg_uniform_type;
1582 | |
1583 | /* |
1584 | sg_uniform_layout |
1585 | |
1586 | A hint for the interior memory layout of uniform blocks. This is |
1587 | only really relevant for the GL backend where the internal layout |
1588 | of uniform blocks must be known to sokol-gfx. For all other backends the |
1589 | internal memory layout of uniform blocks doesn't matter, sokol-gfx |
1590 | will just pass uniform data as a single memory blob to the |
1591 | 3D backend. |
1592 | |
1593 | SG_UNIFORMLAYOUT_NATIVE (default) |
1594 | Native layout means that a 'backend-native' memory layout |
1595 | is used. For the GL backend this means that uniforms |
1596 | are packed tightly in memory (e.g. there are no padding |
1597 | bytes). |
1598 | |
1599 | SG_UNIFORMLAYOUT_STD140 |
        The memory layout is a subset of std140. Arrays are only
        allowed for the FLOAT4, INT4 and MAT4 types. Alignment
        is as follows:
1603 | |
1604 | FLOAT, INT: 4 byte alignment |
1605 | FLOAT2, INT2: 8 byte alignment |
1606 | FLOAT3, INT3: 16 byte alignment(!) |
1607 | FLOAT4, INT4: 16 byte alignment |
1608 | MAT4: 16 byte alignment |
1609 | FLOAT4[], INT4[]: 16 byte alignment |
1610 | |
1611 | The overall size of the uniform block must be a multiple |
1612 | of 16. |
1613 | |
1614 | For more information search for 'UNIFORM DATA LAYOUT' in the documentation block |
1615 | at the start of the header. |
1616 | */ |
typedef enum sg_uniform_layout {
    _SG_UNIFORMLAYOUT_DEFAULT,     /* value 0 reserved for default-init */
    SG_UNIFORMLAYOUT_NATIVE,       /* default: layout depends on currently active backend */
    SG_UNIFORMLAYOUT_STD140,       /* std140: memory layout according to std140 */
    _SG_UNIFORMLAYOUT_NUM,         /* number of valid layout hints (internal) */
    _SG_UNIFORMLAYOUT_FORCE_U32 = 0x7FFFFFFF    /* force the enum to 32-bit size */
} sg_uniform_layout;
1624 | |
1625 | /* |
1626 | sg_cull_mode |
1627 | |
1628 | The face-culling mode, this is used in the |
1629 | sg_pipeline_desc.cull_mode member when creating a |
1630 | pipeline object. |
1631 | |
1632 | The default cull mode is SG_CULLMODE_NONE |
1633 | */ |
typedef enum sg_cull_mode {
    _SG_CULLMODE_DEFAULT,   /* value 0 reserved for default-init */
    SG_CULLMODE_NONE,       /* the default: no face culling */
    SG_CULLMODE_FRONT,      /* cull front-facing primitives */
    SG_CULLMODE_BACK,       /* cull back-facing primitives */
    _SG_CULLMODE_NUM,       /* number of valid cull modes (internal) */
    _SG_CULLMODE_FORCE_U32 = 0x7FFFFFFF     /* force the enum to 32-bit size */
} sg_cull_mode;
1642 | |
1643 | /* |
1644 | sg_face_winding |
1645 | |
1646 | The vertex-winding rule that determines a front-facing primitive. This |
1647 | is used in the member sg_pipeline_desc.face_winding |
1648 | when creating a pipeline object. |
1649 | |
1650 | The default winding is SG_FACEWINDING_CW (clockwise) |
1651 | */ |
typedef enum sg_face_winding {
    _SG_FACEWINDING_DEFAULT,    /* value 0 reserved for default-init */
    SG_FACEWINDING_CCW,         /* counter-clockwise winding is front-facing */
    SG_FACEWINDING_CW,          /* clockwise winding is front-facing (the default) */
    _SG_FACEWINDING_NUM,        /* number of valid winding modes (internal) */
    _SG_FACEWINDING_FORCE_U32 = 0x7FFFFFFF  /* force the enum to 32-bit size */
} sg_face_winding;
1659 | |
1660 | /* |
1661 | sg_compare_func |
1662 | |
1663 | The compare-function for depth- and stencil-ref tests. |
1664 | This is used when creating pipeline objects in the members: |
1665 | |
1666 | sg_pipeline_desc |
1667 | .depth |
1668 | .compare |
1669 | .stencil |
1670 | .front.compare |
            .back.compare
1672 | |
1673 | The default compare func for depth- and stencil-tests is |
1674 | SG_COMPAREFUNC_ALWAYS. |
1675 | */ |
typedef enum sg_compare_func {
    _SG_COMPAREFUNC_DEFAULT,    /* value 0 reserved for default-init */
    SG_COMPAREFUNC_NEVER,           /* test never passes */
    SG_COMPAREFUNC_LESS,            /* passes if incoming value < stored value */
    SG_COMPAREFUNC_EQUAL,           /* passes if incoming value == stored value */
    SG_COMPAREFUNC_LESS_EQUAL,      /* passes if incoming value <= stored value */
    SG_COMPAREFUNC_GREATER,         /* passes if incoming value > stored value */
    SG_COMPAREFUNC_NOT_EQUAL,       /* passes if incoming value != stored value */
    SG_COMPAREFUNC_GREATER_EQUAL,   /* passes if incoming value >= stored value */
    SG_COMPAREFUNC_ALWAYS,          /* test always passes (the default) */
    _SG_COMPAREFUNC_NUM,            /* number of valid compare funcs (internal) */
    _SG_COMPAREFUNC_FORCE_U32 = 0x7FFFFFFF  /* force the enum to 32-bit size */
} sg_compare_func;
1689 | |
1690 | /* |
1691 | sg_stencil_op |
1692 | |
1693 | The operation performed on a currently stored stencil-value when a |
1694 | comparison test passes or fails. This is used when creating a pipeline |
1695 | object in the members: |
1696 | |
1697 | sg_pipeline_desc |
1698 | .stencil |
1699 | .front |
1700 | .fail_op |
1701 | .depth_fail_op |
1702 | .pass_op |
1703 | .back |
1704 | .fail_op |
1705 | .depth_fail_op |
1706 | .pass_op |
1707 | |
1708 | The default value is SG_STENCILOP_KEEP. |
1709 | */ |
typedef enum sg_stencil_op {
    _SG_STENCILOP_DEFAULT,      /* value 0 reserved for default-init */
    SG_STENCILOP_KEEP,          /* keep the current stencil value (the default) */
    SG_STENCILOP_ZERO,          /* set the stencil value to zero */
    SG_STENCILOP_REPLACE,       /* replace with the reference value */
    SG_STENCILOP_INCR_CLAMP,    /* increment, clamping at the maximum value */
    SG_STENCILOP_DECR_CLAMP,    /* decrement, clamping at zero */
    SG_STENCILOP_INVERT,        /* bitwise-invert the stencil value */
    SG_STENCILOP_INCR_WRAP,     /* increment, wrapping around to zero */
    SG_STENCILOP_DECR_WRAP,     /* decrement, wrapping around to the maximum value */
    _SG_STENCILOP_NUM,          /* number of valid stencil ops (internal) */
    _SG_STENCILOP_FORCE_U32 = 0x7FFFFFFF    /* force the enum to 32-bit size */
} sg_stencil_op;
1723 | |
1724 | /* |
1725 | sg_blend_factor |
1726 | |
1727 | The source and destination factors in blending operations. |
1728 | This is used in the following members when creating a pipeline object: |
1729 | |
1730 | sg_pipeline_desc |
1731 | .colors[i] |
1732 | .blend |
1733 | .src_factor_rgb |
1734 | .dst_factor_rgb |
1735 | .src_factor_alpha |
1736 | .dst_factor_alpha |
1737 | |
1738 | The default value is SG_BLENDFACTOR_ONE for source |
1739 | factors, and SG_BLENDFACTOR_ZERO for destination factors. |
1740 | */ |
typedef enum sg_blend_factor {
    _SG_BLENDFACTOR_DEFAULT,    /* value 0 reserved for default-init */
    SG_BLENDFACTOR_ZERO,        /* factor 0 (the default for destination factors) */
    SG_BLENDFACTOR_ONE,         /* factor 1 (the default for source factors) */
    SG_BLENDFACTOR_SRC_COLOR,
    SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR,
    SG_BLENDFACTOR_SRC_ALPHA,
    SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA,
    SG_BLENDFACTOR_DST_COLOR,
    SG_BLENDFACTOR_ONE_MINUS_DST_COLOR,
    SG_BLENDFACTOR_DST_ALPHA,
    SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA,
    SG_BLENDFACTOR_SRC_ALPHA_SATURATED,
    /* the BLEND_* factors use the constant color set in sg_pipeline_desc.blend_color */
    SG_BLENDFACTOR_BLEND_COLOR,
    SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR,
    SG_BLENDFACTOR_BLEND_ALPHA,
    SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA,
    _SG_BLENDFACTOR_NUM,        /* number of valid blend factors (internal) */
    _SG_BLENDFACTOR_FORCE_U32 = 0x7FFFFFFF  /* force the enum to 32-bit size */
} sg_blend_factor;
1761 | |
1762 | /* |
1763 | sg_blend_op |
1764 | |
1765 | Describes how the source and destination values are combined in the |
1766 | fragment blending operation. It is used in the following members when |
1767 | creating a pipeline object: |
1768 | |
1769 | sg_pipeline_desc |
1770 | .colors[i] |
1771 | .blend |
1772 | .op_rgb |
1773 | .op_alpha |
1774 | |
1775 | The default value is SG_BLENDOP_ADD. |
1776 | */ |
typedef enum sg_blend_op {
    _SG_BLENDOP_DEFAULT,    /* value 0 reserved for default-init */
    SG_BLENDOP_ADD,                 /* result = src + dst (the default) */
    SG_BLENDOP_SUBTRACT,            /* result = src - dst */
    SG_BLENDOP_REVERSE_SUBTRACT,    /* result = dst - src */
    _SG_BLENDOP_NUM,                /* number of valid blend ops (internal) */
    _SG_BLENDOP_FORCE_U32 = 0x7FFFFFFF  /* force the enum to 32-bit size */
} sg_blend_op;
1785 | |
1786 | /* |
1787 | sg_color_mask |
1788 | |
1789 | Selects the active color channels when writing a fragment color to the |
1790 | framebuffer. This is used in the members |
1791 | sg_pipeline_desc.colors[i].write_mask when creating a pipeline object. |
1792 | |
    The default colormask is SG_COLORMASK_RGBA (write all color channels)
1794 | |
1795 | NOTE: since the color mask value 0 is reserved for the default value |
1796 | (SG_COLORMASK_RGBA), use SG_COLORMASK_NONE if all color channels |
1797 | should be disabled. |
1798 | */ |
/* channel bits: R=0x1, G=0x2, B=0x4, A=0x8; named combinations are bitwise ORs */
typedef enum sg_color_mask {
    _SG_COLORMASK_DEFAULT = 0,  /* value 0 reserved for default-init */
    SG_COLORMASK_NONE = 0x10,   /* special value for 'all channels disabled' */
    SG_COLORMASK_R = 0x1,
    SG_COLORMASK_G = 0x2,
    SG_COLORMASK_RG = 0x3,
    SG_COLORMASK_B = 0x4,
    SG_COLORMASK_RB = 0x5,
    SG_COLORMASK_GB = 0x6,
    SG_COLORMASK_RGB = 0x7,
    SG_COLORMASK_A = 0x8,
    SG_COLORMASK_RA = 0x9,
    SG_COLORMASK_GA = 0xA,
    SG_COLORMASK_RGA = 0xB,
    SG_COLORMASK_BA = 0xC,
    SG_COLORMASK_RBA = 0xD,
    SG_COLORMASK_GBA = 0xE,
    SG_COLORMASK_RGBA = 0xF,    /* write all channels (the default) */
    _SG_COLORMASK_FORCE_U32 = 0x7FFFFFFF    /* force the enum to 32-bit size */
} sg_color_mask;
1819 | |
1820 | /* |
1821 | sg_action |
1822 | |
1823 | Defines what action should be performed at the start of a render pass: |
1824 | |
1825 | SG_ACTION_CLEAR: clear the render target image |
1826 | SG_ACTION_LOAD: load the previous content of the render target image |
1827 | SG_ACTION_DONTCARE: leave the render target image content undefined |
1828 | |
1829 | This is used in the sg_pass_action structure. |
1830 | |
1831 | The default action for all pass attachments is SG_ACTION_CLEAR, with the |
    clear color rgba = {0.5f, 0.5f, 0.5f, 1.0f}, depth=1.0 and stencil=0.
1833 | |
1834 | If you want to override the default behaviour, it is important to not |
1835 | only set the clear color, but the 'action' field as well (as long as this |
1836 | is in its _SG_ACTION_DEFAULT, the value fields will be ignored). |
1837 | */ |
typedef enum sg_action {
    _SG_ACTION_DEFAULT,     /* value 0 reserved for default-init */
    SG_ACTION_CLEAR,        /* clear the render target image (the default) */
    SG_ACTION_LOAD,         /* load the previous content of the render target image */
    SG_ACTION_DONTCARE,     /* leave the render target image content undefined */
    _SG_ACTION_NUM,         /* number of valid actions (internal) */
    _SG_ACTION_FORCE_U32 = 0x7FFFFFFF   /* force the enum to 32-bit size */
} sg_action;
1846 | |
1847 | /* |
1848 | sg_pass_action |
1849 | |
1850 | The sg_pass_action struct defines the actions to be performed |
1851 | at the start of a rendering pass in the functions sg_begin_pass() |
1852 | and sg_begin_default_pass(). |
1853 | |
1854 | A separate action and clear values can be defined for each |
1855 | color attachment, and for the depth-stencil attachment. |
1856 | |
1857 | The default clear values are defined by the macros: |
1858 | |
1859 | - SG_DEFAULT_CLEAR_RED: 0.5f |
1860 | - SG_DEFAULT_CLEAR_GREEN: 0.5f |
1861 | - SG_DEFAULT_CLEAR_BLUE: 0.5f |
1862 | - SG_DEFAULT_CLEAR_ALPHA: 1.0f |
1863 | - SG_DEFAULT_CLEAR_DEPTH: 1.0f |
1864 | - SG_DEFAULT_CLEAR_STENCIL: 0 |
1865 | */ |
/* action and clear color for a single color attachment */
typedef struct sg_color_attachment_action {
    sg_action action;   /* default: SG_ACTION_CLEAR */
    sg_color value;     /* clear color, default: {0.5f, 0.5f, 0.5f, 1.0f} */
} sg_color_attachment_action;
1870 | |
/* action and clear value for the depth attachment */
typedef struct sg_depth_attachment_action {
    sg_action action;   /* default: SG_ACTION_CLEAR */
    float value;        /* clear depth, default: 1.0f */
} sg_depth_attachment_action;
1875 | |
/* action and clear value for the stencil attachment */
typedef struct sg_stencil_attachment_action {
    sg_action action;   /* default: SG_ACTION_CLEAR */
    uint8_t value;      /* clear stencil value, default: 0 */
} sg_stencil_attachment_action;
1880 | |
/* per-attachment actions performed at the start of a render pass,
   passed to sg_begin_pass() / sg_begin_default_pass() */
typedef struct sg_pass_action {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    sg_color_attachment_action colors[SG_MAX_COLOR_ATTACHMENTS];    /* one action per color attachment */
    sg_depth_attachment_action depth;       /* action for the depth attachment */
    sg_stencil_attachment_action stencil;   /* action for the stencil attachment */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_pass_action;
1888 | |
1889 | /* |
1890 | sg_bindings |
1891 | |
1892 | The sg_bindings structure defines the resource binding slots |
1893 | of the sokol_gfx render pipeline, used as argument to the |
1894 | sg_apply_bindings() function. |
1895 | |
1896 | A resource binding struct contains: |
1897 | |
1898 | - 1..N vertex buffers |
1899 | - 0..N vertex buffer offsets |
1900 | - 0..1 index buffers |
1901 | - 0..1 index buffer offsets |
1902 | - 0..N vertex shader stage images |
1903 | - 0..N fragment shader stage images |
1904 | |
1905 | The max number of vertex buffer and shader stage images |
1906 | are defined by the SG_MAX_SHADERSTAGE_BUFFERS and |
1907 | SG_MAX_SHADERSTAGE_IMAGES configuration constants. |
1908 | |
1909 | The optional buffer offsets can be used to put different unrelated |
1910 | chunks of vertex- and/or index-data into the same buffer objects. |
1911 | */ |
/* resource binding slots, passed to sg_apply_bindings() */
typedef struct sg_bindings {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    sg_buffer vertex_buffers[SG_MAX_SHADERSTAGE_BUFFERS];       /* 1..N vertex buffers */
    int vertex_buffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS];      /* optional byte offsets into the vertex buffers */
    sg_buffer index_buffer;         /* optional index buffer (for indexed rendering) */
    int index_buffer_offset;        /* optional byte offset into the index buffer */
    sg_image vs_images[SG_MAX_SHADERSTAGE_IMAGES];              /* images bound to the vertex shader stage */
    sg_image fs_images[SG_MAX_SHADERSTAGE_IMAGES];              /* images bound to the fragment shader stage */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_bindings;
1922 | |
1923 | /* |
1924 | sg_buffer_desc |
1925 | |
1926 | Creation parameters for sg_buffer objects, used in the |
1927 | sg_make_buffer() call. |
1928 | |
1929 | The default configuration is: |
1930 | |
1931 | .size: 0 (*must* be >0 for buffers without data) |
1932 | .type: SG_BUFFERTYPE_VERTEXBUFFER |
1933 | .usage: SG_USAGE_IMMUTABLE |
1934 | .data.ptr 0 (*must* be valid for immutable buffers) |
1935 | .data.size 0 (*must* be > 0 for immutable buffers) |
1936 | .label 0 (optional string label for trace hooks) |
1937 | |
1938 | The label will be ignored by sokol_gfx.h, it is only useful |
1939 | when hooking into sg_make_buffer() or sg_init_buffer() via |
1940 | the sg_install_trace_hooks() function. |
1941 | |
1942 | For immutable buffers which are initialized with initial data, |
1943 | keep the .size item zero-initialized, and set the size together with the |
1944 | pointer to the initial data in the .data item. |
1945 | |
1946 | For mutable buffers without initial data, keep the .data item |
1947 | zero-initialized, and set the buffer size in the .size item instead. |
1948 | |
1949 | You can also set both size values, but currently both size values must |
1950 | be identical (this may change in the future when the dynamic resource |
1951 | management may become more flexible). |
1952 | |
1953 | ADVANCED TOPIC: Injecting native 3D-API buffers: |
1954 | |
1955 | The following struct members allow to inject your own GL, Metal |
1956 | or D3D11 buffers into sokol_gfx: |
1957 | |
1958 | .gl_buffers[SG_NUM_INFLIGHT_FRAMES] |
1959 | .mtl_buffers[SG_NUM_INFLIGHT_FRAMES] |
1960 | .d3d11_buffer |
1961 | |
1962 | You must still provide all other struct items except the .data item, and |
1963 | these must match the creation parameters of the native buffers you |
1964 | provide. For SG_USAGE_IMMUTABLE, only provide a single native 3D-API |
1965 | buffer, otherwise you need to provide SG_NUM_INFLIGHT_FRAMES buffers |
1966 | (only for GL and Metal, not D3D11). Providing multiple buffers for GL and |
1967 | Metal is necessary because sokol_gfx will rotate through them when |
1968 | calling sg_update_buffer() to prevent lock-stalls. |
1969 | |
    Note that it is expected that immutable injected buffers have already been
    initialized with content, and the .data member must be zero-initialized!
1972 | |
1973 | Also you need to call sg_reset_state_cache() after calling native 3D-API |
1974 | functions, and before calling any sokol_gfx function. |
1975 | */ |
/* creation parameters for sg_make_buffer(), see documentation block above for defaults */
typedef struct sg_buffer_desc {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    size_t size;                /* buffer size in bytes (must be > 0 for buffers without initial data) */
    sg_buffer_type type;        /* default: SG_BUFFERTYPE_VERTEXBUFFER */
    sg_usage usage;             /* default: SG_USAGE_IMMUTABLE */
    sg_range data;              /* initial data (required for immutable buffers) */
    const char* label;          /* optional string label for trace hooks */
    /* GL specific */
    uint32_t gl_buffers[SG_NUM_INFLIGHT_FRAMES];        /* optional injected GL buffer names */
    /* Metal specific */
    const void* mtl_buffers[SG_NUM_INFLIGHT_FRAMES];    /* optional injected Metal buffer objects */
    /* D3D11 specific */
    const void* d3d11_buffer;   /* optional injected D3D11 buffer */
    /* WebGPU specific */
    const void* wgpu_buffer;    /* optional injected WebGPU buffer */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_buffer_desc;
1993 | |
1994 | /* |
1995 | sg_image_data |
1996 | |
1997 | Defines the content of an image through a 2D array of sg_range structs. |
1998 | The first array dimension is the cubemap face, and the second array |
1999 | dimension the mipmap level. |
2000 | */ |
/* image content: first array dimension is the cubemap face, second the mipmap level */
typedef struct sg_image_data {
    sg_range subimage[SG_CUBEFACE_NUM][SG_MAX_MIPMAPS];
} sg_image_data;
2004 | |
2005 | /* |
2006 | sg_image_desc |
2007 | |
2008 | Creation parameters for sg_image objects, used in the sg_make_image() |
2009 | call. |
2010 | |
2011 | The default configuration is: |
2012 | |
2013 | .type: SG_IMAGETYPE_2D |
2014 | .render_target: false |
2015 | .width 0 (must be set to >0) |
2016 | .height 0 (must be set to >0) |
2017 | .num_slices 1 (3D textures: depth; array textures: number of layers) |
2018 | .num_mipmaps: 1 |
2019 | .usage: SG_USAGE_IMMUTABLE |
2020 | .pixel_format: SG_PIXELFORMAT_RGBA8 for textures, or sg_desc.context.color_format for render targets |
2021 | .sample_count: 1 for textures, or sg_desc.context.sample_count for render targets |
2022 | .min_filter: SG_FILTER_NEAREST |
2023 | .mag_filter: SG_FILTER_NEAREST |
2024 | .wrap_u: SG_WRAP_REPEAT |
2025 | .wrap_v: SG_WRAP_REPEAT |
2026 | .wrap_w: SG_WRAP_REPEAT (only SG_IMAGETYPE_3D) |
2027 | .border_color SG_BORDERCOLOR_OPAQUE_BLACK |
2028 | .max_anisotropy 1 (must be 1..16) |
2029 | .min_lod 0.0f |
2030 | .max_lod FLT_MAX |
2031 | .data an sg_image_data struct to define the initial content |
2032 | .label 0 (optional string label for trace hooks) |
2033 | |
2034 | Q: Why is the default sample_count for render targets identical with the |
2035 | "default sample count" from sg_desc.context.sample_count? |
2036 | |
2037 | A: So that it matches the default sample count in pipeline objects. Even |
2038 | though it is a bit strange/confusing that offscreen render targets by default |
2039 | get the same sample count as the default framebuffer, but it's better that |
2040 | an offscreen render target created with default parameters matches |
2041 | a pipeline object created with default parameters. |
2042 | |
2043 | NOTE: |
2044 | |
2045 | SG_IMAGETYPE_ARRAY and SG_IMAGETYPE_3D are not supported on WebGL/GLES2, |
2046 | use sg_query_features().imagetype_array and |
2047 | sg_query_features().imagetype_3d at runtime to check if array- and |
2048 | 3D-textures are supported. |
2049 | |
2050 | Images with usage SG_USAGE_IMMUTABLE must be fully initialized by |
2051 | providing a valid .data member which points to initialization data. |
2052 | |
2053 | ADVANCED TOPIC: Injecting native 3D-API textures: |
2054 | |
2055 | The following struct members allow to inject your own GL, Metal or D3D11 |
2056 | textures into sokol_gfx: |
2057 | |
2058 | .gl_textures[SG_NUM_INFLIGHT_FRAMES] |
2059 | .mtl_textures[SG_NUM_INFLIGHT_FRAMES] |
2060 | .d3d11_texture |
2061 | .d3d11_shader_resource_view |
2062 | |
2063 | For GL, you can also specify the texture target or leave it empty to use |
2064 | the default texture target for the image type (GL_TEXTURE_2D for |
2065 | SG_IMAGETYPE_2D etc) |
2066 | |
2067 | For D3D11, you can provide either a D3D11 texture, or a |
2068 | shader-resource-view, or both. If only a texture is provided, a matching |
2069 | shader-resource-view will be created. If only a shader-resource-view is |
2070 | provided, the texture will be looked up from the shader-resource-view. |
2071 | |
2072 | The same rules apply as for injecting native buffers (see sg_buffer_desc |
2073 | documentation for more details). |
2074 | */ |
/* creation parameters for sg_make_image(), see documentation block above for details */
typedef struct sg_image_desc {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    sg_image_type type;         /* default: SG_IMAGETYPE_2D */
    bool render_target;         /* default: false (set true to create a render target image) */
    int width;                  /* width in pixels (must be set to > 0) */
    int height;                 /* height in pixels (must be set to > 0) */
    int num_slices;             /* 3D textures: depth; array textures: number of layers (default: 1) */
    int num_mipmaps;            /* default: 1 */
    sg_usage usage;             /* default: SG_USAGE_IMMUTABLE */
    sg_pixel_format pixel_format;   /* default: RGBA8 for textures, sg_desc.context.color_format for render targets */
    int sample_count;           /* default: 1 for textures, sg_desc.context.sample_count for render targets */
    sg_filter min_filter;       /* default: SG_FILTER_NEAREST */
    sg_filter mag_filter;       /* default: SG_FILTER_NEAREST */
    sg_wrap wrap_u;             /* default: SG_WRAP_REPEAT */
    sg_wrap wrap_v;             /* default: SG_WRAP_REPEAT */
    sg_wrap wrap_w;             /* default: SG_WRAP_REPEAT (only SG_IMAGETYPE_3D) */
    sg_border_color border_color;   /* default: SG_BORDERCOLOR_OPAQUE_BLACK */
    uint32_t max_anisotropy;    /* default: 1 (must be 1..16) */
    float min_lod;              /* default: 0.0f */
    float max_lod;              /* default: FLT_MAX */
    sg_image_data data;         /* initial content (required for SG_USAGE_IMMUTABLE) */
    const char* label;          /* optional string label for trace hooks */
    /* GL specific */
    uint32_t gl_textures[SG_NUM_INFLIGHT_FRAMES];       /* optional injected GL texture names */
    uint32_t gl_texture_target;     /* optional GL texture target, 0 means derive from image type */
    /* Metal specific */
    const void* mtl_textures[SG_NUM_INFLIGHT_FRAMES];   /* optional injected Metal texture objects */
    /* D3D11 specific */
    const void* d3d11_texture;                  /* optional injected D3D11 texture */
    const void* d3d11_shader_resource_view;     /* optional injected D3D11 shader-resource-view */
    /* WebGPU specific */
    const void* wgpu_texture;   /* optional injected WebGPU texture */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_image_desc;
2109 | |
2110 | /* |
2111 | sg_shader_desc |
2112 | |
2113 | The structure sg_shader_desc defines all creation parameters for shader |
2114 | programs, used as input to the sg_make_shader() function: |
2115 | |
2116 | - reflection information for vertex attributes (vertex shader inputs): |
2117 | - vertex attribute name (required for GLES2, optional for GLES3 and GL) |
2118 | - a semantic name and index (required for D3D11) |
2119 | - for each shader-stage (vertex and fragment): |
2120 | - the shader source or bytecode |
2121 | - an optional entry function name |
2122 | - an optional compile target (only for D3D11 when source is provided, |
2123 | defaults are "vs_4_0" and "ps_4_0") |
2124 | - reflection info for each uniform block used by the shader stage: |
2125 | - the size of the uniform block in bytes |
2126 | - a memory layout hint (native vs std140, only required for GL backends) |
2127 | - reflection info for each uniform block member (only required for GL backends): |
2128 | - member name |
2129 | - member type (SG_UNIFORMTYPE_xxx) |
2130 | - if the member is an array, the number of array items |
2131 | - reflection info for the texture images used by the shader stage: |
2132 | - the image type (SG_IMAGETYPE_xxx) |
2133 | - the sampler type (SG_SAMPLERTYPE_xxx, default is SG_SAMPLERTYPE_FLOAT) |
2134 | - the name of the texture sampler (required for GLES2, optional everywhere else) |
2135 | |
2136 | For all GL backends, shader source-code must be provided. For D3D11 and Metal, |
2137 | either shader source-code or byte-code can be provided. |
2138 | |
2139 | For D3D11, if source code is provided, the d3dcompiler_47.dll will be loaded |
2140 | on demand. If this fails, shader creation will fail. When compiling HLSL |
2141 | source code, you can provide an optional target string via |
2142 | sg_shader_stage_desc.d3d11_target, the default target is "vs_4_0" for the |
2143 | vertex shader stage and "ps_4_0" for the pixel shader stage. |
2144 | */ |
/* reflection info for one vertex attribute (vertex shader input) */
typedef struct sg_shader_attr_desc {
    const char* name;           // GLSL vertex attribute name (only strictly required for GLES2)
    const char* sem_name;       // HLSL semantic name (required for D3D11)
    int sem_index;              // HLSL semantic index (required for D3D11)
} sg_shader_attr_desc;
2150 | |
/* reflection info for one uniform block member (only required for GL backends) */
typedef struct sg_shader_uniform_desc {
    const char* name;       /* uniform name as it appears in the shader source */
    sg_uniform_type type;   /* data type of the member (SG_UNIFORMTYPE_xxx) */
    int array_count;        /* number of array items if the member is an array */
} sg_shader_uniform_desc;
2156 | |
/* reflection info for one uniform block used by a shader stage */
typedef struct sg_shader_uniform_block_desc {
    size_t size;                /* size of the uniform block in bytes */
    sg_uniform_layout layout;   /* memory layout hint (native vs std140, only required for GL backends) */
    sg_shader_uniform_desc uniforms[SG_MAX_UB_MEMBERS];     /* member reflection (only required for GL backends) */
} sg_shader_uniform_block_desc;
2162 | |
/* reflection info for one texture image used by a shader stage */
typedef struct sg_shader_image_desc {
    const char* name;               /* sampler name (required for GLES2, optional everywhere else) */
    sg_image_type image_type;       /* the image type (SG_IMAGETYPE_xxx) */
    sg_sampler_type sampler_type;   /* the sampler type (SG_SAMPLERTYPE_xxx, default: SG_SAMPLERTYPE_FLOAT) */
} sg_shader_image_desc;
2168 | |
/* creation parameters for one shader stage (vertex or fragment) */
typedef struct sg_shader_stage_desc {
    const char* source;         /* shader source code (required for GL backends) */
    sg_range bytecode;          /* shader byte code (alternative to source for D3D11 and Metal) */
    const char* entry;          /* optional entry function name */
    const char* d3d11_target;   /* optional HLSL compile target (defaults: "vs_4_0" / "ps_4_0") */
    sg_shader_uniform_block_desc uniform_blocks[SG_MAX_SHADERSTAGE_UBS];    /* uniform block reflection */
    sg_shader_image_desc images[SG_MAX_SHADERSTAGE_IMAGES];                 /* texture image reflection */
} sg_shader_stage_desc;
2177 | |
/* creation parameters for sg_make_shader(), see documentation block above */
typedef struct sg_shader_desc {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    sg_shader_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES];    /* vertex attribute reflection */
    sg_shader_stage_desc vs;    /* the vertex shader stage */
    sg_shader_stage_desc fs;    /* the fragment shader stage */
    const char* label;          /* optional string label for trace hooks */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_shader_desc;
2186 | |
2187 | /* |
2188 | sg_pipeline_desc |
2189 | |
2190 | The sg_pipeline_desc struct defines all creation parameters for an |
2191 | sg_pipeline object, used as argument to the sg_make_pipeline() function: |
2192 | |
2193 | - the vertex layout for all input vertex buffers |
2194 | - a shader object |
2195 | - the 3D primitive type (points, lines, triangles, ...) |
2196 | - the index type (none, 16- or 32-bit) |
2197 | - all the fixed-function-pipeline state (depth-, stencil-, blend-state, etc...) |
2198 | |
2199 | If the vertex data has no gaps between vertex components, you can omit |
2200 | the .layout.buffers[].stride and layout.attrs[].offset items (leave them |
2201 | default-initialized to 0), sokol-gfx will then compute the offsets and |
2202 | strides from the vertex component formats (.layout.attrs[].format). |
2203 | Please note that ALL vertex attribute offsets must be 0 in order for the |
2204 | automatic offset computation to kick in. |
2205 | |
2206 | The default configuration is as follows: |
2207 | |
2208 | .shader: 0 (must be initialized with a valid sg_shader id!) |
2209 | .layout: |
2210 | .buffers[]: vertex buffer layouts |
2211 | .stride: 0 (if no stride is given it will be computed) |
2212 | .step_func SG_VERTEXSTEP_PER_VERTEX |
2213 | .step_rate 1 |
2214 | .attrs[]: vertex attribute declarations |
2215 | .buffer_index 0 the vertex buffer bind slot |
2216 | .offset 0 (offsets can be omitted if the vertex layout has no gaps) |
2217 | .format SG_VERTEXFORMAT_INVALID (must be initialized!) |
2218 | .depth: |
2219 | .pixel_format: sg_desc.context.depth_format |
2220 | .compare: SG_COMPAREFUNC_ALWAYS |
2221 | .write_enabled: false |
2222 | .bias: 0.0f |
2223 | .bias_slope_scale: 0.0f |
2224 | .bias_clamp: 0.0f |
2225 | .stencil: |
2226 | .enabled: false |
2227 | .front/back: |
2228 | .compare: SG_COMPAREFUNC_ALWAYS |
2229 | .fail_op: SG_STENCILOP_KEEP |
2230 | .depth_fail_op: SG_STENCILOP_KEEP |
2231 | .pass_op: SG_STENCILOP_KEEP |
2232 | .read_mask: 0 |
2233 | .write_mask: 0 |
2234 | .ref: 0 |
2235 | .color_count 1 |
2236 | .colors[0..color_count] |
2237 | .pixel_format sg_desc.context.color_format |
2238 | .write_mask: SG_COLORMASK_RGBA |
2239 | .blend: |
2240 | .enabled: false |
2241 | .src_factor_rgb: SG_BLENDFACTOR_ONE |
2242 | .dst_factor_rgb: SG_BLENDFACTOR_ZERO |
2243 | .op_rgb: SG_BLENDOP_ADD |
2244 | .src_factor_alpha: SG_BLENDFACTOR_ONE |
2245 | .dst_factor_alpha: SG_BLENDFACTOR_ZERO |
2246 | .op_alpha: SG_BLENDOP_ADD |
2247 | .primitive_type: SG_PRIMITIVETYPE_TRIANGLES |
2248 | .index_type: SG_INDEXTYPE_NONE |
2249 | .cull_mode: SG_CULLMODE_NONE |
2250 | .face_winding: SG_FACEWINDING_CW |
2251 | .sample_count: sg_desc.context.sample_count |
2252 | .blend_color: (sg_color) { 0.0f, 0.0f, 0.0f, 0.0f } |
2253 | .alpha_to_coverage_enabled: false |
2254 | .label 0 (optional string label for trace hooks) |
2255 | */ |
/* layout of one vertex buffer bind slot */
typedef struct sg_buffer_layout_desc {
    int stride;                 /* vertex stride in bytes, 0 means: computed from attribute formats */
    sg_vertex_step step_func;   /* default: SG_VERTEXSTEP_PER_VERTEX */
    int step_rate;              /* default: 1 */
    #if defined(SOKOL_ZIG_BINDINGS)
    uint32_t __pad[2];          /* extra padding when compiled for the Zig bindings */
    #endif
} sg_buffer_layout_desc;
2264 | |
/* declaration of one vertex attribute */
typedef struct sg_vertex_attr_desc {
    int buffer_index;           /* the vertex buffer bind slot (default: 0) */
    int offset;                 /* byte offset in the vertex, 0 means: computed (requires ALL offsets to be 0) */
    sg_vertex_format format;    /* default: SG_VERTEXFORMAT_INVALID (must be initialized!) */
    #if defined(SOKOL_ZIG_BINDINGS)
    uint32_t __pad[2];          /* extra padding when compiled for the Zig bindings */
    #endif
} sg_vertex_attr_desc;
2273 | |
/* complete vertex layout: per-buffer layouts plus per-attribute declarations */
typedef struct sg_layout_desc {
    sg_buffer_layout_desc buffers[SG_MAX_SHADERSTAGE_BUFFERS];
    sg_vertex_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES];
} sg_layout_desc;
2278 | |
/* stencil state for one face (front or back) */
typedef struct sg_stencil_face_state {
    sg_compare_func compare;        /* default: SG_COMPAREFUNC_ALWAYS */
    sg_stencil_op fail_op;          /* op when the stencil test fails (default: SG_STENCILOP_KEEP) */
    sg_stencil_op depth_fail_op;    /* op when the depth test fails (default: SG_STENCILOP_KEEP) */
    sg_stencil_op pass_op;          /* op when both tests pass (default: SG_STENCILOP_KEEP) */
} sg_stencil_face_state;
2285 | |
/* complete stencil state of a pipeline object */
typedef struct sg_stencil_state {
    bool enabled;                   /* default: false */
    sg_stencil_face_state front;    /* state for front-facing primitives */
    sg_stencil_face_state back;     /* state for back-facing primitives */
    uint8_t read_mask;              /* default: 0 */
    uint8_t write_mask;             /* default: 0 */
    uint8_t ref;                    /* stencil reference value (default: 0) */
} sg_stencil_state;
2294 | |
/* depth state of a pipeline object */
typedef struct sg_depth_state {
    sg_pixel_format pixel_format;   /* default: sg_desc.context.depth_format */
    sg_compare_func compare;        /* default: SG_COMPAREFUNC_ALWAYS */
    bool write_enabled;             /* default: false */
    float bias;                     /* default: 0.0f */
    float bias_slope_scale;         /* default: 0.0f */
    float bias_clamp;               /* default: 0.0f */
} sg_depth_state;
2303 | |
/* per-color-attachment blend state of a pipeline object */
typedef struct sg_blend_state {
    bool enabled;                       /* default: false */
    sg_blend_factor src_factor_rgb;     /* default: SG_BLENDFACTOR_ONE */
    sg_blend_factor dst_factor_rgb;     /* default: SG_BLENDFACTOR_ZERO */
    sg_blend_op op_rgb;                 /* default: SG_BLENDOP_ADD */
    sg_blend_factor src_factor_alpha;   /* default: SG_BLENDFACTOR_ONE */
    sg_blend_factor dst_factor_alpha;   /* default: SG_BLENDFACTOR_ZERO */
    sg_blend_op op_alpha;               /* default: SG_BLENDOP_ADD */
} sg_blend_state;
2313 | |
/* per-color-attachment state of a pipeline object */
typedef struct sg_color_state {
    sg_pixel_format pixel_format;   /* default: sg_desc.context.color_format */
    sg_color_mask write_mask;       /* default: SG_COLORMASK_RGBA */
    sg_blend_state blend;           /* blend state for this attachment */
} sg_color_state;
2319 | |
/* creation parameters for sg_make_pipeline(), see documentation block above for defaults */
typedef struct sg_pipeline_desc {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    sg_shader shader;           /* must be initialized with a valid sg_shader id! */
    sg_layout_desc layout;      /* vertex layout for all input vertex buffers */
    sg_depth_state depth;       /* depth state */
    sg_stencil_state stencil;   /* stencil state */
    int color_count;            /* number of color attachments (default: 1) */
    sg_color_state colors[SG_MAX_COLOR_ATTACHMENTS];    /* per-color-attachment format, write mask and blend state */
    sg_primitive_type primitive_type;   /* default: SG_PRIMITIVETYPE_TRIANGLES */
    sg_index_type index_type;           /* default: SG_INDEXTYPE_NONE */
    sg_cull_mode cull_mode;             /* default: SG_CULLMODE_NONE */
    sg_face_winding face_winding;       /* default: SG_FACEWINDING_CW */
    int sample_count;                   /* default: sg_desc.context.sample_count */
    sg_color blend_color;               /* constant color for BLEND_COLOR/BLEND_ALPHA blend factors */
    bool alpha_to_coverage_enabled;     /* default: false */
    const char* label;                  /* optional string label for trace hooks */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_pipeline_desc;
2338 | |
2339 | /* |
2340 | sg_pass_desc |
2341 | |
2342 | Creation parameters for an sg_pass object, used as argument |
2343 | to the sg_make_pass() function. |
2344 | |
2345 | A pass object contains 1..4 color-attachments and none, or one, |
2346 | depth-stencil-attachment. Each attachment consists of |
2347 | an image, and two additional indices describing |
2348 | which subimage the pass will render to: one mipmap index, and |
2349 | if the image is a cubemap, array-texture or 3D-texture, the |
2350 | face-index, array-layer or depth-slice. |
2351 | |
2352 | Pass images must fulfill the following requirements: |
2353 | |
2354 | All images must have: |
2355 | - been created as render target (sg_image_desc.render_target = true) |
2356 | - the same size |
2357 | - the same sample count |
2358 | |
2359 | In addition, all color-attachment images must have the same pixel format. |
2360 | */ |
/* one pass attachment: the image plus which subimage to render to */
typedef struct sg_pass_attachment_desc {
    sg_image image;     /* the render target image (must have been created with render_target = true) */
    int mip_level;      /* mipmap level to render to */
    int slice;          /* cube texture: face; array texture: layer; 3D texture: slice */
} sg_pass_attachment_desc;
2366 | |
/* creation parameters for sg_make_pass(), see documentation block above for requirements */
typedef struct sg_pass_desc {
    uint32_t _start_canary;     /* canary for detecting missing zero-initialization (internal) */
    sg_pass_attachment_desc color_attachments[SG_MAX_COLOR_ATTACHMENTS];    /* 1..4 color attachments */
    sg_pass_attachment_desc depth_stencil_attachment;   /* optional depth-stencil attachment */
    const char* label;          /* optional string label for trace hooks */
    uint32_t _end_canary;       /* canary for detecting missing zero-initialization (internal) */
} sg_pass_desc;
2374 | |
2375 | /* |
2376 | sg_trace_hooks |
2377 | |
2378 | Installable callback functions to keep track of the sokol-gfx calls, |
2379 | this is useful for debugging, or keeping track of resource creation |
2380 | and destruction. |
2381 | |
2382 | Trace hooks are installed with sg_install_trace_hooks(), this returns |
2383 | another sg_trace_hooks struct with the previous set of |
2384 | trace hook function pointers. These should be invoked by the |
2385 | new trace hooks to form a proper call chain. |
2386 | */ |
/* NOTE: each hook mirrors the public API call of the same name and receives
   the sg_trace_hooks.user_data pointer as its last argument; unused hooks
   may be left zero-initialized */
typedef struct sg_trace_hooks {
    void* user_data;
    void (*reset_state_cache)(void* user_data);
    /* resource creation (called with the creation-desc and the resulting handle) */
    void (*make_buffer)(const sg_buffer_desc* desc, sg_buffer result, void* user_data);
    void (*make_image)(const sg_image_desc* desc, sg_image result, void* user_data);
    void (*make_shader)(const sg_shader_desc* desc, sg_shader result, void* user_data);
    void (*make_pipeline)(const sg_pipeline_desc* desc, sg_pipeline result, void* user_data);
    void (*make_pass)(const sg_pass_desc* desc, sg_pass result, void* user_data);
    /* resource destruction */
    void (*destroy_buffer)(sg_buffer buf, void* user_data);
    void (*destroy_image)(sg_image img, void* user_data);
    void (*destroy_shader)(sg_shader shd, void* user_data);
    void (*destroy_pipeline)(sg_pipeline pip, void* user_data);
    void (*destroy_pass)(sg_pass pass, void* user_data);
    /* resource data updates */
    void (*update_buffer)(sg_buffer buf, const sg_range* data, void* user_data);
    void (*update_image)(sg_image img, const sg_image_data* data, void* user_data);
    void (*append_buffer)(sg_buffer buf, const sg_range* data, int result, void* user_data);
    /* render-pass and drawing functions */
    void (*begin_default_pass)(const sg_pass_action* pass_action, int width, int height, void* user_data);
    void (*begin_pass)(sg_pass pass, const sg_pass_action* pass_action, void* user_data);
    void (*apply_viewport)(int x, int y, int width, int height, bool origin_top_left, void* user_data);
    void (*apply_scissor_rect)(int x, int y, int width, int height, bool origin_top_left, void* user_data);
    void (*apply_pipeline)(sg_pipeline pip, void* user_data);
    void (*apply_bindings)(const sg_bindings* bindings, void* user_data);
    void (*apply_uniforms)(sg_shader_stage stage, int ub_index, const sg_range* data, void* user_data);
    void (*draw)(int base_element, int num_elements, int num_instances, void* user_data);
    void (*end_pass)(void* user_data);
    void (*commit)(void* user_data);
    /* split resource allocation/initialization steps (async setup) */
    void (*alloc_buffer)(sg_buffer result, void* user_data);
    void (*alloc_image)(sg_image result, void* user_data);
    void (*alloc_shader)(sg_shader result, void* user_data);
    void (*alloc_pipeline)(sg_pipeline result, void* user_data);
    void (*alloc_pass)(sg_pass result, void* user_data);
    void (*dealloc_buffer)(sg_buffer buf_id, void* user_data);
    void (*dealloc_image)(sg_image img_id, void* user_data);
    void (*dealloc_shader)(sg_shader shd_id, void* user_data);
    void (*dealloc_pipeline)(sg_pipeline pip_id, void* user_data);
    void (*dealloc_pass)(sg_pass pass_id, void* user_data);
    void (*init_buffer)(sg_buffer buf_id, const sg_buffer_desc* desc, void* user_data);
    void (*init_image)(sg_image img_id, const sg_image_desc* desc, void* user_data);
    void (*init_shader)(sg_shader shd_id, const sg_shader_desc* desc, void* user_data);
    void (*init_pipeline)(sg_pipeline pip_id, const sg_pipeline_desc* desc, void* user_data);
    void (*init_pass)(sg_pass pass_id, const sg_pass_desc* desc, void* user_data);
    void (*uninit_buffer)(sg_buffer buf_id, void* user_data);
    void (*uninit_image)(sg_image img_id, void* user_data);
    void (*uninit_shader)(sg_shader shd_id, void* user_data);
    void (*uninit_pipeline)(sg_pipeline pip_id, void* user_data);
    void (*uninit_pass)(sg_pass pass_id, void* user_data);
    void (*fail_buffer)(sg_buffer buf_id, void* user_data);
    void (*fail_image)(sg_image img_id, void* user_data);
    void (*fail_shader)(sg_shader shd_id, void* user_data);
    void (*fail_pipeline)(sg_pipeline pip_id, void* user_data);
    void (*fail_pass)(sg_pass pass_id, void* user_data);
    /* debug groups */
    void (*push_debug_group)(const char* name, void* user_data);
    void (*pop_debug_group)(void* user_data);
    /* error events */
    void (*err_buffer_pool_exhausted)(void* user_data);
    void (*err_image_pool_exhausted)(void* user_data);
    void (*err_shader_pool_exhausted)(void* user_data);
    void (*err_pipeline_pool_exhausted)(void* user_data);
    void (*err_pass_pool_exhausted)(void* user_data);
    void (*err_context_mismatch)(void* user_data);
    void (*err_pass_invalid)(void* user_data);
    void (*err_draw_invalid)(void* user_data);
    void (*err_bindings_invalid)(void* user_data);
} sg_trace_hooks;
2450 | |
2451 | /* |
2452 | sg_buffer_info |
2453 | sg_image_info |
2454 | sg_shader_info |
2455 | sg_pipeline_info |
2456 | sg_pass_info |
2457 | |
2458 | These structs contain various internal resource attributes which |
2459 | might be useful for debug-inspection. Please don't rely on the |
2460 | actual content of those structs too much, as they are quite closely |
2461 | tied to sokol_gfx.h internals and may change more frequently than |
2462 | the other public API elements. |
2463 | |
2464 | The *_info structs are used as the return values of the following functions: |
2465 | |
2466 | sg_query_buffer_info() |
2467 | sg_query_image_info() |
2468 | sg_query_shader_info() |
2469 | sg_query_pipeline_info() |
2470 | sg_query_pass_info() |
2471 | */ |
typedef struct sg_slot_info {
    sg_resource_state state;    /* the current state of this resource slot */
    uint32_t res_id;        /* type-neutral resource id (e.g. sg_buffer.id) */
    uint32_t ctx_id;        /* the context this resource belongs to */
} sg_slot_info;
2477 | |
/* debug-inspection info for a buffer, returned by sg_query_buffer_info() */
typedef struct sg_buffer_info {
    sg_slot_info slot;              /* resource pool slot info */
    uint32_t update_frame_index;    /* frame index of last sg_update_buffer() */
    uint32_t append_frame_index;    /* frame index of last sg_append_buffer() */
    int append_pos;                 /* current position in buffer for sg_append_buffer() */
    bool append_overflow;           /* is buffer in overflow state (due to sg_append_buffer) */
    int num_slots;                  /* number of renaming-slots for dynamically updated buffers */
    int active_slot;                /* currently active write-slot for dynamically updated buffers */
} sg_buffer_info;
2487 | |
/* debug-inspection info for an image, returned by sg_query_image_info() */
typedef struct sg_image_info {
    sg_slot_info slot;              /* resource pool slot info */
    uint32_t upd_frame_index;       /* frame index of last sg_update_image() */
    int num_slots;                  /* number of renaming-slots for dynamically updated images */
    int active_slot;                /* currently active write-slot for dynamically updated images */
    int width;                      /* image width */
    int height;                     /* image height */
} sg_image_info;
2496 | |
/* debug-inspection info for a shader, returned by sg_query_shader_info() */
typedef struct sg_shader_info {
    sg_slot_info slot;      /* resource pool slot info */
} sg_shader_info;
2500 | |
/* debug-inspection info for a pipeline, returned by sg_query_pipeline_info() */
typedef struct sg_pipeline_info {
    sg_slot_info slot;      /* resource pool slot info */
} sg_pipeline_info;
2504 | |
/* debug-inspection info for a pass, returned by sg_query_pass_info() */
typedef struct sg_pass_info {
    sg_slot_info slot;      /* resource pool slot info */
} sg_pass_info;
2508 | |
2509 | /* |
2510 | sg_desc |
2511 | |
2512 | The sg_desc struct contains configuration values for sokol_gfx, |
2513 | it is used as parameter to the sg_setup() call. |
2514 | |
2515 | NOTE that all callback function pointers come in two versions, one without |
2516 | a userdata pointer, and one with a userdata pointer. You would |
2517 | either initialize one or the other depending on whether you pass data |
2518 | to your callbacks. |
2519 | |
2520 | FIXME: explain the various configuration options |
2521 | |
2522 | The default configuration is: |
2523 | |
2524 | .buffer_pool_size 128 |
2525 | .image_pool_size 128 |
2526 | .shader_pool_size 32 |
2527 | .pipeline_pool_size 64 |
2528 | .pass_pool_size 16 |
2529 | .context_pool_size 16 |
2530 | .uniform_buffer_size 4 MB (4*1024*1024) |
2531 | .staging_buffer_size 8 MB (8*1024*1024) |
2532 | .sampler_cache_size 64 |
2533 | .max_commit_listeners 1024 |
2534 | .disable_validation false |
2535 | |
2536 | .allocator.alloc 0 (in this case, malloc() will be called) |
2537 | .allocator.free 0 (in this case, free() will be called) |
2538 | .allocator.user_data 0 |
2539 | |
2540 | .context.color_format: default value depends on selected backend: |
2541 | all GL backends: SG_PIXELFORMAT_RGBA8 |
2542 | Metal and D3D11: SG_PIXELFORMAT_BGRA8 |
2543 | WGPU: *no default* (must be queried from WGPU swapchain) |
2544 | .context.depth_format SG_PIXELFORMAT_DEPTH_STENCIL |
2545 | .context.sample_count 1 |
2546 | |
2547 | GL specific: |
2548 | .context.gl.force_gles2 |
2549 | if this is true the GL backend will act in "GLES2 fallback mode" even |
2550 | when compiled with SOKOL_GLES3, this is useful to fall back |
2551 | to traditional WebGL if a browser doesn't support a WebGL2 context |
2552 | |
2553 | Metal specific: |
    (NOTE: All Objective-C object references are transferred through
    a bridged (const void*) to sokol_gfx, which will use an unretained
    bridge cast (__bridge id<xxx>) to retrieve the Objective-C
    references back. Since the bridge cast is unretained, the caller
    must hold a strong reference to the Objective-C object for the
    duration of the sokol_gfx call!)
2560 | |
2561 | .context.metal.device |
2562 | a pointer to the MTLDevice object |
2563 | .context.metal.renderpass_descriptor_cb |
    .context.metal.renderpass_descriptor_userdata_cb
2565 | A C callback function to obtain the MTLRenderPassDescriptor for the |
2566 | current frame when rendering to the default framebuffer, will be called |
2567 | in sg_begin_default_pass(). |
2568 | .context.metal.drawable_cb |
2569 | .context.metal.drawable_userdata_cb |
2570 | a C callback function to obtain a MTLDrawable for the current |
2571 | frame when rendering to the default framebuffer, will be called in |
2572 | sg_end_pass() of the default pass |
2573 | .context.metal.user_data |
2574 | optional user data pointer passed to the userdata versions of |
2575 | callback functions |
2576 | |
2577 | D3D11 specific: |
2578 | .context.d3d11.device |
2579 | a pointer to the ID3D11Device object, this must have been created |
2580 | before sg_setup() is called |
2581 | .context.d3d11.device_context |
2582 | a pointer to the ID3D11DeviceContext object |
2583 | .context.d3d11.render_target_view_cb |
2584 | .context.d3d11.render_target_view_userdata_cb |
2585 | a C callback function to obtain a pointer to the current |
2586 | ID3D11RenderTargetView object of the default framebuffer, |
2587 | this function will be called in sg_begin_pass() when rendering |
2588 | to the default framebuffer |
2589 | .context.d3d11.depth_stencil_view_cb |
2590 | .context.d3d11.depth_stencil_view_userdata_cb |
2591 | a C callback function to obtain a pointer to the current |
2592 | ID3D11DepthStencilView object of the default framebuffer, |
2593 | this function will be called in sg_begin_pass() when rendering |
2594 | to the default framebuffer |
    .context.d3d11.user_data
2596 | optional user data pointer passed to the userdata versions of |
2597 | callback functions |
2598 | |
2599 | WebGPU specific: |
2600 | .context.wgpu.device |
2601 | a WGPUDevice handle |
2602 | .context.wgpu.render_format |
2603 | WGPUTextureFormat of the swap chain surface |
2604 | .context.wgpu.render_view_cb |
2605 | .context.wgpu.render_view_userdata_cb |
2606 | callback to get the current WGPUTextureView of the swapchain's |
2607 | rendering attachment (may be an MSAA surface) |
2608 | .context.wgpu.resolve_view_cb |
2609 | .context.wgpu.resolve_view_userdata_cb |
2610 | callback to get the current WGPUTextureView of the swapchain's |
2611 | MSAA-resolve-target surface, must return 0 if not MSAA rendering |
2612 | .context.wgpu.depth_stencil_view_cb |
2613 | .context.wgpu.depth_stencil_view_userdata_cb |
2614 | callback to get current default-pass depth-stencil-surface WGPUTextureView |
        the pixel format of the default WGPUTextureView must be WGPUTextureFormat_Depth24PlusStencil8
    .context.wgpu.user_data
2617 | optional user data pointer passed to the userdata versions of |
2618 | callback functions |
2619 | |
2620 | When using sokol_gfx.h and sokol_app.h together, consider using the |
2621 | helper function sapp_sgcontext() in the sokol_glue.h header to |
2622 | initialize the sg_desc.context nested struct. sapp_sgcontext() returns |
2623 | a completely initialized sg_context_desc struct with information |
2624 | provided by sokol_app.h. |
2625 | */ |
/* GL-specific context setup parameters (see sg_desc documentation above) */
typedef struct sg_gl_context_desc {
    bool force_gles2;   /* if true, act in GLES2 fallback mode even when compiled with SOKOL_GLES3 */
} sg_gl_context_desc;
2629 | |
/* Metal-specific context setup parameters; Objective-C references are passed
   as bridged (const void*) pointers, the caller must keep them strongly
   referenced for the duration of the sokol_gfx calls */
typedef struct sg_metal_context_desc {
    const void* device;                                     /* MTLDevice */
    const void* (*renderpass_descriptor_cb)(void);          /* returns MTLRenderPassDescriptor for the default framebuffer */
    const void* (*renderpass_descriptor_userdata_cb)(void*);/* same, with user_data argument */
    const void* (*drawable_cb)(void);                       /* returns MTLDrawable for the current frame */
    const void* (*drawable_userdata_cb)(void*);             /* same, with user_data argument */
    void* user_data;                                        /* passed to the *_userdata_cb versions */
} sg_metal_context_desc;
2638 | |
/* D3D11-specific context setup parameters (see sg_desc documentation above) */
typedef struct sg_d3d11_context_desc {
    const void* device;                                         /* ID3D11Device (must exist before sg_setup()) */
    const void* device_context;                                 /* ID3D11DeviceContext */
    const void* (*render_target_view_cb)(void);                 /* returns current default-framebuffer ID3D11RenderTargetView */
    const void* (*render_target_view_userdata_cb)(void*);       /* same, with user_data argument */
    const void* (*depth_stencil_view_cb)(void);                 /* returns current default-framebuffer ID3D11DepthStencilView */
    const void* (*depth_stencil_view_userdata_cb)(void*);       /* same, with user_data argument */
    void* user_data;                                            /* passed to the *_userdata_cb versions */
} sg_d3d11_context_desc;
2648 | |
/* WebGPU-specific context setup parameters (see sg_desc documentation above) */
typedef struct sg_wgpu_context_desc {
    const void* device;                             /* WGPUDevice */
    const void* (*render_view_cb)(void);            /* returns WGPUTextureView of the swapchain's rendering attachment */
    const void* (*render_view_userdata_cb)(void*);  /* same, with user_data argument */
    const void* (*resolve_view_cb)(void);           /* returns WGPUTextureView of the MSAA-resolve target (0 if not MSAA) */
    const void* (*resolve_view_userdata_cb)(void*);
    const void* (*depth_stencil_view_cb)(void);     /* returns depth-stencil WGPUTextureView (presumably WGPUTextureFormat_Depth24PlusStencil8 — verify against backend code) */
    const void* (*depth_stencil_view_userdata_cb)(void*);
    void* user_data;                                /* passed to the *_userdata_cb versions */
} sg_wgpu_context_desc;
2659 | |
/* describes the default framebuffer and holds the backend-specific context
   parameters; usually initialized via sokol_glue.h's sapp_sgcontext() */
typedef struct sg_context_desc {
    sg_pixel_format color_format;   /* default: backend-dependent (see sg_desc docs above) */
    sg_pixel_format depth_format;   /* default: SG_PIXELFORMAT_DEPTH_STENCIL */
    int sample_count;               /* default: 1 */
    sg_gl_context_desc gl;
    sg_metal_context_desc metal;
    sg_d3d11_context_desc d3d11;
    sg_wgpu_context_desc wgpu;
} sg_context_desc;
2669 | |
2670 | /* |
2671 | sg_commit_listener |
2672 | |
2673 | Used with function sg_add_commit_listener() to add a callback |
2674 | which will be called in sg_commit(). This is useful for libraries |
2675 | building on top of sokol-gfx to be notified about when a frame |
    ends (instead of having to guess, or add a manual 'new-frame'
    function).
2678 | */ |
typedef struct sg_commit_listener {
    void (*func)(void* user_data);  /* called in sg_commit(), receives user_data */
    void* user_data;
} sg_commit_listener;
2683 | |
2684 | /* |
2685 | sg_allocator |
2686 | |
2687 | Used in sg_desc to provide custom memory-alloc and -free functions |
2688 | to sokol_gfx.h. If memory management should be overridden, both the |
2689 | alloc and free function must be provided (e.g. it's not valid to |
2690 | override one function but not the other). |
2691 | */ |
/* custom memory allocation functions; either provide both alloc and free,
   or neither (defaults: malloc()/free()) */
typedef struct sg_allocator {
    void* (*alloc)(size_t size, void* user_data);
    void (*free)(void* ptr, void* user_data);
    void* user_data;    /* passed through to alloc/free */
} sg_allocator;
2697 | |
2698 | /* |
2699 | sg_logger |
2700 | |
2701 | Used in sg_desc to provide custom log callbacks to sokol_gfx.h. |
2702 | Default behavior is SOKOL_LOG(message). |
2703 | */ |
/* custom log callback; default behavior is SOKOL_LOG(message) */
typedef struct sg_logger {
    void (*log_cb)(const char* message, void* user_data);
    void* user_data;    /* passed through to log_cb */
} sg_logger;
2708 | |
/* sokol-gfx configuration, passed to sg_setup(); zero-initialized members
   get the default values listed in the sg_desc documentation above */
typedef struct sg_desc {
    uint32_t _start_canary;     /* internal: guards against missing zero-initialization */
    int buffer_pool_size;       /* default: 128 */
    int image_pool_size;        /* default: 128 */
    int shader_pool_size;       /* default: 32 */
    int pipeline_pool_size;     /* default: 64 */
    int pass_pool_size;         /* default: 16 */
    int context_pool_size;      /* default: 16 */
    int uniform_buffer_size;    /* default: 4 MB (4*1024*1024) */
    int staging_buffer_size;    /* default: 8 MB (8*1024*1024) */
    int sampler_cache_size;     /* default: 64 */
    int max_commit_listeners;   /* default: 1024 */
    bool disable_validation;    // disable validation layer even in debug mode, useful for tests
    sg_allocator allocator;     // optional memory allocation overrides
    sg_logger logger;           // optional log function override
    sg_context_desc context;    // default-framebuffer and backend-specific setup
    uint32_t _end_canary;       /* internal: guards against missing zero-initialization */
} sg_desc;
2727 | |
/* setup and misc functions */
SOKOL_GFX_API_DECL void sg_setup(const sg_desc* desc);
SOKOL_GFX_API_DECL void sg_shutdown(void);
SOKOL_GFX_API_DECL bool sg_isvalid(void);
SOKOL_GFX_API_DECL void sg_reset_state_cache(void);
/* returns the previously installed trace hooks (see sg_trace_hooks docs above) */
SOKOL_GFX_API_DECL sg_trace_hooks sg_install_trace_hooks(const sg_trace_hooks* trace_hooks);
SOKOL_GFX_API_DECL void sg_push_debug_group(const char* name);
SOKOL_GFX_API_DECL void sg_pop_debug_group(void);
SOKOL_GFX_API_DECL bool sg_add_commit_listener(sg_commit_listener listener);
SOKOL_GFX_API_DECL bool sg_remove_commit_listener(sg_commit_listener listener);

/* resource creation, destruction and updating */
SOKOL_GFX_API_DECL sg_buffer sg_make_buffer(const sg_buffer_desc* desc);
SOKOL_GFX_API_DECL sg_image sg_make_image(const sg_image_desc* desc);
SOKOL_GFX_API_DECL sg_shader sg_make_shader(const sg_shader_desc* desc);
SOKOL_GFX_API_DECL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc);
SOKOL_GFX_API_DECL sg_pass sg_make_pass(const sg_pass_desc* desc);
SOKOL_GFX_API_DECL void sg_destroy_buffer(sg_buffer buf);
SOKOL_GFX_API_DECL void sg_destroy_image(sg_image img);
SOKOL_GFX_API_DECL void sg_destroy_shader(sg_shader shd);
SOKOL_GFX_API_DECL void sg_destroy_pipeline(sg_pipeline pip);
SOKOL_GFX_API_DECL void sg_destroy_pass(sg_pass pass);
SOKOL_GFX_API_DECL void sg_update_buffer(sg_buffer buf, const sg_range* data);
SOKOL_GFX_API_DECL void sg_update_image(sg_image img, const sg_image_data* data);
/* returns an int result (presumably the write position, cf. sg_buffer_info.append_pos — verify in implementation) */
SOKOL_GFX_API_DECL int sg_append_buffer(sg_buffer buf, const sg_range* data);
SOKOL_GFX_API_DECL bool sg_query_buffer_overflow(sg_buffer buf);
SOKOL_GFX_API_DECL bool sg_query_buffer_will_overflow(sg_buffer buf, size_t size);
2755 | |
/* rendering functions (the *f variants take float arguments instead of int) */
SOKOL_GFX_API_DECL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height);
SOKOL_GFX_API_DECL void sg_begin_default_passf(const sg_pass_action* pass_action, float width, float height);
SOKOL_GFX_API_DECL void sg_begin_pass(sg_pass pass, const sg_pass_action* pass_action);
SOKOL_GFX_API_DECL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left);
SOKOL_GFX_API_DECL void sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left);
SOKOL_GFX_API_DECL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left);
SOKOL_GFX_API_DECL void sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left);
SOKOL_GFX_API_DECL void sg_apply_pipeline(sg_pipeline pip);
SOKOL_GFX_API_DECL void sg_apply_bindings(const sg_bindings* bindings);
SOKOL_GFX_API_DECL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data);
SOKOL_GFX_API_DECL void sg_draw(int base_element, int num_elements, int num_instances);
SOKOL_GFX_API_DECL void sg_end_pass(void);
SOKOL_GFX_API_DECL void sg_commit(void);
2770 | |
/* getting information */
SOKOL_GFX_API_DECL sg_desc sg_query_desc(void);
SOKOL_GFX_API_DECL sg_backend sg_query_backend(void);
SOKOL_GFX_API_DECL sg_features sg_query_features(void);
SOKOL_GFX_API_DECL sg_limits sg_query_limits(void);
SOKOL_GFX_API_DECL sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt);
/* get current state of a resource (INITIAL, ALLOC, VALID, FAILED, INVALID) */
SOKOL_GFX_API_DECL sg_resource_state sg_query_buffer_state(sg_buffer buf);
SOKOL_GFX_API_DECL sg_resource_state sg_query_image_state(sg_image img);
SOKOL_GFX_API_DECL sg_resource_state sg_query_shader_state(sg_shader shd);
SOKOL_GFX_API_DECL sg_resource_state sg_query_pipeline_state(sg_pipeline pip);
SOKOL_GFX_API_DECL sg_resource_state sg_query_pass_state(sg_pass pass);
/* get runtime information about a resource (see the *_info struct docs above) */
SOKOL_GFX_API_DECL sg_buffer_info sg_query_buffer_info(sg_buffer buf);
SOKOL_GFX_API_DECL sg_image_info sg_query_image_info(sg_image img);
SOKOL_GFX_API_DECL sg_shader_info sg_query_shader_info(sg_shader shd);
SOKOL_GFX_API_DECL sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip);
SOKOL_GFX_API_DECL sg_pass_info sg_query_pass_info(sg_pass pass);
/* get resource creation desc struct with their default values replaced */
SOKOL_GFX_API_DECL sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc);
SOKOL_GFX_API_DECL sg_image_desc sg_query_image_defaults(const sg_image_desc* desc);
SOKOL_GFX_API_DECL sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc);
SOKOL_GFX_API_DECL sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc);
SOKOL_GFX_API_DECL sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc);
2795 | |
/* separate resource allocation and initialization (for async setup):
   alloc reserves a handle, init fills it in later; uninit/dealloc reverse
   those steps, fail marks an allocated handle as failed */
SOKOL_GFX_API_DECL sg_buffer sg_alloc_buffer(void);
SOKOL_GFX_API_DECL sg_image sg_alloc_image(void);
SOKOL_GFX_API_DECL sg_shader sg_alloc_shader(void);
SOKOL_GFX_API_DECL sg_pipeline sg_alloc_pipeline(void);
SOKOL_GFX_API_DECL sg_pass sg_alloc_pass(void);
SOKOL_GFX_API_DECL void sg_dealloc_buffer(sg_buffer buf);
SOKOL_GFX_API_DECL void sg_dealloc_image(sg_image img);
SOKOL_GFX_API_DECL void sg_dealloc_shader(sg_shader shd);
SOKOL_GFX_API_DECL void sg_dealloc_pipeline(sg_pipeline pip);
SOKOL_GFX_API_DECL void sg_dealloc_pass(sg_pass pass);
SOKOL_GFX_API_DECL void sg_init_buffer(sg_buffer buf, const sg_buffer_desc* desc);
SOKOL_GFX_API_DECL void sg_init_image(sg_image img, const sg_image_desc* desc);
SOKOL_GFX_API_DECL void sg_init_shader(sg_shader shd, const sg_shader_desc* desc);
SOKOL_GFX_API_DECL void sg_init_pipeline(sg_pipeline pip, const sg_pipeline_desc* desc);
SOKOL_GFX_API_DECL void sg_init_pass(sg_pass pass, const sg_pass_desc* desc);
SOKOL_GFX_API_DECL void sg_uninit_buffer(sg_buffer buf);
SOKOL_GFX_API_DECL void sg_uninit_image(sg_image img);
SOKOL_GFX_API_DECL void sg_uninit_shader(sg_shader shd);
SOKOL_GFX_API_DECL void sg_uninit_pipeline(sg_pipeline pip);
SOKOL_GFX_API_DECL void sg_uninit_pass(sg_pass pass);
SOKOL_GFX_API_DECL void sg_fail_buffer(sg_buffer buf);
SOKOL_GFX_API_DECL void sg_fail_image(sg_image img);
SOKOL_GFX_API_DECL void sg_fail_shader(sg_shader shd);
SOKOL_GFX_API_DECL void sg_fail_pipeline(sg_pipeline pip);
SOKOL_GFX_API_DECL void sg_fail_pass(sg_pass pass);

/* rendering contexts (optional) */
SOKOL_GFX_API_DECL sg_context sg_setup_context(void);
SOKOL_GFX_API_DECL void sg_activate_context(sg_context ctx_id);
SOKOL_GFX_API_DECL void sg_discard_context(sg_context ctx_id);
2827 | |
/* Backend-specific helper functions, these may come in handy for mixing
   sokol-gfx rendering with 'native backend' rendering functions.

   This group of functions will be expanded as needed.
*/

/* D3D11: return ID3D11Device */
SOKOL_GFX_API_DECL const void* sg_d3d11_device(void);

/* Metal: return __bridge-casted MTLDevice */
SOKOL_GFX_API_DECL const void* sg_mtl_device(void);

/* Metal: return __bridge-casted MTLRenderCommandEncoder in current pass (or zero if outside pass) */
SOKOL_GFX_API_DECL const void* sg_mtl_render_command_encoder(void);
2842 | |
2843 | #ifdef __cplusplus |
2844 | } /* extern "C" */ |
2845 | |
/* reference-based equivalents for c++: overloads that take const references
   instead of pointers and simply forward to the C functions declared above */
inline void sg_setup(const sg_desc& desc) { return sg_setup(&desc); }

inline sg_buffer sg_make_buffer(const sg_buffer_desc& desc) { return sg_make_buffer(&desc); }
inline sg_image sg_make_image(const sg_image_desc& desc) { return sg_make_image(&desc); }
inline sg_shader sg_make_shader(const sg_shader_desc& desc) { return sg_make_shader(&desc); }
inline sg_pipeline sg_make_pipeline(const sg_pipeline_desc& desc) { return sg_make_pipeline(&desc); }
inline sg_pass sg_make_pass(const sg_pass_desc& desc) { return sg_make_pass(&desc); }
inline void sg_update_image(sg_image img, const sg_image_data& data) { return sg_update_image(img, &data); }

inline void sg_begin_default_pass(const sg_pass_action& pass_action, int width, int height) { return sg_begin_default_pass(&pass_action, width, height); }
inline void sg_begin_default_passf(const sg_pass_action& pass_action, float width, float height) { return sg_begin_default_passf(&pass_action, width, height); }
inline void sg_begin_pass(sg_pass pass, const sg_pass_action& pass_action) { return sg_begin_pass(pass, &pass_action); }
inline void sg_apply_bindings(const sg_bindings& bindings) { return sg_apply_bindings(&bindings); }
inline void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range& data) { return sg_apply_uniforms(stage, ub_index, &data); }

inline sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc& desc) { return sg_query_buffer_defaults(&desc); }
inline sg_image_desc sg_query_image_defaults(const sg_image_desc& desc) { return sg_query_image_defaults(&desc); }
inline sg_shader_desc sg_query_shader_defaults(const sg_shader_desc& desc) { return sg_query_shader_defaults(&desc); }
inline sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc& desc) { return sg_query_pipeline_defaults(&desc); }
inline sg_pass_desc sg_query_pass_defaults(const sg_pass_desc& desc) { return sg_query_pass_defaults(&desc); }

inline void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc& desc) { return sg_init_buffer(buf_id, &desc); }
inline void sg_init_image(sg_image img_id, const sg_image_desc& desc) { return sg_init_image(img_id, &desc); }
inline void sg_init_shader(sg_shader shd_id, const sg_shader_desc& desc) { return sg_init_shader(shd_id, &desc); }
inline void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc& desc) { return sg_init_pipeline(pip_id, &desc); }
inline void sg_init_pass(sg_pass pass_id, const sg_pass_desc& desc) { return sg_init_pass(pass_id, &desc); }

inline void sg_update_buffer(sg_buffer buf_id, const sg_range& data) { return sg_update_buffer(buf_id, &data); }
inline int sg_append_buffer(sg_buffer buf_id, const sg_range& data) { return sg_append_buffer(buf_id, &data); }
2876 | #endif |
2877 | #endif // SOKOL_GFX_INCLUDED |
2878 | |
2879 | /*--- IMPLEMENTATION ---------------------------------------------------------*/ |
2880 | #ifdef SOKOL_GFX_IMPL |
2881 | #define SOKOL_GFX_IMPL_INCLUDED (1) |
2882 | |
/* compile-time configuration sanity checks for the implementation */
#if !(defined(SOKOL_GLCORE33)||defined(SOKOL_GLES2)||defined(SOKOL_GLES3)||defined(SOKOL_D3D11)||defined(SOKOL_METAL)||defined(SOKOL_WGPU)||defined(SOKOL_DUMMY_BACKEND))
#error "Please select a backend with SOKOL_GLCORE33, SOKOL_GLES2, SOKOL_GLES3, SOKOL_D3D11, SOKOL_METAL, SOKOL_WGPU or SOKOL_DUMMY_BACKEND"
#endif
#if defined(SOKOL_MALLOC) || defined(SOKOL_CALLOC) || defined(SOKOL_FREE)
#error "SOKOL_MALLOC/CALLOC/FREE macros are no longer supported, please use sg_desc.allocator to override memory allocation functions"
#endif
2889 | |
2890 | #include <stdlib.h> // malloc, free |
2891 | #include <string.h> // memset |
2892 | #include <float.h> // FLT_MAX |
2893 | |
/* provide defaults for all user-overridable implementation macros */
#ifndef SOKOL_API_IMPL
#define SOKOL_API_IMPL
#endif
/* SOKOL_DEBUG follows the standard NDEBUG convention unless overridden */
#ifndef SOKOL_DEBUG
#ifndef NDEBUG
#define SOKOL_DEBUG
#endif
#endif
#ifndef SOKOL_ASSERT
#include <assert.h>
#define SOKOL_ASSERT(c) assert(c)
#endif
/* validation-layer macros (only invoked where validation is active) */
#ifndef SOKOL_VALIDATE_BEGIN
#define SOKOL_VALIDATE_BEGIN() _sg_validate_begin()
#endif
#ifndef SOKOL_VALIDATE
#define SOKOL_VALIDATE(cond, err) _sg_validate((cond), err)
#endif
#ifndef SOKOL_VALIDATE_END
#define SOKOL_VALIDATE_END() _sg_validate_end()
#endif
#ifndef SOKOL_UNREACHABLE
#define SOKOL_UNREACHABLE SOKOL_ASSERT(false)
#endif

/* SG_LOG compiles to nothing in non-debug builds */
#if !defined(SOKOL_DEBUG)
#define SG_LOG(s)
#else
#define SG_LOG(s) _sg_log(s)
#ifndef SOKOL_LOG
#if defined(__ANDROID__)
#include <android/log.h>
#define SOKOL_LOG(s) __android_log_write(ANDROID_LOG_INFO, "SOKOL_GFX", s)
#else
#include <stdio.h>
#define SOKOL_LOG(s) puts(s)
#endif
#endif
#endif

/* mark internal functions 'unused' on GCC/clang to silence warnings for
   functions that are compiled but not referenced by the selected backend */
#ifndef _SOKOL_PRIVATE
#if defined(__GNUC__) || defined(__clang__)
#define _SOKOL_PRIVATE __attribute__((unused)) static
#else
#define _SOKOL_PRIVATE static
#endif
#endif

#ifndef _SOKOL_UNUSED
#define _SOKOL_UNUSED(x) (void)(x)
#endif

/* trace-hook invocation macros compile to nothing unless SOKOL_TRACE_HOOKS is defined */
#if defined(SOKOL_TRACE_HOOKS)
#define _SG_TRACE_ARGS(fn, ...) if (_sg.hooks.fn) { _sg.hooks.fn(__VA_ARGS__, _sg.hooks.user_data); }
#define _SG_TRACE_NOARGS(fn) if (_sg.hooks.fn) { _sg.hooks.fn(_sg.hooks.user_data); }
#else
#define _SG_TRACE_ARGS(fn, ...)
#define _SG_TRACE_NOARGS(fn)
#endif
2953 | |
/* default clear values (each can be overridden before including the implementation) */
#ifndef SG_DEFAULT_CLEAR_RED
#define SG_DEFAULT_CLEAR_RED (0.5f)
#endif
#ifndef SG_DEFAULT_CLEAR_GREEN
#define SG_DEFAULT_CLEAR_GREEN (0.5f)
#endif
#ifndef SG_DEFAULT_CLEAR_BLUE
#define SG_DEFAULT_CLEAR_BLUE (0.5f)
#endif
#ifndef SG_DEFAULT_CLEAR_ALPHA
#define SG_DEFAULT_CLEAR_ALPHA (1.0f)
#endif
#ifndef SG_DEFAULT_CLEAR_DEPTH
#define SG_DEFAULT_CLEAR_DEPTH (1.0f)
#endif
#ifndef SG_DEFAULT_CLEAR_STENCIL
#define SG_DEFAULT_CLEAR_STENCIL (0)
#endif
2973 | |
/* silence MSVC warnings triggered by system headers and function-pointer casts
   (matching #pragma warning(pop) is expected further down in the implementation) */
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4115)   /* named type definition in parentheses */
#pragma warning(disable:4505)   /* unreferenced local function has been removed */
#pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union (needed by d3d11.h) */
#pragma warning(disable:4054)   /* 'type cast': from function pointer */
#pragma warning(disable:4055)   /* 'type cast': from data pointer */
#endif
2982 | |
2983 | #if defined(SOKOL_D3D11) |
2984 | #ifndef D3D11_NO_HELPERS |
2985 | #define D3D11_NO_HELPERS |
2986 | #endif |
2987 | #ifndef WIN32_LEAN_AND_MEAN |
2988 | #define WIN32_LEAN_AND_MEAN |
2989 | #endif |
2990 | #ifndef NOMINMAX |
2991 | #define NOMINMAX |
2992 | #endif |
2993 | #include <d3d11.h> |
2994 | #include <d3dcompiler.h> |
2995 | #ifdef _MSC_VER |
2996 | #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) |
2997 | #pragma comment (lib, "WindowsApp") |
2998 | #else |
2999 | #pragma comment (lib, "kernel32") |
3000 | #pragma comment (lib, "user32") |
3001 | #pragma comment (lib, "dxgi") |
3002 | #pragma comment (lib, "d3d11") |
3003 | #endif |
3004 | #endif |
3005 | #elif defined(SOKOL_METAL) |
3006 | // see https://clang.llvm.org/docs/LanguageExtensions.html#automatic-reference-counting |
3007 | #if !defined(__cplusplus) |
3008 | #if __has_feature(objc_arc) && !__has_feature(objc_arc_fields) |
3009 | #error "sokol_gfx.h requires __has_feature(objc_arc_field) if ARC is enabled (use a more recent compiler version)" |
3010 | #endif |
3011 | #endif |
3012 | #include <TargetConditionals.h> |
3013 | #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE |
3014 | #define _SG_TARGET_MACOS (1) |
3015 | #else |
3016 | #define _SG_TARGET_IOS (1) |
3017 | #if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR |
3018 | #define _SG_TARGET_IOS_SIMULATOR (1) |
3019 | #endif |
3020 | #endif |
3021 | #import <Metal/Metal.h> |
3022 | #elif defined(SOKOL_WGPU) |
3023 | #if defined(__EMSCRIPTEN__) |
3024 | #include <webgpu/webgpu.h> |
3025 | #else |
3026 | #include <dawn/webgpu.h> |
3027 | #endif |
3028 | #elif defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) |
3029 | #define _SOKOL_ANY_GL (1) |
3030 | |
3031 | // include platform specific GL headers (or on Win32: use an embedded GL loader) |
3032 | #if !defined(SOKOL_EXTERNAL_GL_LOADER) |
3033 | #if defined(_WIN32) |
3034 | #if defined(SOKOL_GLCORE33) && !defined(SOKOL_EXTERNAL_GL_LOADER) |
3035 | #ifndef WIN32_LEAN_AND_MEAN |
3036 | #define WIN32_LEAN_AND_MEAN |
3037 | #endif |
3038 | #ifndef NOMINMAX |
3039 | #define NOMINMAX |
3040 | #endif |
3041 | #include <windows.h> |
3042 | #define _SOKOL_USE_WIN32_GL_LOADER (1) |
3043 | #pragma comment (lib, "kernel32") // GetProcAddress() |
3044 | #endif |
3045 | #elif defined(__APPLE__) |
3046 | #include <TargetConditionals.h> |
3047 | #ifndef GL_SILENCE_DEPRECATION |
3048 | #define GL_SILENCE_DEPRECATION |
3049 | #endif |
3050 | #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE |
3051 | #include <OpenGL/gl3.h> |
3052 | #else |
3053 | #include <OpenGLES/ES3/gl.h> |
3054 | #include <OpenGLES/ES3/glext.h> |
3055 | #endif |
3056 | #elif defined(__EMSCRIPTEN__) || defined(__ANDROID__) |
3057 | #if defined(SOKOL_GLES3) |
3058 | #include <GLES3/gl3.h> |
3059 | #elif defined(SOKOL_GLES2) |
/* guard typo fix: the macro being defined is GL_GLEXT_PROTOTYPES, so the
   #ifndef must test the same name (previously tested GL_EXT_PROTOTYPES,
   which made the guard ineffective) */
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES
#endif
3063 | #include <GLES2/gl2.h> |
3064 | #include <GLES2/gl2ext.h> |
3065 | #endif |
3066 | #elif defined(__linux__) || defined(__unix__) |
3067 | #define GL_GLEXT_PROTOTYPES |
3068 | #include <GL/gl.h> |
3069 | #endif |
3070 | #endif |
3071 | |
3072 | // optional GL loader definitions (only on Win32) |
3073 | #if defined(_SOKOL_USE_WIN32_GL_LOADER) |
3074 | #define __gl_h_ 1 |
3075 | #define __gl32_h_ 1 |
3076 | #define __gl31_h_ 1 |
3077 | #define __GL_H__ 1 |
3078 | #define __glext_h_ 1 |
3079 | #define __GLEXT_H_ 1 |
3080 | #define __gltypes_h_ 1 |
3081 | #define __glcorearb_h_ 1 |
3082 | #define __gl_glcorearb_h_ 1 |
3083 | #define GL_APIENTRY APIENTRY |
3084 | |
3085 | typedef unsigned int GLenum; |
3086 | typedef unsigned int GLuint; |
3087 | typedef int GLsizei; |
3088 | typedef char GLchar; |
3089 | typedef ptrdiff_t GLintptr; |
3090 | typedef ptrdiff_t GLsizeiptr; |
3091 | typedef double GLclampd; |
3092 | typedef unsigned short GLushort; |
3093 | typedef unsigned char GLubyte; |
3094 | typedef unsigned char GLboolean; |
3095 | typedef uint64_t GLuint64; |
3096 | typedef double GLdouble; |
3097 | typedef unsigned short GLhalf; |
3098 | typedef float GLclampf; |
3099 | typedef unsigned int GLbitfield; |
3100 | typedef signed char GLbyte; |
3101 | typedef short GLshort; |
3102 | typedef void GLvoid; |
3103 | typedef int64_t GLint64; |
3104 | typedef float GLfloat; |
3105 | typedef struct __GLsync * GLsync; |
3106 | typedef int GLint; |
3107 | #define GL_INT_2_10_10_10_REV 0x8D9F |
3108 | #define GL_R32F 0x822E |
3109 | #define GL_PROGRAM_POINT_SIZE 0x8642 |
3110 | #define GL_STENCIL_ATTACHMENT 0x8D20 |
3111 | #define GL_DEPTH_ATTACHMENT 0x8D00 |
3112 | #define GL_COLOR_ATTACHMENT2 0x8CE2 |
3113 | #define GL_COLOR_ATTACHMENT0 0x8CE0 |
3114 | #define GL_R16F 0x822D |
3115 | #define GL_COLOR_ATTACHMENT22 0x8CF6 |
3116 | #define GL_DRAW_FRAMEBUFFER 0x8CA9 |
3117 | #define GL_FRAMEBUFFER_COMPLETE 0x8CD5 |
3118 | #define GL_NUM_EXTENSIONS 0x821D |
3119 | #define GL_INFO_LOG_LENGTH 0x8B84 |
3120 | #define GL_VERTEX_SHADER 0x8B31 |
3121 | #define GL_INCR 0x1E02 |
3122 | #define GL_DYNAMIC_DRAW 0x88E8 |
3123 | #define GL_STATIC_DRAW 0x88E4 |
3124 | #define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 |
3125 | #define GL_TEXTURE_CUBE_MAP 0x8513 |
3126 | #define GL_FUNC_SUBTRACT 0x800A |
3127 | #define GL_FUNC_REVERSE_SUBTRACT 0x800B |
3128 | #define GL_CONSTANT_COLOR 0x8001 |
3129 | #define GL_DECR_WRAP 0x8508 |
3130 | #define GL_R8 0x8229 |
3131 | #define GL_LINEAR_MIPMAP_LINEAR 0x2703 |
3132 | #define GL_ELEMENT_ARRAY_BUFFER 0x8893 |
3133 | #define GL_SHORT 0x1402 |
3134 | #define GL_DEPTH_TEST 0x0B71 |
3135 | #define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 |
3136 | #define GL_LINK_STATUS 0x8B82 |
3137 | #define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 |
3138 | #define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E |
3139 | #define GL_RGBA16F 0x881A |
3140 | #define GL_CONSTANT_ALPHA 0x8003 |
3141 | #define GL_READ_FRAMEBUFFER 0x8CA8 |
3142 | #define GL_TEXTURE0 0x84C0 |
3143 | #define GL_TEXTURE_MIN_LOD 0x813A |
3144 | #define GL_CLAMP_TO_EDGE 0x812F |
3145 | #define GL_UNSIGNED_SHORT_5_6_5 0x8363 |
3146 | #define GL_TEXTURE_WRAP_R 0x8072 |
3147 | #define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 |
3148 | #define GL_NEAREST_MIPMAP_NEAREST 0x2700 |
3149 | #define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 |
3150 | #define GL_SRC_ALPHA_SATURATE 0x0308 |
3151 | #define GL_STREAM_DRAW 0x88E0 |
3152 | #define GL_ONE 1 |
3153 | #define GL_NEAREST_MIPMAP_LINEAR 0x2702 |
3154 | #define GL_RGB10_A2 0x8059 |
3155 | #define GL_RGBA8 0x8058 |
3156 | #define GL_COLOR_ATTACHMENT1 0x8CE1 |
3157 | #define GL_RGBA4 0x8056 |
3158 | #define GL_RGB8 0x8051 |
3159 | #define GL_ARRAY_BUFFER 0x8892 |
3160 | #define GL_STENCIL 0x1802 |
3161 | #define GL_TEXTURE_2D 0x0DE1 |
3162 | #define GL_DEPTH 0x1801 |
3163 | #define GL_FRONT 0x0404 |
3164 | #define GL_STENCIL_BUFFER_BIT 0x00000400 |
3165 | #define GL_REPEAT 0x2901 |
3166 | #define GL_RGBA 0x1908 |
3167 | #define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 |
3168 | #define GL_DECR 0x1E03 |
3169 | #define GL_FRAGMENT_SHADER 0x8B30 |
3170 | #define GL_FLOAT 0x1406 |
3171 | #define GL_TEXTURE_MAX_LOD 0x813B |
3172 | #define GL_DEPTH_COMPONENT 0x1902 |
3173 | #define GL_ONE_MINUS_DST_ALPHA 0x0305 |
3174 | #define GL_COLOR 0x1800 |
3175 | #define GL_TEXTURE_2D_ARRAY 0x8C1A |
3176 | #define GL_TRIANGLES 0x0004 |
3177 | #define GL_UNSIGNED_BYTE 0x1401 |
3178 | #define GL_TEXTURE_MAG_FILTER 0x2800 |
3179 | #define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 |
3180 | #define GL_NONE 0 |
3181 | #define GL_SRC_COLOR 0x0300 |
3182 | #define GL_BYTE 0x1400 |
3183 | #define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A |
3184 | #define GL_LINE_STRIP 0x0003 |
3185 | #define GL_TEXTURE_3D 0x806F |
3186 | #define GL_CW 0x0900 |
3187 | #define GL_LINEAR 0x2601 |
3188 | #define GL_RENDERBUFFER 0x8D41 |
3189 | #define GL_GEQUAL 0x0206 |
3190 | #define GL_COLOR_BUFFER_BIT 0x00004000 |
3191 | #define GL_RGBA32F 0x8814 |
3192 | #define GL_BLEND 0x0BE2 |
3193 | #define GL_ONE_MINUS_SRC_ALPHA 0x0303 |
3194 | #define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 |
3195 | #define GL_TEXTURE_WRAP_T 0x2803 |
3196 | #define GL_TEXTURE_WRAP_S 0x2802 |
3197 | #define GL_TEXTURE_MIN_FILTER 0x2801 |
3198 | #define GL_LINEAR_MIPMAP_NEAREST 0x2701 |
3199 | #define GL_EXTENSIONS 0x1F03 |
3200 | #define GL_NO_ERROR 0 |
3201 | #define GL_REPLACE 0x1E01 |
3202 | #define GL_KEEP 0x1E00 |
3203 | #define GL_CCW 0x0901 |
3204 | #define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 |
3205 | #define GL_RGB 0x1907 |
3206 | #define GL_TRIANGLE_STRIP 0x0005 |
3207 | #define GL_FALSE 0 |
3208 | #define GL_ZERO 0 |
3209 | #define GL_CULL_FACE 0x0B44 |
3210 | #define GL_INVERT 0x150A |
3211 | #define GL_INT 0x1404 |
3212 | #define GL_UNSIGNED_INT 0x1405 |
3213 | #define GL_UNSIGNED_SHORT 0x1403 |
3214 | #define GL_NEAREST 0x2600 |
3215 | #define GL_SCISSOR_TEST 0x0C11 |
3216 | #define GL_LEQUAL 0x0203 |
3217 | #define GL_STENCIL_TEST 0x0B90 |
3218 | #define GL_DITHER 0x0BD0 |
3219 | #define GL_DEPTH_COMPONENT16 0x81A5 |
3220 | #define GL_EQUAL 0x0202 |
3221 | #define GL_FRAMEBUFFER 0x8D40 |
3222 | #define GL_RGB5 0x8050 |
3223 | #define GL_LINES 0x0001 |
3224 | #define GL_DEPTH_BUFFER_BIT 0x00000100 |
3225 | #define GL_SRC_ALPHA 0x0302 |
3226 | #define GL_INCR_WRAP 0x8507 |
3227 | #define GL_LESS 0x0201 |
3228 | #define GL_MULTISAMPLE 0x809D |
3229 | #define GL_FRAMEBUFFER_BINDING 0x8CA6 |
3230 | #define GL_BACK 0x0405 |
3231 | #define GL_ALWAYS 0x0207 |
3232 | #define GL_FUNC_ADD 0x8006 |
3233 | #define GL_ONE_MINUS_DST_COLOR 0x0307 |
3234 | #define GL_NOTEQUAL 0x0205 |
3235 | #define GL_DST_COLOR 0x0306 |
3236 | #define GL_COMPILE_STATUS 0x8B81 |
3237 | #define GL_RED 0x1903 |
3238 | #define GL_COLOR_ATTACHMENT3 0x8CE3 |
3239 | #define GL_DST_ALPHA 0x0304 |
3240 | #define GL_RGB5_A1 0x8057 |
3241 | #define GL_GREATER 0x0204 |
3242 | #define GL_POLYGON_OFFSET_FILL 0x8037 |
3243 | #define GL_TRUE 1 |
3244 | #define GL_NEVER 0x0200 |
3245 | #define GL_POINTS 0x0000 |
3246 | #define GL_ONE_MINUS_SRC_COLOR 0x0301 |
3247 | #define GL_MIRRORED_REPEAT 0x8370 |
3248 | #define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D |
3249 | #define GL_R11F_G11F_B10F 0x8C3A |
3250 | #define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B |
3251 | #define GL_RGB9_E5 0x8C3D |
3252 | #define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E |
3253 | #define GL_RGBA32UI 0x8D70 |
3254 | #define GL_RGB32UI 0x8D71 |
3255 | #define GL_RGBA16UI 0x8D76 |
3256 | #define GL_RGB16UI 0x8D77 |
3257 | #define GL_RGBA8UI 0x8D7C |
3258 | #define GL_RGB8UI 0x8D7D |
3259 | #define GL_RGBA32I 0x8D82 |
3260 | #define GL_RGB32I 0x8D83 |
3261 | #define GL_RGBA16I 0x8D88 |
3262 | #define GL_RGB16I 0x8D89 |
3263 | #define GL_RGBA8I 0x8D8E |
3264 | #define GL_RGB8I 0x8D8F |
3265 | #define GL_RED_INTEGER 0x8D94 |
3266 | #define GL_RG 0x8227 |
3267 | #define GL_RG_INTEGER 0x8228 |
3268 | #define GL_R8 0x8229 |
3269 | #define GL_R16 0x822A |
3270 | #define GL_RG8 0x822B |
3271 | #define GL_RG16 0x822C |
3272 | #define GL_R16F 0x822D |
3273 | #define GL_R32F 0x822E |
3274 | #define GL_RG16F 0x822F |
3275 | #define GL_RG32F 0x8230 |
3276 | #define GL_R8I 0x8231 |
3277 | #define GL_R8UI 0x8232 |
3278 | #define GL_R16I 0x8233 |
3279 | #define GL_R16UI 0x8234 |
3280 | #define GL_R32I 0x8235 |
3281 | #define GL_R32UI 0x8236 |
3282 | #define GL_RG8I 0x8237 |
3283 | #define GL_RG8UI 0x8238 |
3284 | #define GL_RG16I 0x8239 |
3285 | #define GL_RG16UI 0x823A |
3286 | #define GL_RG32I 0x823B |
3287 | #define GL_RG32UI 0x823C |
3288 | #define GL_RGBA_INTEGER 0x8D99 |
3289 | #define GL_R8_SNORM 0x8F94 |
3290 | #define GL_RG8_SNORM 0x8F95 |
3291 | #define GL_RGB8_SNORM 0x8F96 |
3292 | #define GL_RGBA8_SNORM 0x8F97 |
3293 | #define GL_R16_SNORM 0x8F98 |
3294 | #define GL_RG16_SNORM 0x8F99 |
3295 | #define GL_RGB16_SNORM 0x8F9A |
3296 | #define GL_RGBA16_SNORM 0x8F9B |
3297 | #define GL_RGBA16 0x805B |
3298 | #define GL_MAX_TEXTURE_SIZE 0x0D33 |
3299 | #define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C |
3300 | #define GL_MAX_3D_TEXTURE_SIZE 0x8073 |
3301 | #define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF |
3302 | #define GL_MAX_VERTEX_ATTRIBS 0x8869 |
3303 | #define GL_CLAMP_TO_BORDER 0x812D |
3304 | #define GL_TEXTURE_BORDER_COLOR 0x1004 |
3305 | #define GL_CURRENT_PROGRAM 0x8B8D |
3306 | #define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB |
3307 | #endif |
3308 | |
3309 | #ifndef GL_UNSIGNED_INT_2_10_10_10_REV |
3310 | #define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 |
3311 | #endif |
3312 | #ifndef GL_UNSIGNED_INT_24_8 |
3313 | #define GL_UNSIGNED_INT_24_8 0x84FA |
3314 | #endif |
3315 | #ifndef GL_TEXTURE_MAX_ANISOTROPY_EXT |
3316 | #define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE |
3317 | #endif |
3318 | #ifndef GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT |
3319 | #define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF |
3320 | #endif |
3321 | #ifndef GL_COMPRESSED_RGBA_S3TC_DXT1_EXT |
3322 | #define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1 |
3323 | #endif |
3324 | #ifndef GL_COMPRESSED_RGBA_S3TC_DXT3_EXT |
3325 | #define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2 |
3326 | #endif |
3327 | #ifndef GL_COMPRESSED_RGBA_S3TC_DXT5_EXT |
3328 | #define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3 |
3329 | #endif |
3330 | #ifndef GL_COMPRESSED_RED_RGTC1 |
3331 | #define GL_COMPRESSED_RED_RGTC1 0x8DBB |
3332 | #endif |
3333 | #ifndef GL_COMPRESSED_SIGNED_RED_RGTC1 |
3334 | #define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC |
3335 | #endif |
3336 | #ifndef GL_COMPRESSED_RED_GREEN_RGTC2 |
3337 | #define GL_COMPRESSED_RED_GREEN_RGTC2 0x8DBD |
3338 | #endif |
3339 | #ifndef GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 |
3340 | #define GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 0x8DBE |
3341 | #endif |
3342 | #ifndef GL_COMPRESSED_RGBA_BPTC_UNORM_ARB |
3343 | #define GL_COMPRESSED_RGBA_BPTC_UNORM_ARB 0x8E8C |
3344 | #endif |
3345 | #ifndef GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB |
3346 | #define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB 0x8E8D |
3347 | #endif |
3348 | #ifndef GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB |
3349 | #define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB 0x8E8E |
3350 | #endif |
3351 | #ifndef GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB |
3352 | #define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB 0x8E8F |
3353 | #endif |
3354 | #ifndef GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG |
3355 | #define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01 |
3356 | #endif |
3357 | #ifndef GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG |
3358 | #define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00 |
3359 | #endif |
3360 | #ifndef GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG |
3361 | #define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03 |
3362 | #endif |
3363 | #ifndef GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG |
3364 | #define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02 |
3365 | #endif |
3366 | #ifndef GL_COMPRESSED_RGB8_ETC2 |
3367 | #define GL_COMPRESSED_RGB8_ETC2 0x9274 |
3368 | #endif |
3369 | #ifndef GL_COMPRESSED_RGBA8_ETC2_EAC |
3370 | #define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 |
3371 | #endif |
3372 | #ifndef GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 |
3373 | #define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 |
3374 | #endif |
3375 | #ifndef GL_COMPRESSED_RG11_EAC |
3376 | #define GL_COMPRESSED_RG11_EAC 0x9272 |
3377 | #endif |
3378 | #ifndef GL_COMPRESSED_SIGNED_RG11_EAC |
3379 | #define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 |
3380 | #endif |
3381 | #ifndef GL_DEPTH24_STENCIL8 |
3382 | #define GL_DEPTH24_STENCIL8 0x88F0 |
3383 | #endif |
3384 | #ifndef GL_HALF_FLOAT |
3385 | #define GL_HALF_FLOAT 0x140B |
3386 | #endif |
3387 | #ifndef GL_DEPTH_STENCIL |
3388 | #define GL_DEPTH_STENCIL 0x84F9 |
3389 | #endif |
3390 | #ifndef GL_LUMINANCE |
3391 | #define GL_LUMINANCE 0x1909 |
3392 | #endif |
3393 | |
3394 | #ifdef SOKOL_GLES2 |
3395 | #ifdef GL_ANGLE_instanced_arrays |
3396 | #define _SOKOL_GL_INSTANCING_ENABLED |
3397 | #define glDrawArraysInstanced(mode, first, count, instancecount) glDrawArraysInstancedANGLE(mode, first, count, instancecount) |
3398 | #define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedANGLE(mode, count, type, indices, instancecount) |
3399 | #define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorANGLE(index, divisor) |
3400 | #elif defined(GL_EXT_draw_instanced) && defined(GL_EXT_instanced_arrays) |
3401 | #define _SOKOL_GL_INSTANCING_ENABLED |
3402 | #define glDrawArraysInstanced(mode, first, count, instancecount) glDrawArraysInstancedEXT(mode, first, count, instancecount) |
3403 | #define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedEXT(mode, count, type, indices, instancecount) |
3404 | #define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorEXT(index, divisor) |
3405 | #else |
3406 | #define _SOKOL_GLES2_INSTANCING_ERROR "Select GL_ANGLE_instanced_arrays or (GL_EXT_draw_instanced & GL_EXT_instanced_arrays) to enable instancing in GLES2" |
3407 | #define glDrawArraysInstanced(mode, first, count, instancecount) SOKOL_ASSERT(0 && _SOKOL_GLES2_INSTANCING_ERROR) |
3408 | #define glDrawElementsInstanced(mode, count, type, indices, instancecount) SOKOL_ASSERT(0 && _SOKOL_GLES2_INSTANCING_ERROR) |
3409 | #define glVertexAttribDivisor(index, divisor) SOKOL_ASSERT(0 && _SOKOL_GLES2_INSTANCING_ERROR) |
3410 | #endif |
3411 | #else |
3412 | #define _SOKOL_GL_INSTANCING_ENABLED |
3413 | #endif |
3414 | #define _SG_GL_CHECK_ERROR() { SOKOL_ASSERT(glGetError() == GL_NO_ERROR); } |
3415 | #endif |
3416 | |
3417 | /*=== COMMON BACKEND STUFF ===================================================*/ |
3418 | |
/* resource pool slot: bookkeeping header embedded in every resource struct */
typedef struct {
    uint32_t id;                /* public resource id; lower _SG_SLOT_SHIFT bits presumably hold the pool-slot index (see _SG_SLOT_MASK) -- confirm at pool code */
    uint32_t ctx_id;            /* id of the sg_context this resource was created in */
    sg_resource_state state;    /* current lifecycle state of the resource */
} _sg_slot_t;
3425 | |
/* internal constants */
enum {
    _SG_STRING_SIZE = 16,                       /* byte capacity of _sg_str_t.buf (including terminator) */
    _SG_SLOT_SHIFT = 16,                        /* number of id bits reserved for the pool-slot index */
    _SG_SLOT_MASK = (1<<_SG_SLOT_SHIFT)-1,      /* mask to extract the slot index from a resource id */
    _SG_MAX_POOL_SIZE = (1<<_SG_SLOT_SHIFT),    /* hard upper bound for any resource pool size */
    /* fallback pool sizes, presumably applied when the corresponding
       sg_desc values are left zero-initialized (via _sg_def()) */
    _SG_DEFAULT_BUFFER_POOL_SIZE = 128,
    _SG_DEFAULT_IMAGE_POOL_SIZE = 128,
    _SG_DEFAULT_SHADER_POOL_SIZE = 32,
    _SG_DEFAULT_PIPELINE_POOL_SIZE = 64,
    _SG_DEFAULT_PASS_POOL_SIZE = 16,
    _SG_DEFAULT_CONTEXT_POOL_SIZE = 16,
    _SG_DEFAULT_SAMPLER_CACHE_CAPACITY = 64,    /* default capacity of the generic sampler cache (Metal/WGPU) */
    _SG_DEFAULT_UB_SIZE = 4 * 1024 * 1024,      /* default uniform buffer size in bytes -- TODO confirm which backend consumes this */
    _SG_DEFAULT_STAGING_SIZE = 8 * 1024 * 1024, /* default staging buffer size in bytes -- TODO confirm which backend consumes this */
    _SG_DEFAULT_MAX_COMMIT_LISTENERS = 1024,    /* default max number of commit listeners */
};
3443 | |
/* fixed-size string (no heap allocation; _SG_STRING_SIZE bytes including terminator) */
typedef struct {
    char buf[_SG_STRING_SIZE];
} _sg_str_t;
3448 | |
/* helper macros (NOTE: arguments may be evaluated more than once) */
#define _sg_def(val, def) (((val) == 0) ? (def) : (val))            /* replace a zero value ('use default') with a default */
#define _sg_def_flt(val, def) (((val) == 0.0f) ? (def) : (val))     /* float variant of _sg_def() */
#define _sg_min(a,b) (((a)<(b))?(a):(b))
#define _sg_max(a,b) (((a)>(b))?(a):(b))
#define _sg_clamp(v,v0,v1) (((v)<(v0))?(v0):(((v)>(v1))?(v1):(v)))  /* clamp v into [v0,v1] */
#define _sg_fequal(val,cmp,delta) ((((val)-(cmp))> -(delta))&&(((val)-(cmp))<(delta)))  /* approximate float equality: |val-cmp| < delta */
3456 | |
/* forward declarations of the memory helpers (implemented elsewhere in this file) */
_SOKOL_PRIVATE void* _sg_malloc_clear(size_t size);     /* allocate a zero-initialized memory block */
_SOKOL_PRIVATE void _sg_free(void* ptr);                /* free a previously allocated block */
_SOKOL_PRIVATE void _sg_clear(void* ptr, size_t size);  /* zero a memory block */
3460 | |
/* backend-agnostic buffer state, shared by all backend buffer structs */
typedef struct {
    int size;                       /* buffer size in bytes */
    int append_pos;                 /* byte offset for the next append operation */
    bool append_overflow;           /* true when an append operation exceeded the buffer size -- presumably, confirm at append code */
    sg_buffer_type type;            /* vertex- or index-buffer */
    sg_usage usage;                 /* immutable / dynamic / stream */
    uint32_t update_frame_index;    /* frame index of the last update -- used for once-per-frame validation, presumably */
    uint32_t append_frame_index;    /* frame index of the last append */
    int num_slots;                  /* 1 for immutable usage, otherwise SG_NUM_INFLIGHT_FRAMES (see _sg_buffer_common_init) */
    int active_slot;                /* index of the currently active in-flight slot */
} _sg_buffer_common_t;
3472 | |
3473 | _SOKOL_PRIVATE void _sg_buffer_common_init(_sg_buffer_common_t* cmn, const sg_buffer_desc* desc) { |
3474 | cmn->size = (int)desc->size; |
3475 | cmn->append_pos = 0; |
3476 | cmn->append_overflow = false; |
3477 | cmn->type = desc->type; |
3478 | cmn->usage = desc->usage; |
3479 | cmn->update_frame_index = 0; |
3480 | cmn->append_frame_index = 0; |
3481 | cmn->num_slots = (cmn->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES; |
3482 | cmn->active_slot = 0; |
3483 | } |
3484 | |
/* backend-agnostic image state, shared by all backend image structs */
typedef struct {
    sg_image_type type;             /* 2D / cube / 3D / array */
    bool render_target;             /* true if the image can be used as pass attachment */
    int width;                      /* width in pixels */
    int height;                     /* height in pixels */
    int num_slices;                 /* depth / array-layer count (meaning depends on image type) */
    int num_mipmaps;                /* number of mipmap levels */
    sg_usage usage;                 /* immutable / dynamic / stream */
    sg_pixel_format pixel_format;
    int sample_count;               /* MSAA sample count */
    /* sampler state */
    sg_filter min_filter;
    sg_filter mag_filter;
    sg_wrap wrap_u;
    sg_wrap wrap_v;
    sg_wrap wrap_w;
    sg_border_color border_color;
    uint32_t max_anisotropy;
    /* dynamic bookkeeping */
    uint32_t upd_frame_index;       /* frame index of the last update */
    int num_slots;                  /* 1 for immutable usage, otherwise SG_NUM_INFLIGHT_FRAMES (see _sg_image_common_init) */
    int active_slot;                /* index of the currently active in-flight slot */
} _sg_image_common_t;
3506 | |
3507 | _SOKOL_PRIVATE void _sg_image_common_init(_sg_image_common_t* cmn, const sg_image_desc* desc) { |
3508 | cmn->type = desc->type; |
3509 | cmn->render_target = desc->render_target; |
3510 | cmn->width = desc->width; |
3511 | cmn->height = desc->height; |
3512 | cmn->num_slices = desc->num_slices; |
3513 | cmn->num_mipmaps = desc->num_mipmaps; |
3514 | cmn->usage = desc->usage; |
3515 | cmn->pixel_format = desc->pixel_format; |
3516 | cmn->sample_count = desc->sample_count; |
3517 | cmn->min_filter = desc->min_filter; |
3518 | cmn->mag_filter = desc->mag_filter; |
3519 | cmn->wrap_u = desc->wrap_u; |
3520 | cmn->wrap_v = desc->wrap_v; |
3521 | cmn->wrap_w = desc->wrap_w; |
3522 | cmn->border_color = desc->border_color; |
3523 | cmn->max_anisotropy = desc->max_anisotropy; |
3524 | cmn->upd_frame_index = 0; |
3525 | cmn->num_slots = (cmn->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES; |
3526 | cmn->active_slot = 0; |
3527 | } |
3528 | |
/* reflection info for a single uniform block binding */
typedef struct {
    size_t size;    /* byte size of the uniform block */
} _sg_uniform_block_t;

/* reflection info for a single image binding */
typedef struct {
    sg_image_type image_type;
    sg_sampler_type sampler_type;
} _sg_shader_image_t;

/* per-shader-stage reflection info; slots are occupied consecutively
   from index 0 (see _sg_shader_common_init) */
typedef struct {
    int num_uniform_blocks;
    int num_images;
    _sg_uniform_block_t uniform_blocks[SG_MAX_SHADERSTAGE_UBS];
    _sg_shader_image_t images[SG_MAX_SHADERSTAGE_IMAGES];
} _sg_shader_stage_t;

/* backend-agnostic shader state: one entry per shader stage (vs, fs) */
typedef struct {
    _sg_shader_stage_t stage[SG_NUM_SHADER_STAGES];
} _sg_shader_common_t;
3548 | |
3549 | _SOKOL_PRIVATE void _sg_shader_common_init(_sg_shader_common_t* cmn, const sg_shader_desc* desc) { |
3550 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
3551 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs; |
3552 | _sg_shader_stage_t* stage = &cmn->stage[stage_index]; |
3553 | SOKOL_ASSERT(stage->num_uniform_blocks == 0); |
3554 | for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { |
3555 | const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; |
3556 | if (0 == ub_desc->size) { |
3557 | break; |
3558 | } |
3559 | stage->uniform_blocks[ub_index].size = ub_desc->size; |
3560 | stage->num_uniform_blocks++; |
3561 | } |
3562 | SOKOL_ASSERT(stage->num_images == 0); |
3563 | for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { |
3564 | const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; |
3565 | if (img_desc->image_type == _SG_IMAGETYPE_DEFAULT) { |
3566 | break; |
3567 | } |
3568 | stage->images[img_index].image_type = img_desc->image_type; |
3569 | stage->images[img_index].sampler_type = img_desc->sampler_type; |
3570 | stage->num_images++; |
3571 | } |
3572 | } |
3573 | } |
3574 | |
/* backend-agnostic pipeline state, shared by all backend pipeline structs */
typedef struct {
    sg_shader shader_id;            /* id of the shader the pipeline was created with */
    sg_index_type index_type;       /* none / uint16 / uint32 */
    bool use_instanced_draw;        /* initialized to false; presumably set later by backend layout code -- confirm */
    bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS];   /* initialized to false; presumably set per referenced vertex-buffer slot -- confirm */
    int color_attachment_count;     /* number of color attachments (1..SG_MAX_COLOR_ATTACHMENTS) */
    sg_pixel_format color_formats[SG_MAX_COLOR_ATTACHMENTS];
    sg_pixel_format depth_format;
    int sample_count;               /* MSAA sample count */
    float depth_bias;
    float depth_bias_slope_scale;
    float depth_bias_clamp;
    sg_color blend_color;           /* constant blend color */
} _sg_pipeline_common_t;
3589 | |
3590 | _SOKOL_PRIVATE void _sg_pipeline_common_init(_sg_pipeline_common_t* cmn, const sg_pipeline_desc* desc) { |
3591 | SOKOL_ASSERT((desc->color_count >= 1) && (desc->color_count <= SG_MAX_COLOR_ATTACHMENTS)); |
3592 | cmn->shader_id = desc->shader; |
3593 | cmn->index_type = desc->index_type; |
3594 | cmn->use_instanced_draw = false; |
3595 | for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { |
3596 | cmn->vertex_layout_valid[i] = false; |
3597 | } |
3598 | cmn->color_attachment_count = desc->color_count; |
3599 | for (int i = 0; i < cmn->color_attachment_count; i++) { |
3600 | cmn->color_formats[i] = desc->colors[i].pixel_format; |
3601 | } |
3602 | cmn->depth_format = desc->depth.pixel_format; |
3603 | cmn->sample_count = desc->sample_count; |
3604 | cmn->depth_bias = desc->depth.bias; |
3605 | cmn->depth_bias_slope_scale = desc->depth.bias_slope_scale; |
3606 | cmn->depth_bias_clamp = desc->depth.bias_clamp; |
3607 | cmn->blend_color = desc->blend_color; |
3608 | } |
3609 | |
/* backend-agnostic state of a single pass attachment */
typedef struct {
    sg_image image_id;  /* id of the image used as attachment */
    int mip_level;      /* mipmap level to render into */
    int slice;          /* slice index to render into (array layer / cube face / depth slice, presumably by image type) */
} _sg_pass_attachment_common_t;

/* backend-agnostic pass state: color attachments plus optional depth-stencil attachment */
typedef struct {
    int num_color_atts;     /* number of valid color attachments */
    _sg_pass_attachment_common_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
    _sg_pass_attachment_common_t ds_att;
} _sg_pass_common_t;
3621 | |
3622 | _SOKOL_PRIVATE void _sg_pass_common_init(_sg_pass_common_t* cmn, const sg_pass_desc* desc) { |
3623 | const sg_pass_attachment_desc* att_desc; |
3624 | _sg_pass_attachment_common_t* att; |
3625 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3626 | att_desc = &desc->color_attachments[i]; |
3627 | if (att_desc->image.id != SG_INVALID_ID) { |
3628 | cmn->num_color_atts++; |
3629 | att = &cmn->color_atts[i]; |
3630 | att->image_id = att_desc->image; |
3631 | att->mip_level = att_desc->mip_level; |
3632 | att->slice = att_desc->slice; |
3633 | } |
3634 | } |
3635 | att_desc = &desc->depth_stencil_attachment; |
3636 | if (att_desc->image.id != SG_INVALID_ID) { |
3637 | att = &cmn->ds_att; |
3638 | att->image_id = att_desc->image; |
3639 | att->mip_level = att_desc->mip_level; |
3640 | att->slice = att_desc->slice; |
3641 | } |
3642 | } |
3643 | |
3644 | /*=== GENERIC SAMPLER CACHE ==================================================*/ |
3645 | |
3646 | /* |
3647 | this is used by the Metal and WGPU backends to reduce the |
3648 | number of sampler state objects created through the backend API |
3649 | */ |
/* one cache entry: the sampler parameters plus the backend sampler handle they map to */
typedef struct {
    sg_filter min_filter;
    sg_filter mag_filter;
    sg_wrap wrap_u;
    sg_wrap wrap_v;
    sg_wrap wrap_w;
    sg_border_color border_color;
    uint32_t max_anisotropy;
    int min_lod;    /* orig min/max_lod is float, this is int(min/max_lod*1000.0) */
    int max_lod;
    uintptr_t sampler_handle;   /* opaque backend sampler object stored as an integer */
} _sg_sampler_cache_item_t;

/* fixed-capacity sampler cache (items array is heap-allocated in _sg_smpcache_init) */
typedef struct {
    int capacity;       /* max number of items */
    int num_items;      /* current number of occupied items */
    _sg_sampler_cache_item_t* items;
} _sg_sampler_cache_t;
3668 | |
3669 | _SOKOL_PRIVATE void _sg_smpcache_init(_sg_sampler_cache_t* cache, int capacity) { |
3670 | SOKOL_ASSERT(cache && (capacity > 0)); |
3671 | _sg_clear(cache, sizeof(_sg_sampler_cache_t)); |
3672 | cache->capacity = capacity; |
3673 | const size_t size = (size_t)cache->capacity * sizeof(_sg_sampler_cache_item_t); |
3674 | cache->items = (_sg_sampler_cache_item_t*) _sg_malloc_clear(size); |
3675 | } |
3676 | |
3677 | _SOKOL_PRIVATE void _sg_smpcache_discard(_sg_sampler_cache_t* cache) { |
3678 | SOKOL_ASSERT(cache && cache->items); |
3679 | _sg_free(cache->items); |
3680 | cache->items = 0; |
3681 | cache->num_items = 0; |
3682 | cache->capacity = 0; |
3683 | } |
3684 | |
3685 | _SOKOL_PRIVATE int _sg_smpcache_minlod_int(float min_lod) { |
3686 | return (int) (min_lod * 1000.0f); |
3687 | } |
3688 | |
3689 | _SOKOL_PRIVATE int _sg_smpcache_maxlod_int(float max_lod) { |
3690 | return (int) (_sg_clamp(max_lod, 0.0f, 1000.0f) * 1000.0f); |
3691 | } |
3692 | |
3693 | _SOKOL_PRIVATE int _sg_smpcache_find_item(const _sg_sampler_cache_t* cache, const sg_image_desc* img_desc) { |
3694 | /* return matching sampler cache item index or -1 */ |
3695 | SOKOL_ASSERT(cache && cache->items); |
3696 | SOKOL_ASSERT(img_desc); |
3697 | const int min_lod = _sg_smpcache_minlod_int(img_desc->min_lod); |
3698 | const int max_lod = _sg_smpcache_maxlod_int(img_desc->max_lod); |
3699 | for (int i = 0; i < cache->num_items; i++) { |
3700 | const _sg_sampler_cache_item_t* item = &cache->items[i]; |
3701 | if ((img_desc->min_filter == item->min_filter) && |
3702 | (img_desc->mag_filter == item->mag_filter) && |
3703 | (img_desc->wrap_u == item->wrap_u) && |
3704 | (img_desc->wrap_v == item->wrap_v) && |
3705 | (img_desc->wrap_w == item->wrap_w) && |
3706 | (img_desc->max_anisotropy == item->max_anisotropy) && |
3707 | (img_desc->border_color == item->border_color) && |
3708 | (min_lod == item->min_lod) && |
3709 | (max_lod == item->max_lod)) |
3710 | { |
3711 | return i; |
3712 | } |
3713 | } |
3714 | /* fallthrough: no matching cache item found */ |
3715 | return -1; |
3716 | } |
3717 | |
3718 | _SOKOL_PRIVATE void _sg_smpcache_add_item(_sg_sampler_cache_t* cache, const sg_image_desc* img_desc, uintptr_t sampler_handle) { |
3719 | SOKOL_ASSERT(cache && cache->items); |
3720 | SOKOL_ASSERT(img_desc); |
3721 | SOKOL_ASSERT(cache->num_items < cache->capacity); |
3722 | const int item_index = cache->num_items++; |
3723 | _sg_sampler_cache_item_t* item = &cache->items[item_index]; |
3724 | item->min_filter = img_desc->min_filter; |
3725 | item->mag_filter = img_desc->mag_filter; |
3726 | item->wrap_u = img_desc->wrap_u; |
3727 | item->wrap_v = img_desc->wrap_v; |
3728 | item->wrap_w = img_desc->wrap_w; |
3729 | item->border_color = img_desc->border_color; |
3730 | item->max_anisotropy = img_desc->max_anisotropy; |
3731 | item->min_lod = _sg_smpcache_minlod_int(img_desc->min_lod); |
3732 | item->max_lod = _sg_smpcache_maxlod_int(img_desc->max_lod); |
3733 | item->sampler_handle = sampler_handle; |
3734 | } |
3735 | |
3736 | _SOKOL_PRIVATE uintptr_t _sg_smpcache_sampler(_sg_sampler_cache_t* cache, int item_index) { |
3737 | SOKOL_ASSERT(cache && cache->items); |
3738 | SOKOL_ASSERT(item_index < cache->num_items); |
3739 | return cache->items[item_index].sampler_handle; |
3740 | } |
3741 | |
3742 | /*=== DUMMY BACKEND DECLARATIONS =============================================*/ |
3743 | #if defined(SOKOL_DUMMY_BACKEND) |
/* dummy-backend buffer: just the common state, no backend objects */
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
} _sg_dummy_buffer_t;
typedef _sg_dummy_buffer_t _sg_buffer_t;

/* dummy-backend image: just the common state, no backend objects */
typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
} _sg_dummy_image_t;
typedef _sg_dummy_image_t _sg_image_t;

/* dummy-backend shader: just the common reflection state */
typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
} _sg_dummy_shader_t;
typedef _sg_dummy_shader_t _sg_shader_t;

/* dummy-backend pipeline: common state plus the resolved shader pointer */
typedef struct {
    _sg_slot_t slot;
    _sg_shader_t* shader;
    _sg_pipeline_common_t cmn;
} _sg_dummy_pipeline_t;
typedef _sg_dummy_pipeline_t _sg_pipeline_t;

/* dummy-backend attachment: resolved image pointer */
typedef struct {
    _sg_image_t* image;
} _sg_dummy_attachment_t;

/* dummy-backend pass: common state plus resolved attachment image pointers */
typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_dummy_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_dummy_attachment_t ds_att;
    } dmy;
} _sg_dummy_pass_t;
typedef _sg_dummy_pass_t _sg_pass_t;
typedef _sg_pass_attachment_common_t _sg_pass_attachment_t;

/* dummy-backend context: only the pool slot, no backend state */
typedef struct {
    _sg_slot_t slot;
} _sg_dummy_context_t;
typedef _sg_dummy_context_t _sg_context_t;
3788 | |
3789 | /*== GL BACKEND DECLARATIONS =================================================*/ |
3790 | #elif defined(_SOKOL_ANY_GL) |
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        /* one GL buffer object per in-flight frame (double-buffering for dynamic updates) */
        GLuint buf[SG_NUM_INFLIGHT_FRAMES];
        bool ext_buffers;   /* if true, external buffers were injected with sg_buffer_desc.gl_buffers */
    } gl;
} _sg_gl_buffer_t;
typedef _sg_gl_buffer_t _sg_buffer_t;

typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        GLenum target;                  /* GL texture target (e.g. GL_TEXTURE_2D) */
        GLuint depth_render_buffer;
        GLuint msaa_render_buffer;
        GLuint tex[SG_NUM_INFLIGHT_FRAMES];
        bool ext_textures;  /* if true, external textures were injected with sg_image_desc.gl_textures */
    } gl;
} _sg_gl_image_t;
typedef _sg_gl_image_t _sg_image_t;

/* a single resolved GL uniform inside a uniform block */
typedef struct {
    GLint gl_loc;           /* uniform location queried from the linked program */
    sg_uniform_type type;
    uint16_t count;         /* array count */
    uint16_t offset;        /* byte offset into the uniform block data */
} _sg_gl_uniform_t;

typedef struct {
    int num_uniforms;
    _sg_gl_uniform_t uniforms[SG_MAX_UB_MEMBERS];
} _sg_gl_uniform_block_t;

typedef struct {
    int gl_tex_slot;        /* texture unit index */
} _sg_gl_shader_image_t;

typedef struct {
    _sg_str_t name;         /* vertex attribute name for glGetAttribLocation */
} _sg_gl_shader_attr_t;

typedef struct {
    _sg_gl_uniform_block_t uniform_blocks[SG_MAX_SHADERSTAGE_UBS];
    _sg_gl_shader_image_t images[SG_MAX_SHADERSTAGE_IMAGES];
} _sg_gl_shader_stage_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        GLuint prog;        /* linked GL program object */
        _sg_gl_shader_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
        _sg_gl_shader_stage_t stage[SG_NUM_SHADER_STAGES];
    } gl;
} _sg_gl_shader_t;
typedef _sg_gl_shader_t _sg_shader_t;
3849 | |
/* resolved per-attribute vertex layout state for glVertexAttribPointer */
typedef struct {
    int8_t vb_index;        /* -1 if attr is not enabled */
    int8_t divisor;         /* -1 if not initialized */
    uint8_t stride;
    uint8_t size;
    uint8_t normalized;
    int offset;
    GLenum type;
} _sg_gl_attr_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        _sg_gl_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
        sg_depth_state depth;
        sg_stencil_state stencil;
        sg_primitive_type primitive_type;
        sg_blend_state blend;
        sg_color_mask color_write_mask[SG_MAX_COLOR_ATTACHMENTS];
        sg_cull_mode cull_mode;
        sg_face_winding face_winding;
        int sample_count;
        bool alpha_to_coverage_enabled;
    } gl;
} _sg_gl_pipeline_t;
typedef _sg_gl_pipeline_t _sg_pipeline_t;

typedef struct {
    _sg_image_t* image;
    GLuint gl_msaa_resolve_buffer;
} _sg_gl_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        GLuint fb;          /* GL framebuffer object */
        _sg_gl_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_gl_attachment_t ds_att;
    } gl;
} _sg_gl_pass_t;
typedef _sg_gl_pass_t _sg_pass_t;
typedef _sg_pass_attachment_common_t _sg_pass_attachment_t;

typedef struct {
    _sg_slot_t slot;
    /* VAOs are not available on GLES2 */
    #if !defined(SOKOL_GLES2)
    GLuint vao;
    #endif
    GLuint default_framebuffer;
} _sg_gl_context_t;
typedef _sg_gl_context_t _sg_context_t;
3904 | |
typedef struct {
    _sg_gl_attr_t gl_attr;
    GLuint gl_vbuf;
} _sg_gl_cache_attr_t;

typedef struct {
    GLenum target;
    GLuint texture;
} _sg_gl_texture_bind_slot;

/* shadow copy of GL render- and binding-state, used to avoid
   redundant GL state-change calls */
typedef struct {
    sg_depth_state depth;
    sg_stencil_state stencil;
    sg_blend_state blend;
    sg_color_mask color_write_mask[SG_MAX_COLOR_ATTACHMENTS];
    sg_cull_mode cull_mode;
    sg_face_winding face_winding;
    bool polygon_offset_enabled;
    int sample_count;
    sg_color blend_color;
    bool alpha_to_coverage_enabled;
    _sg_gl_cache_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
    GLuint vertex_buffer;
    GLuint index_buffer;
    GLuint stored_vertex_buffer;    /* saved/restored around internal buffer binds */
    GLuint stored_index_buffer;
    GLuint prog;
    _sg_gl_texture_bind_slot textures[SG_MAX_SHADERSTAGE_IMAGES];
    _sg_gl_texture_bind_slot stored_texture;
    int cur_ib_offset;
    GLenum cur_primitive_type;
    GLenum cur_index_type;
    GLenum cur_active_texture;
    _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
} _sg_gl_state_cache_t;

/* the GL backend's top-level state */
typedef struct {
    bool valid;
    bool gles2;     /* true when running on a GLES2 context */
    bool in_pass;
    int cur_pass_width;
    int cur_pass_height;
    _sg_context_t* cur_context;
    _sg_pass_t* cur_pass;
    sg_pass cur_pass_id;
    _sg_gl_state_cache_t cache;
    bool ext_anisotropic;   /* anisotropic-filtering extension available */
    GLint max_anisotropy;
    GLint max_combined_texture_image_units;
    #if _SOKOL_USE_WIN32_GL_LOADER
    HINSTANCE opengl32_dll;     /* handle for the integrated Win32 GL loader */
    #endif
} _sg_gl_backend_t;
3959 | |
3960 | /*== D3D11 BACKEND DECLARATIONS ==============================================*/ |
3961 | #elif defined(SOKOL_D3D11) |
3962 | |
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        ID3D11Buffer* buf;
    } d3d11;
} _sg_d3d11_buffer_t;
typedef _sg_d3d11_buffer_t _sg_buffer_t;

typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        DXGI_FORMAT format;
        ID3D11Texture2D* tex2d;
        ID3D11Texture3D* tex3d;
        ID3D11Texture2D* texds;     /* depth-stencil texture */
        ID3D11Texture2D* texmsaa;   /* MSAA texture */
        ID3D11ShaderResourceView* srv;
        ID3D11SamplerState* smp;
    } d3d11;
} _sg_d3d11_image_t;
typedef _sg_d3d11_image_t _sg_image_t;

/* vertex attribute described by HLSL semantic name + index */
typedef struct {
    _sg_str_t sem_name;
    int sem_index;
} _sg_d3d11_shader_attr_t;

typedef struct {
    ID3D11Buffer* cbufs[SG_MAX_SHADERSTAGE_UBS];    /* constant buffers, one per uniform block */
} _sg_d3d11_shader_stage_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        _sg_d3d11_shader_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
        _sg_d3d11_shader_stage_t stage[SG_NUM_SHADER_STAGES];
        ID3D11VertexShader* vs;
        ID3D11PixelShader* fs;
        /* vertex shader bytecode is retained for input-layout creation at pipeline-creation time */
        void* vs_blob;
        size_t vs_blob_length;
    } d3d11;
} _sg_d3d11_shader_t;
typedef _sg_d3d11_shader_t _sg_shader_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        UINT stencil_ref;
        UINT vb_strides[SG_MAX_SHADERSTAGE_BUFFERS];
        D3D_PRIMITIVE_TOPOLOGY topology;
        DXGI_FORMAT index_format;
        ID3D11InputLayout* il;
        ID3D11RasterizerState* rs;
        ID3D11DepthStencilState* dss;
        ID3D11BlendState* bs;
    } d3d11;
} _sg_d3d11_pipeline_t;
typedef _sg_d3d11_pipeline_t _sg_pipeline_t;

typedef struct {
    _sg_image_t* image;
    ID3D11RenderTargetView* rtv;
} _sg_d3d11_color_attachment_t;

typedef struct {
    _sg_image_t* image;
    ID3D11DepthStencilView* dsv;
} _sg_d3d11_ds_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_d3d11_color_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_d3d11_ds_attachment_t ds_att;
    } d3d11;
} _sg_d3d11_pass_t;
typedef _sg_d3d11_pass_t _sg_pass_t;
typedef _sg_pass_attachment_common_t _sg_pass_attachment_t;

typedef struct {
    _sg_slot_t slot;
} _sg_d3d11_context_t;
typedef _sg_d3d11_context_t _sg_context_t;
4052 | |
/* the D3D11 backend's top-level state */
typedef struct {
    bool valid;
    ID3D11Device* dev;
    ID3D11DeviceContext* ctx;
    /* user-provided callbacks to obtain the default render-target and
       depth-stencil views (plain and userdata variants) */
    const void* (*rtv_cb)(void);
    const void* (*rtv_userdata_cb)(void*);
    const void* (*dsv_cb)(void);
    const void* (*dsv_userdata_cb)(void*);
    void* user_data;
    bool in_pass;
    bool use_indexed_draw;
    bool use_instanced_draw;
    int cur_width;
    int cur_height;
    int num_rtvs;
    _sg_pass_t* cur_pass;
    sg_pass cur_pass_id;
    _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
    ID3D11RenderTargetView* cur_rtvs[SG_MAX_COLOR_ATTACHMENTS];
    ID3D11DepthStencilView* cur_dsv;
    /* on-demand loaded d3dcompiler_47.dll handles */
    HINSTANCE d3dcompiler_dll;
    bool d3dcompiler_dll_load_failed;
    pD3DCompile D3DCompile_func;
    /* global subresourcedata array for texture updates */
    D3D11_SUBRESOURCE_DATA subres_data[SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS];
} _sg_d3d11_backend_t;
4081 | |
4082 | /*=== METAL BACKEND DECLARATIONS =============================================*/ |
4083 | #elif defined(SOKOL_METAL) |
4084 | |
4085 | #if defined(_SG_TARGET_MACOS) || defined(_SG_TARGET_IOS_SIMULATOR) |
4086 | #define _SG_MTL_UB_ALIGN (256) |
4087 | #else |
4088 | #define _SG_MTL_UB_ALIGN (16) |
4089 | #endif |
4090 | #define _SG_MTL_INVALID_SLOT_INDEX (0) |
4091 | |
/* entry of the deferred-release queue (ObjC objects must outlive in-flight frames) */
typedef struct {
    uint32_t frame_index;   /* frame index at which it is safe to release this resource */
    int slot_index;
} _sg_mtl_release_item_t;

/* pool of ObjC object references addressed by integer slot indices,
   so that the C structs below don't need to store ObjC ids directly */
typedef struct {
    NSMutableArray* pool;
    int num_slots;
    int free_queue_top;
    int* free_queue;
    int release_queue_front;
    int release_queue_back;
    _sg_mtl_release_item_t* release_queue;
} _sg_mtl_idpool_t;

typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        int buf[SG_NUM_INFLIGHT_FRAMES];  /* index into _sg_mtl_pool */
    } mtl;
} _sg_mtl_buffer_t;
typedef _sg_mtl_buffer_t _sg_buffer_t;

typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        /* all members are idpool slot indices, not ObjC ids */
        int tex[SG_NUM_INFLIGHT_FRAMES];
        int depth_tex;
        int msaa_tex;
        int sampler_state;
    } mtl;
} _sg_mtl_image_t;
typedef _sg_mtl_image_t _sg_image_t;

typedef struct {
    int mtl_lib;    /* idpool slot index of MTLLibrary */
    int mtl_func;   /* idpool slot index of MTLFunction */
} _sg_mtl_shader_stage_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        _sg_mtl_shader_stage_t stage[SG_NUM_SHADER_STAGES];
    } mtl;
} _sg_mtl_shader_t;
typedef _sg_mtl_shader_t _sg_shader_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        MTLPrimitiveType prim_type;
        int index_size;
        MTLIndexType index_type;
        MTLCullMode cull_mode;
        MTLWinding winding;
        uint32_t stencil_ref;
        int rps;    /* idpool slot index of render-pipeline state */
        int dss;    /* idpool slot index of depth-stencil state */
    } mtl;
} _sg_mtl_pipeline_t;
typedef _sg_mtl_pipeline_t _sg_pipeline_t;

typedef struct {
    _sg_image_t* image;
} _sg_mtl_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_mtl_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_mtl_attachment_t ds_att;
    } mtl;
} _sg_mtl_pass_t;
typedef _sg_mtl_pass_t _sg_pass_t;
typedef _sg_pass_attachment_common_t _sg_pass_attachment_t;

typedef struct {
    _sg_slot_t slot;
} _sg_mtl_context_t;
typedef _sg_mtl_context_t _sg_context_t;
4178 | |
/* resource binding state cache (avoids redundant encoder set-calls) */
typedef struct {
    const _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
    const _sg_buffer_t* cur_indexbuffer;
    int cur_indexbuffer_offset;
    sg_buffer cur_indexbuffer_id;
    const _sg_buffer_t* cur_vertexbuffers[SG_MAX_SHADERSTAGE_BUFFERS];
    int cur_vertexbuffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS];
    sg_buffer cur_vertexbuffer_ids[SG_MAX_SHADERSTAGE_BUFFERS];
    const _sg_image_t* cur_vs_images[SG_MAX_SHADERSTAGE_IMAGES];
    sg_image cur_vs_image_ids[SG_MAX_SHADERSTAGE_IMAGES];
    const _sg_image_t* cur_fs_images[SG_MAX_SHADERSTAGE_IMAGES];
    sg_image cur_fs_image_ids[SG_MAX_SHADERSTAGE_IMAGES];
} _sg_mtl_state_cache_t;

/* the Metal backend's top-level state */
typedef struct {
    bool valid;
    /* user-provided callbacks for the frame's render-pass descriptor and drawable */
    const void*(*renderpass_descriptor_cb)(void);
    const void*(*renderpass_descriptor_userdata_cb)(void*);
    const void*(*drawable_cb)(void);
    const void*(*drawable_userdata_cb)(void*);
    void* user_data;
    uint32_t frame_index;
    uint32_t cur_frame_rotate_index;    /* rotates through SG_NUM_INFLIGHT_FRAMES */
    int ub_size;
    int cur_ub_offset;
    uint8_t* cur_ub_base_ptr;
    bool in_pass;
    bool pass_valid;
    int cur_width;
    int cur_height;
    _sg_mtl_state_cache_t state_cache;
    _sg_sampler_cache_t sampler_cache;
    _sg_mtl_idpool_t idpool;
    dispatch_semaphore_t sem;   /* throttles CPU ahead-of-GPU frames */
    id<MTLDevice> device;
    id<MTLCommandQueue> cmd_queue;
    id<MTLCommandBuffer> cmd_buffer;
    id<MTLCommandBuffer> present_cmd_buffer;
    id<MTLRenderCommandEncoder> cmd_encoder;
    id<MTLBuffer> uniform_buffers[SG_NUM_INFLIGHT_FRAMES];
} _sg_mtl_backend_t;
4222 | |
4223 | /*=== WGPU BACKEND DECLARATIONS ==============================================*/ |
4224 | #elif defined(SOKOL_WGPU) |
4225 | |
4226 | #define _SG_WGPU_STAGING_ALIGN (256) |
4227 | #define _SG_WGPU_STAGING_PIPELINE_SIZE (8) |
4228 | #define _SG_WGPU_ROWPITCH_ALIGN (256) |
4229 | #define _SG_WGPU_MAX_SHADERSTAGE_IMAGES (8) |
4230 | #define _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE (1<<16) |
4231 | |
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        WGPUBuffer buf;
    } wgpu;
} _sg_wgpu_buffer_t;
typedef _sg_wgpu_buffer_t _sg_buffer_t;

typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        WGPUTexture tex;
        WGPUTextureView tex_view;
        WGPUTexture msaa_tex;
        WGPUSampler sampler;
    } wgpu;
} _sg_wgpu_image_t;
typedef _sg_wgpu_image_t _sg_image_t;

typedef struct {
    WGPUShaderModule module;
    WGPUBindGroupLayout bind_group_layout;
    _sg_str_t entry;    /* shader entry point name */
} _sg_wgpu_shader_stage_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        _sg_wgpu_shader_stage_t stage[SG_NUM_SHADER_STAGES];
    } wgpu;
} _sg_wgpu_shader_t;
typedef _sg_wgpu_shader_t _sg_shader_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        WGPURenderPipeline pip;
        uint32_t stencil_ref;
    } wgpu;
} _sg_wgpu_pipeline_t;
typedef _sg_wgpu_pipeline_t _sg_pipeline_t;

typedef struct {
    _sg_image_t* image;
    WGPUTextureView render_tex_view;
    WGPUTextureView resolve_tex_view;   /* MSAA-resolve target view */
} _sg_wgpu_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_wgpu_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_wgpu_attachment_t ds_att;
    } wgpu;
} _sg_wgpu_pass_t;
typedef _sg_wgpu_pass_t _sg_pass_t;
typedef _sg_pass_attachment_common_t _sg_pass_attachment_t;

typedef struct {
    _sg_slot_t slot;
} _sg_wgpu_context_t;
typedef _sg_wgpu_context_t _sg_context_t;
4300 | |
/* a pool of per-frame uniform buffers */
typedef struct {
    WGPUBindGroupLayout bindgroup_layout;
    uint32_t num_bytes;
    uint32_t offset;    /* current offset into current frame's mapped uniform buffer */
    uint32_t bind_offsets[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS];
    WGPUBuffer buf;     /* the GPU-side uniform buffer */
    WGPUBindGroup bindgroup;
    struct {
        int num;
        int cur;
        WGPUBuffer buf[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* CPU-side staging buffers */
        uint8_t* ptr[_SG_WGPU_STAGING_PIPELINE_SIZE];   /* if != 0, staging buffer currently mapped */
    } stage;
} _sg_wgpu_ubpool_t;

/* ...a similar pool (like uniform buffer pool) of dynamic-resource staging buffers */
typedef struct {
    uint32_t num_bytes;
    uint32_t offset;    /* current offset into current frame's staging buffer */
    int num;            /* number of staging buffers */
    int cur;            /* this frame's staging buffer */
    WGPUBuffer buf[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* CPU-side staging buffers */
    uint8_t* ptr[_SG_WGPU_STAGING_PIPELINE_SIZE];   /* if != 0, staging buffer currently mapped */
} _sg_wgpu_stagingpool_t;

/* the WGPU backend state */
typedef struct {
    bool valid;
    bool in_pass;
    bool draw_indexed;
    int cur_width;
    int cur_height;
    WGPUDevice dev;
    /* user-provided callbacks for the swapchain's texture views */
    WGPUTextureView (*render_view_cb)(void);
    WGPUTextureView (*render_view_userdata_cb)(void*);
    WGPUTextureView (*resolve_view_cb)(void);
    WGPUTextureView (*resolve_view_userdata_cb)(void*);
    WGPUTextureView (*depth_stencil_view_cb)(void);
    WGPUTextureView (*depth_stencil_view_userdata_cb)(void*);
    void* user_data;
    WGPUQueue queue;
    WGPUCommandEncoder render_cmd_enc;
    WGPUCommandEncoder staging_cmd_enc;
    WGPURenderPassEncoder pass_enc;
    WGPUBindGroup empty_bind_group;
    const _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
    _sg_sampler_cache_t sampler_cache;
    _sg_wgpu_ubpool_t ub;
    _sg_wgpu_stagingpool_t staging;
} _sg_wgpu_backend_t;
4353 | #endif |
4354 | |
4355 | /*=== RESOURCE POOL DECLARATIONS =============================================*/ |
4356 | |
4357 | /* this *MUST* remain 0 */ |
4358 | #define _SG_INVALID_SLOT_INDEX (0) |
4359 | |
/* a generic free-list pool: slot indices are handed out from free_queue,
   gen_ctrs provides the generation-counter part of resource ids */
typedef struct {
    int size;
    int queue_top;
    uint32_t* gen_ctrs;
    int* free_queue;
} _sg_pool_t;

/* all resource pools plus the backing arrays of resource structs */
typedef struct {
    _sg_pool_t buffer_pool;
    _sg_pool_t image_pool;
    _sg_pool_t shader_pool;
    _sg_pool_t pipeline_pool;
    _sg_pool_t pass_pool;
    _sg_pool_t context_pool;
    _sg_buffer_t* buffers;
    _sg_image_t* images;
    _sg_shader_t* shaders;
    _sg_pipeline_t* pipelines;
    _sg_pass_t* passes;
    _sg_context_t* contexts;
} _sg_pools_t;
4381 | |
4382 | /*=== VALIDATION LAYER DECLARATIONS ==========================================*/ |
/* all errors reported by the debug-mode validation layer
   (NOTE: the enum values are stored in _sg.validate_error, keep order stable) */
typedef enum {
    /* special case 'validation was successful' */
    _SG_VALIDATE_SUCCESS,

    /* buffer creation */
    _SG_VALIDATE_BUFFERDESC_CANARY,
    _SG_VALIDATE_BUFFERDESC_SIZE,
    _SG_VALIDATE_BUFFERDESC_DATA,
    _SG_VALIDATE_BUFFERDESC_DATA_SIZE,
    _SG_VALIDATE_BUFFERDESC_NO_DATA,

    /* image data (for image creation and updating) */
    _SG_VALIDATE_IMAGEDATA_NODATA,
    _SG_VALIDATE_IMAGEDATA_DATA_SIZE,

    /* image creation */
    _SG_VALIDATE_IMAGEDESC_CANARY,
    _SG_VALIDATE_IMAGEDESC_WIDTH,
    _SG_VALIDATE_IMAGEDESC_HEIGHT,
    _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT,
    _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT,
    _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT,
    _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT,
    _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE,
    _SG_VALIDATE_IMAGEDESC_RT_NO_DATA,
    _SG_VALIDATE_IMAGEDESC_INJECTED_NO_DATA,
    _SG_VALIDATE_IMAGEDESC_DYNAMIC_NO_DATA,
    _SG_VALIDATE_IMAGEDESC_COMPRESSED_IMMUTABLE,

    /* shader creation */
    _SG_VALIDATE_SHADERDESC_CANARY,
    _SG_VALIDATE_SHADERDESC_SOURCE,
    _SG_VALIDATE_SHADERDESC_BYTECODE,
    _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE,
    _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE,
    _SG_VALIDATE_SHADERDESC_NO_CONT_UBS,
    _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS,
    _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS,
    _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS,
    _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME,
    _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH,
    _SG_VALIDATE_SHADERDESC_UB_ARRAY_COUNT,
    _SG_VALIDATE_SHADERDESC_UB_STD140_ARRAY_TYPE,
    _SG_VALIDATE_SHADERDESC_IMG_NAME,
    _SG_VALIDATE_SHADERDESC_ATTR_NAMES,
    _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS,
    _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG,

    /* pipeline creation */
    _SG_VALIDATE_PIPELINEDESC_CANARY,
    _SG_VALIDATE_PIPELINEDESC_SHADER,
    _SG_VALIDATE_PIPELINEDESC_NO_ATTRS,
    _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4,
    _SG_VALIDATE_PIPELINEDESC_ATTR_NAME,
    _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS,

    /* pass creation */
    _SG_VALIDATE_PASSDESC_CANARY,
    _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS,
    _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS,
    _SG_VALIDATE_PASSDESC_IMAGE,
    _SG_VALIDATE_PASSDESC_MIPLEVEL,
    _SG_VALIDATE_PASSDESC_FACE,
    _SG_VALIDATE_PASSDESC_LAYER,
    _SG_VALIDATE_PASSDESC_SLICE,
    _SG_VALIDATE_PASSDESC_IMAGE_NO_RT,
    _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT,
    _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT,
    _SG_VALIDATE_PASSDESC_IMAGE_SIZES,
    _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS,

    /* sg_begin_pass validation */
    _SG_VALIDATE_BEGINPASS_PASS,
    _SG_VALIDATE_BEGINPASS_IMAGE,

    /* sg_apply_pipeline validation */
    _SG_VALIDATE_APIP_PIPELINE_VALID_ID,
    _SG_VALIDATE_APIP_PIPELINE_EXISTS,
    _SG_VALIDATE_APIP_PIPELINE_VALID,
    _SG_VALIDATE_APIP_SHADER_EXISTS,
    _SG_VALIDATE_APIP_SHADER_VALID,
    _SG_VALIDATE_APIP_ATT_COUNT,
    _SG_VALIDATE_APIP_COLOR_FORMAT,
    _SG_VALIDATE_APIP_DEPTH_FORMAT,
    _SG_VALIDATE_APIP_SAMPLE_COUNT,

    /* sg_apply_bindings validation */
    _SG_VALIDATE_ABND_PIPELINE,
    _SG_VALIDATE_ABND_PIPELINE_EXISTS,
    _SG_VALIDATE_ABND_PIPELINE_VALID,
    _SG_VALIDATE_ABND_VBS,
    _SG_VALIDATE_ABND_VB_EXISTS,
    _SG_VALIDATE_ABND_VB_TYPE,
    _SG_VALIDATE_ABND_VB_OVERFLOW,
    _SG_VALIDATE_ABND_NO_IB,
    _SG_VALIDATE_ABND_IB,
    _SG_VALIDATE_ABND_IB_EXISTS,
    _SG_VALIDATE_ABND_IB_TYPE,
    _SG_VALIDATE_ABND_IB_OVERFLOW,
    _SG_VALIDATE_ABND_VS_IMGS,
    _SG_VALIDATE_ABND_VS_IMG_EXISTS,
    _SG_VALIDATE_ABND_VS_IMG_TYPES,
    _SG_VALIDATE_ABND_FS_IMGS,
    _SG_VALIDATE_ABND_FS_IMG_EXISTS,
    _SG_VALIDATE_ABND_FS_IMG_TYPES,

    /* sg_apply_uniforms validation */
    _SG_VALIDATE_AUB_NO_PIPELINE,
    _SG_VALIDATE_AUB_NO_UB_AT_SLOT,
    _SG_VALIDATE_AUB_SIZE,

    /* sg_update_buffer validation */
    _SG_VALIDATE_UPDATEBUF_USAGE,
    _SG_VALIDATE_UPDATEBUF_SIZE,
    _SG_VALIDATE_UPDATEBUF_ONCE,
    _SG_VALIDATE_UPDATEBUF_APPEND,

    /* sg_append_buffer validation */
    _SG_VALIDATE_APPENDBUF_USAGE,
    _SG_VALIDATE_APPENDBUF_SIZE,
    _SG_VALIDATE_APPENDBUF_UPDATE,

    /* sg_update_image validation */
    _SG_VALIDATE_UPDIMG_USAGE,
    _SG_VALIDATE_UPDIMG_NOTENOUGHDATA,
    _SG_VALIDATE_UPDIMG_ONCE
} _sg_validate_error_t;
4510 | |
4511 | /*=== GENERIC BACKEND STATE ==================================================*/ |
4512 | |
/* growable array of user-registered sg_commit() listeners */
typedef struct {
    int num;    // number of allocated commit listener items
    int upper;  // the current upper index (no valid items past this point)
    sg_commit_listener* items;
} _sg_commit_listeners_t;

/* the complete library state (one global instance: _sg below) */
typedef struct {
    bool valid;
    sg_desc desc;       /* original desc with default values patched in */
    uint32_t frame_index;
    sg_context active_context;
    sg_pass cur_pass;
    sg_pipeline cur_pipeline;
    bool pass_valid;
    bool bindings_valid;
    bool next_draw_valid;
    #if defined(SOKOL_DEBUG)
    _sg_validate_error_t validate_error;    /* last validation-layer error */
    #endif
    _sg_pools_t pools;
    sg_backend backend;
    sg_features features;
    sg_limits limits;
    /* per-pixelformat capability table, filled at setup time */
    sg_pixelformat_info formats[_SG_PIXELFORMAT_NUM];
    /* exactly one backend state is compiled in */
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_backend_t gl;
    #elif defined(SOKOL_METAL)
    _sg_mtl_backend_t mtl;
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_backend_t d3d11;
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_backend_t wgpu;
    #endif
    #if defined(SOKOL_TRACE_HOOKS)
    sg_trace_hooks hooks;
    #endif
    _sg_commit_listeners_t commit_listeners;
} _sg_state_t;
static _sg_state_t _sg;
4552 | |
4553 | /*-- helper functions --------------------------------------------------------*/ |
4554 | |
4555 | // a helper macro to clear a struct with potentially ARC'ed ObjC references |
#if defined(SOKOL_METAL)
    /* on Metal, assign a default-initialized value so ARC-managed ObjC
       references are released properly instead of being memset away */
    #if defined(__cplusplus)
    #define _SG_CLEAR_ARC_STRUCT(type, item) { item = type(); }
    #else
    #define _SG_CLEAR_ARC_STRUCT(type, item) { item = (type) { 0 }; }
    #endif
#else
    /* non-ObjC backends: a plain memset is safe */
    #define _SG_CLEAR_ARC_STRUCT(type, item) { _sg_clear(&item, sizeof(item)); }
#endif
4565 | |
4566 | _SOKOL_PRIVATE void _sg_clear(void* ptr, size_t size) { |
4567 | SOKOL_ASSERT(ptr && (size > 0)); |
4568 | memset(ptr, 0, size); |
4569 | } |
4570 | |
4571 | _SOKOL_PRIVATE void* _sg_malloc(size_t size) { |
4572 | SOKOL_ASSERT(size > 0); |
4573 | void* ptr; |
4574 | if (_sg.desc.allocator.alloc) { |
4575 | ptr = _sg.desc.allocator.alloc(size, _sg.desc.allocator.user_data); |
4576 | } |
4577 | else { |
4578 | ptr = malloc(size); |
4579 | } |
4580 | SOKOL_ASSERT(ptr); |
4581 | return ptr; |
4582 | } |
4583 | |
4584 | _SOKOL_PRIVATE void* _sg_malloc_clear(size_t size) { |
4585 | void* ptr = _sg_malloc(size); |
4586 | _sg_clear(ptr, size); |
4587 | return ptr; |
4588 | } |
4589 | |
4590 | _SOKOL_PRIVATE void _sg_free(void* ptr) { |
4591 | if (_sg.desc.allocator.free) { |
4592 | _sg.desc.allocator.free(ptr, _sg.desc.allocator.user_data); |
4593 | } |
4594 | else { |
4595 | free(ptr); |
4596 | } |
4597 | } |
4598 | |
#if defined(SOKOL_DEBUG)
/* forward a debug message to the user-provided log callback,
   or to the SOKOL_LOG macro if no callback was set */
_SOKOL_PRIVATE void _sg_log(const char* msg) {
    SOKOL_ASSERT(msg);
    if (_sg.desc.logger.log_cb) {
        _sg.desc.logger.log_cb(msg, _sg.desc.logger.user_data);
    } else {
        SOKOL_LOG(msg);
    }
}
#endif
4609 | |
4610 | _SOKOL_PRIVATE bool _sg_strempty(const _sg_str_t* str) { |
4611 | return 0 == str->buf[0]; |
4612 | } |
4613 | |
4614 | _SOKOL_PRIVATE const char* _sg_strptr(const _sg_str_t* str) { |
4615 | return &str->buf[0]; |
4616 | } |
4617 | |
/* copy src into the fixed-size string struct, truncating if necessary;
   a null src clears the string; the result is always zero-terminated */
_SOKOL_PRIVATE void _sg_strcpy(_sg_str_t* dst, const char* src) {
    SOKOL_ASSERT(dst);
    if (src) {
        #if defined(_MSC_VER)
        strncpy_s(dst->buf, _SG_STRING_SIZE, src, (_SG_STRING_SIZE-1));
        #else
        strncpy(dst->buf, src, _SG_STRING_SIZE);
        #endif
        /* strncpy() doesn't zero-terminate on truncation, patch the last byte */
        dst->buf[_SG_STRING_SIZE-1] = 0;
    }
    else {
        _sg_clear(dst->buf, _SG_STRING_SIZE);
    }
}
4632 | |
4633 | _SOKOL_PRIVATE uint32_t _sg_align_u32(uint32_t val, uint32_t align) { |
4634 | SOKOL_ASSERT((align > 0) && ((align & (align - 1)) == 0)); |
4635 | return (val + (align - 1)) & ~(align - 1); |
4636 | } |
4637 | |
/* return byte size of a vertex format */
_SOKOL_PRIVATE int _sg_vertexformat_bytesize(sg_vertex_format fmt) {
    switch (fmt) {
        case SG_VERTEXFORMAT_FLOAT:     return 4;
        case SG_VERTEXFORMAT_FLOAT2:    return 8;
        case SG_VERTEXFORMAT_FLOAT3:    return 12;
        case SG_VERTEXFORMAT_FLOAT4:    return 16;
        case SG_VERTEXFORMAT_BYTE4:     return 4;
        case SG_VERTEXFORMAT_BYTE4N:    return 4;
        case SG_VERTEXFORMAT_UBYTE4:    return 4;
        case SG_VERTEXFORMAT_UBYTE4N:   return 4;
        case SG_VERTEXFORMAT_SHORT2:    return 4;
        case SG_VERTEXFORMAT_SHORT2N:   return 4;
        case SG_VERTEXFORMAT_USHORT2N:  return 4;
        case SG_VERTEXFORMAT_SHORT4:    return 8;
        case SG_VERTEXFORMAT_SHORT4N:   return 8;
        case SG_VERTEXFORMAT_USHORT4N:  return 8;
        case SG_VERTEXFORMAT_UINT10_N2: return 4;
        case SG_VERTEXFORMAT_INVALID:   return 0;
        default:
            /* unknown enum value: asserts in debug, returns -1 in release */
            SOKOL_UNREACHABLE;
            return -1;
    }
}
4662 | |
4663 | _SOKOL_PRIVATE uint32_t _sg_uniform_alignment(sg_uniform_type type, int array_count, sg_uniform_layout ub_layout) { |
4664 | if (ub_layout == SG_UNIFORMLAYOUT_NATIVE) { |
4665 | return 1; |
4666 | } |
4667 | else { |
4668 | SOKOL_ASSERT(array_count > 0); |
4669 | if (array_count == 1) { |
4670 | switch (type) { |
4671 | case SG_UNIFORMTYPE_FLOAT: |
4672 | case SG_UNIFORMTYPE_INT: |
4673 | return 4; |
4674 | case SG_UNIFORMTYPE_FLOAT2: |
4675 | case SG_UNIFORMTYPE_INT2: |
4676 | return 8; |
4677 | case SG_UNIFORMTYPE_FLOAT3: |
4678 | case SG_UNIFORMTYPE_FLOAT4: |
4679 | case SG_UNIFORMTYPE_INT3: |
4680 | case SG_UNIFORMTYPE_INT4: |
4681 | return 16; |
4682 | case SG_UNIFORMTYPE_MAT4: |
4683 | return 16; |
4684 | default: |
4685 | SOKOL_UNREACHABLE; |
4686 | return 1; |
4687 | } |
4688 | } |
4689 | else { |
4690 | return 16; |
4691 | } |
4692 | } |
4693 | } |
4694 | |
4695 | _SOKOL_PRIVATE uint32_t _sg_uniform_size(sg_uniform_type type, int array_count, sg_uniform_layout ub_layout) { |
4696 | SOKOL_ASSERT(array_count > 0); |
4697 | if (array_count == 1) { |
4698 | switch (type) { |
4699 | case SG_UNIFORMTYPE_FLOAT: |
4700 | case SG_UNIFORMTYPE_INT: |
4701 | return 4; |
4702 | case SG_UNIFORMTYPE_FLOAT2: |
4703 | case SG_UNIFORMTYPE_INT2: |
4704 | return 8; |
4705 | case SG_UNIFORMTYPE_FLOAT3: |
4706 | case SG_UNIFORMTYPE_INT3: |
4707 | return 12; |
4708 | case SG_UNIFORMTYPE_FLOAT4: |
4709 | case SG_UNIFORMTYPE_INT4: |
4710 | return 16; |
4711 | case SG_UNIFORMTYPE_MAT4: |
4712 | return 64; |
4713 | default: |
4714 | SOKOL_UNREACHABLE; |
4715 | return 0; |
4716 | } |
4717 | } |
4718 | else { |
4719 | if (ub_layout == SG_UNIFORMLAYOUT_NATIVE) { |
4720 | switch (type) { |
4721 | case SG_UNIFORMTYPE_FLOAT: |
4722 | case SG_UNIFORMTYPE_INT: |
4723 | return 4 * (uint32_t)array_count; |
4724 | case SG_UNIFORMTYPE_FLOAT2: |
4725 | case SG_UNIFORMTYPE_INT2: |
4726 | return 8 * (uint32_t)array_count; |
4727 | case SG_UNIFORMTYPE_FLOAT3: |
4728 | case SG_UNIFORMTYPE_INT3: |
4729 | return 12 * (uint32_t)array_count; |
4730 | case SG_UNIFORMTYPE_FLOAT4: |
4731 | case SG_UNIFORMTYPE_INT4: |
4732 | return 16 * (uint32_t)array_count; |
4733 | case SG_UNIFORMTYPE_MAT4: |
4734 | return 64 * (uint32_t)array_count; |
4735 | default: |
4736 | SOKOL_UNREACHABLE; |
4737 | return 0; |
4738 | } |
4739 | } |
4740 | else { |
4741 | switch (type) { |
4742 | case SG_UNIFORMTYPE_FLOAT: |
4743 | case SG_UNIFORMTYPE_FLOAT2: |
4744 | case SG_UNIFORMTYPE_FLOAT3: |
4745 | case SG_UNIFORMTYPE_FLOAT4: |
4746 | case SG_UNIFORMTYPE_INT: |
4747 | case SG_UNIFORMTYPE_INT2: |
4748 | case SG_UNIFORMTYPE_INT3: |
4749 | case SG_UNIFORMTYPE_INT4: |
4750 | return 16 * (uint32_t)array_count; |
4751 | case SG_UNIFORMTYPE_MAT4: |
4752 | return 64 * (uint32_t)array_count; |
4753 | default: |
4754 | SOKOL_UNREACHABLE; |
4755 | return 0; |
4756 | } |
4757 | } |
4758 | } |
4759 | } |
4760 | |
4761 | /* return true if pixel format is a compressed format */ |
4762 | _SOKOL_PRIVATE bool _sg_is_compressed_pixel_format(sg_pixel_format fmt) { |
4763 | switch (fmt) { |
4764 | case SG_PIXELFORMAT_BC1_RGBA: |
4765 | case SG_PIXELFORMAT_BC2_RGBA: |
4766 | case SG_PIXELFORMAT_BC3_RGBA: |
4767 | case SG_PIXELFORMAT_BC4_R: |
4768 | case SG_PIXELFORMAT_BC4_RSN: |
4769 | case SG_PIXELFORMAT_BC5_RG: |
4770 | case SG_PIXELFORMAT_BC5_RGSN: |
4771 | case SG_PIXELFORMAT_BC6H_RGBF: |
4772 | case SG_PIXELFORMAT_BC6H_RGBUF: |
4773 | case SG_PIXELFORMAT_BC7_RGBA: |
4774 | case SG_PIXELFORMAT_PVRTC_RGB_2BPP: |
4775 | case SG_PIXELFORMAT_PVRTC_RGB_4BPP: |
4776 | case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: |
4777 | case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: |
4778 | case SG_PIXELFORMAT_ETC2_RGB8: |
4779 | case SG_PIXELFORMAT_ETC2_RGB8A1: |
4780 | case SG_PIXELFORMAT_ETC2_RGBA8: |
4781 | case SG_PIXELFORMAT_ETC2_RG11: |
4782 | case SG_PIXELFORMAT_ETC2_RG11SN: |
4783 | return true; |
4784 | default: |
4785 | return false; |
4786 | } |
4787 | } |
4788 | |
4789 | /* return true if pixel format is a valid render target format */ |
4790 | _SOKOL_PRIVATE bool _sg_is_valid_rendertarget_color_format(sg_pixel_format fmt) { |
4791 | const int fmt_index = (int) fmt; |
4792 | SOKOL_ASSERT((fmt_index >= 0) && (fmt_index < _SG_PIXELFORMAT_NUM)); |
4793 | return _sg.formats[fmt_index].render && !_sg.formats[fmt_index].depth; |
4794 | } |
4795 | |
4796 | /* return true if pixel format is a valid depth format */ |
4797 | _SOKOL_PRIVATE bool _sg_is_valid_rendertarget_depth_format(sg_pixel_format fmt) { |
4798 | const int fmt_index = (int) fmt; |
4799 | SOKOL_ASSERT((fmt_index >= 0) && (fmt_index < _SG_PIXELFORMAT_NUM)); |
4800 | return _sg.formats[fmt_index].render && _sg.formats[fmt_index].depth; |
4801 | } |
4802 | |
4803 | /* return true if pixel format is a depth-stencil format */ |
4804 | _SOKOL_PRIVATE bool _sg_is_depth_stencil_format(sg_pixel_format fmt) { |
4805 | return (SG_PIXELFORMAT_DEPTH_STENCIL == fmt); |
4806 | } |
4807 | |
4808 | /* return the bytes-per-pixel for a pixel format */ |
4809 | _SOKOL_PRIVATE int _sg_pixelformat_bytesize(sg_pixel_format fmt) { |
4810 | switch (fmt) { |
4811 | case SG_PIXELFORMAT_R8: |
4812 | case SG_PIXELFORMAT_R8SN: |
4813 | case SG_PIXELFORMAT_R8UI: |
4814 | case SG_PIXELFORMAT_R8SI: |
4815 | return 1; |
4816 | |
4817 | case SG_PIXELFORMAT_R16: |
4818 | case SG_PIXELFORMAT_R16SN: |
4819 | case SG_PIXELFORMAT_R16UI: |
4820 | case SG_PIXELFORMAT_R16SI: |
4821 | case SG_PIXELFORMAT_R16F: |
4822 | case SG_PIXELFORMAT_RG8: |
4823 | case SG_PIXELFORMAT_RG8SN: |
4824 | case SG_PIXELFORMAT_RG8UI: |
4825 | case SG_PIXELFORMAT_RG8SI: |
4826 | return 2; |
4827 | |
4828 | case SG_PIXELFORMAT_R32UI: |
4829 | case SG_PIXELFORMAT_R32SI: |
4830 | case SG_PIXELFORMAT_R32F: |
4831 | case SG_PIXELFORMAT_RG16: |
4832 | case SG_PIXELFORMAT_RG16SN: |
4833 | case SG_PIXELFORMAT_RG16UI: |
4834 | case SG_PIXELFORMAT_RG16SI: |
4835 | case SG_PIXELFORMAT_RG16F: |
4836 | case SG_PIXELFORMAT_RGBA8: |
4837 | case SG_PIXELFORMAT_RGBA8SN: |
4838 | case SG_PIXELFORMAT_RGBA8UI: |
4839 | case SG_PIXELFORMAT_RGBA8SI: |
4840 | case SG_PIXELFORMAT_BGRA8: |
4841 | case SG_PIXELFORMAT_RGB10A2: |
4842 | case SG_PIXELFORMAT_RG11B10F: |
4843 | case SG_PIXELFORMAT_RGB9E5: |
4844 | return 4; |
4845 | |
4846 | case SG_PIXELFORMAT_RG32UI: |
4847 | case SG_PIXELFORMAT_RG32SI: |
4848 | case SG_PIXELFORMAT_RG32F: |
4849 | case SG_PIXELFORMAT_RGBA16: |
4850 | case SG_PIXELFORMAT_RGBA16SN: |
4851 | case SG_PIXELFORMAT_RGBA16UI: |
4852 | case SG_PIXELFORMAT_RGBA16SI: |
4853 | case SG_PIXELFORMAT_RGBA16F: |
4854 | return 8; |
4855 | |
4856 | case SG_PIXELFORMAT_RGBA32UI: |
4857 | case SG_PIXELFORMAT_RGBA32SI: |
4858 | case SG_PIXELFORMAT_RGBA32F: |
4859 | return 16; |
4860 | |
4861 | default: |
4862 | SOKOL_UNREACHABLE; |
4863 | return 0; |
4864 | } |
4865 | } |
4866 | |
4867 | _SOKOL_PRIVATE int _sg_roundup(int val, int round_to) { |
4868 | return (val+(round_to-1)) & ~(round_to-1); |
4869 | } |
4870 | |
4871 | /* return row pitch for an image |
4872 | |
4873 | see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp |
4874 | |
4875 | For the special PVRTC pitch computation, see: |
4876 | GL extension requirement (https://www.khronos.org/registry/OpenGL/extensions/IMG/IMG_texture_compression_pvrtc.txt) |
4877 | |
4878 | Quote: |
4879 | |
4880 | 6) How is the imageSize argument calculated for the CompressedTexImage2D |
4881 | and CompressedTexSubImage2D functions. |
4882 | |
4883 | Resolution: For PVRTC 4BPP formats the imageSize is calculated as: |
4884 | ( max(width, 8) * max(height, 8) * 4 + 7) / 8 |
4885 | For PVRTC 2BPP formats the imageSize is calculated as: |
4886 | ( max(width, 16) * max(height, 8) * 2 + 7) / 8 |
4887 | */ |
4888 | _SOKOL_PRIVATE int _sg_row_pitch(sg_pixel_format fmt, int width, int row_align) { |
4889 | int pitch; |
4890 | switch (fmt) { |
4891 | case SG_PIXELFORMAT_BC1_RGBA: |
4892 | case SG_PIXELFORMAT_BC4_R: |
4893 | case SG_PIXELFORMAT_BC4_RSN: |
4894 | case SG_PIXELFORMAT_ETC2_RGB8: |
4895 | case SG_PIXELFORMAT_ETC2_RGB8A1: |
4896 | pitch = ((width + 3) / 4) * 8; |
4897 | pitch = pitch < 8 ? 8 : pitch; |
4898 | break; |
4899 | case SG_PIXELFORMAT_BC2_RGBA: |
4900 | case SG_PIXELFORMAT_BC3_RGBA: |
4901 | case SG_PIXELFORMAT_BC5_RG: |
4902 | case SG_PIXELFORMAT_BC5_RGSN: |
4903 | case SG_PIXELFORMAT_BC6H_RGBF: |
4904 | case SG_PIXELFORMAT_BC6H_RGBUF: |
4905 | case SG_PIXELFORMAT_BC7_RGBA: |
4906 | case SG_PIXELFORMAT_ETC2_RGBA8: |
4907 | case SG_PIXELFORMAT_ETC2_RG11: |
4908 | case SG_PIXELFORMAT_ETC2_RG11SN: |
4909 | pitch = ((width + 3) / 4) * 16; |
4910 | pitch = pitch < 16 ? 16 : pitch; |
4911 | break; |
4912 | case SG_PIXELFORMAT_PVRTC_RGB_4BPP: |
4913 | case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: |
4914 | pitch = (_sg_max(width, 8) * 4 + 7) / 8; |
4915 | break; |
4916 | case SG_PIXELFORMAT_PVRTC_RGB_2BPP: |
4917 | case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: |
4918 | pitch = (_sg_max(width, 16) * 2 + 7) / 8; |
4919 | break; |
4920 | default: |
4921 | pitch = width * _sg_pixelformat_bytesize(fmt); |
4922 | break; |
4923 | } |
4924 | pitch = _sg_roundup(pitch, row_align); |
4925 | return pitch; |
4926 | } |
4927 | |
4928 | /* compute the number of rows in a surface depending on pixel format */ |
4929 | _SOKOL_PRIVATE int _sg_num_rows(sg_pixel_format fmt, int height) { |
4930 | int num_rows; |
4931 | switch (fmt) { |
4932 | case SG_PIXELFORMAT_BC1_RGBA: |
4933 | case SG_PIXELFORMAT_BC4_R: |
4934 | case SG_PIXELFORMAT_BC4_RSN: |
4935 | case SG_PIXELFORMAT_ETC2_RGB8: |
4936 | case SG_PIXELFORMAT_ETC2_RGB8A1: |
4937 | case SG_PIXELFORMAT_ETC2_RGBA8: |
4938 | case SG_PIXELFORMAT_ETC2_RG11: |
4939 | case SG_PIXELFORMAT_ETC2_RG11SN: |
4940 | case SG_PIXELFORMAT_BC2_RGBA: |
4941 | case SG_PIXELFORMAT_BC3_RGBA: |
4942 | case SG_PIXELFORMAT_BC5_RG: |
4943 | case SG_PIXELFORMAT_BC5_RGSN: |
4944 | case SG_PIXELFORMAT_BC6H_RGBF: |
4945 | case SG_PIXELFORMAT_BC6H_RGBUF: |
4946 | case SG_PIXELFORMAT_BC7_RGBA: |
4947 | num_rows = ((height + 3) / 4); |
4948 | break; |
4949 | case SG_PIXELFORMAT_PVRTC_RGB_4BPP: |
4950 | case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: |
4951 | case SG_PIXELFORMAT_PVRTC_RGB_2BPP: |
4952 | case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: |
4953 | /* NOTE: this is most likely not correct because it ignores any |
4954 | PVCRTC block size, but multiplied with _sg_row_pitch() |
4955 | it gives the correct surface pitch. |
4956 | |
4957 | See: https://www.khronos.org/registry/OpenGL/extensions/IMG/IMG_texture_compression_pvrtc.txt |
4958 | */ |
4959 | num_rows = ((_sg_max(height, 8) + 7) / 8) * 8; |
4960 | break; |
4961 | default: |
4962 | num_rows = height; |
4963 | break; |
4964 | } |
4965 | if (num_rows < 1) { |
4966 | num_rows = 1; |
4967 | } |
4968 | return num_rows; |
4969 | } |
4970 | |
4971 | /* return pitch of a 2D subimage / texture slice |
4972 | see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp |
4973 | */ |
4974 | _SOKOL_PRIVATE int _sg_surface_pitch(sg_pixel_format fmt, int width, int height, int row_align) { |
4975 | int num_rows = _sg_num_rows(fmt, height); |
4976 | return num_rows * _sg_row_pitch(fmt, width, row_align); |
4977 | } |
4978 | |
4979 | /* capability table pixel format helper functions */ |
4980 | _SOKOL_PRIVATE void _sg_pixelformat_all(sg_pixelformat_info* pfi) { |
4981 | pfi->sample = true; |
4982 | pfi->filter = true; |
4983 | pfi->blend = true; |
4984 | pfi->render = true; |
4985 | pfi->msaa = true; |
4986 | } |
4987 | |
4988 | _SOKOL_PRIVATE void _sg_pixelformat_s(sg_pixelformat_info* pfi) { |
4989 | pfi->sample = true; |
4990 | } |
4991 | |
4992 | _SOKOL_PRIVATE void _sg_pixelformat_sf(sg_pixelformat_info* pfi) { |
4993 | pfi->sample = true; |
4994 | pfi->filter = true; |
4995 | } |
4996 | |
4997 | _SOKOL_PRIVATE void _sg_pixelformat_sr(sg_pixelformat_info* pfi) { |
4998 | pfi->sample = true; |
4999 | pfi->render = true; |
5000 | } |
5001 | |
5002 | _SOKOL_PRIVATE void _sg_pixelformat_srmd(sg_pixelformat_info* pfi) { |
5003 | pfi->sample = true; |
5004 | pfi->render = true; |
5005 | pfi->msaa = true; |
5006 | pfi->depth = true; |
5007 | } |
5008 | |
5009 | _SOKOL_PRIVATE void _sg_pixelformat_srm(sg_pixelformat_info* pfi) { |
5010 | pfi->sample = true; |
5011 | pfi->render = true; |
5012 | pfi->msaa = true; |
5013 | } |
5014 | |
5015 | _SOKOL_PRIVATE void _sg_pixelformat_sfrm(sg_pixelformat_info* pfi) { |
5016 | pfi->sample = true; |
5017 | pfi->filter = true; |
5018 | pfi->render = true; |
5019 | pfi->msaa = true; |
5020 | } |
5021 | _SOKOL_PRIVATE void _sg_pixelformat_sbrm(sg_pixelformat_info* pfi) { |
5022 | pfi->sample = true; |
5023 | pfi->blend = true; |
5024 | pfi->render = true; |
5025 | pfi->msaa = true; |
5026 | } |
5027 | |
5028 | _SOKOL_PRIVATE void _sg_pixelformat_sbr(sg_pixelformat_info* pfi) { |
5029 | pfi->sample = true; |
5030 | pfi->blend = true; |
5031 | pfi->render = true; |
5032 | } |
5033 | |
5034 | _SOKOL_PRIVATE void _sg_pixelformat_sfbr(sg_pixelformat_info* pfi) { |
5035 | pfi->sample = true; |
5036 | pfi->filter = true; |
5037 | pfi->blend = true; |
5038 | pfi->render = true; |
5039 | } |
5040 | |
5041 | /* resolve pass action defaults into a new pass action struct */ |
5042 | _SOKOL_PRIVATE void _sg_resolve_default_pass_action(const sg_pass_action* from, sg_pass_action* to) { |
5043 | SOKOL_ASSERT(from && to); |
5044 | *to = *from; |
5045 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
5046 | if (to->colors[i].action == _SG_ACTION_DEFAULT) { |
5047 | to->colors[i].action = SG_ACTION_CLEAR; |
5048 | to->colors[i].value.r = SG_DEFAULT_CLEAR_RED; |
5049 | to->colors[i].value.g = SG_DEFAULT_CLEAR_GREEN; |
5050 | to->colors[i].value.b = SG_DEFAULT_CLEAR_BLUE; |
5051 | to->colors[i].value.a = SG_DEFAULT_CLEAR_ALPHA; |
5052 | } |
5053 | } |
5054 | if (to->depth.action == _SG_ACTION_DEFAULT) { |
5055 | to->depth.action = SG_ACTION_CLEAR; |
5056 | to->depth.value = SG_DEFAULT_CLEAR_DEPTH; |
5057 | } |
5058 | if (to->stencil.action == _SG_ACTION_DEFAULT) { |
5059 | to->stencil.action = SG_ACTION_CLEAR; |
5060 | to->stencil.value = SG_DEFAULT_CLEAR_STENCIL; |
5061 | } |
5062 | } |
5063 | |
5064 | /*== DUMMY BACKEND IMPL ======================================================*/ |
5065 | #if defined(SOKOL_DUMMY_BACKEND) |
5066 | |
5067 | _SOKOL_PRIVATE void _sg_dummy_setup_backend(const sg_desc* desc) { |
5068 | SOKOL_ASSERT(desc); |
5069 | _SOKOL_UNUSED(desc); |
5070 | _sg.backend = SG_BACKEND_DUMMY; |
5071 | for (int i = SG_PIXELFORMAT_R8; i < SG_PIXELFORMAT_BC1_RGBA; i++) { |
5072 | _sg.formats[i].sample = true; |
5073 | _sg.formats[i].filter = true; |
5074 | _sg.formats[i].render = true; |
5075 | _sg.formats[i].blend = true; |
5076 | _sg.formats[i].msaa = true; |
5077 | } |
5078 | _sg.formats[SG_PIXELFORMAT_DEPTH].depth = true; |
5079 | _sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL].depth = true; |
5080 | } |
5081 | |
/* dummy backend: no backend-global state to tear down */
_SOKOL_PRIVATE void _sg_dummy_discard_backend(void) {
    /* empty */
}
5085 | |
/* dummy backend: no API state cache to reset */
_SOKOL_PRIVATE void _sg_dummy_reset_state_cache(void) {
    /* empty */
}
5089 | |
/* dummy backend: contexts carry no backend data, creation always succeeds */
_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
5095 | |
/* dummy backend: nothing to release per context */
_SOKOL_PRIVATE void _sg_dummy_discard_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
}
5100 | |
/* dummy backend: context activation is a no-op */
_SOKOL_PRIVATE void _sg_dummy_activate_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
}
5105 | |
/* dummy backend: only initialize the backend-agnostic buffer state, no GPU resource is created */
_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    _sg_buffer_common_init(&buf->cmn, desc);
    return SG_RESOURCESTATE_VALID;
}
5111 | |
/* dummy backend: no GPU buffer resource to destroy */
_SOKOL_PRIVATE void _sg_dummy_discard_buffer(_sg_buffer_t* buf) {
    SOKOL_ASSERT(buf);
    _SOKOL_UNUSED(buf);
}
5116 | |
/* dummy backend: only initialize the backend-agnostic image state, no GPU resource is created */
_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    _sg_image_common_init(&img->cmn, desc);
    return SG_RESOURCESTATE_VALID;
}
5122 | |
/* dummy backend: no GPU image resource to destroy */
_SOKOL_PRIVATE void _sg_dummy_discard_image(_sg_image_t* img) {
    SOKOL_ASSERT(img);
    _SOKOL_UNUSED(img);
}
5127 | |
/* dummy backend: only initialize the backend-agnostic shader state, nothing is compiled */
_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);
    _sg_shader_common_init(&shd->cmn, desc);
    return SG_RESOURCESTATE_VALID;
}
5133 | |
/* dummy backend: no shader object to destroy */
_SOKOL_PRIVATE void _sg_dummy_discard_shader(_sg_shader_t* shd) {
    SOKOL_ASSERT(shd);
    _SOKOL_UNUSED(shd);
}
5138 | |
5139 | _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { |
5140 | SOKOL_ASSERT(pip && desc); |
5141 | pip->shader = shd; |
5142 | _sg_pipeline_common_init(&pip->cmn, desc); |
5143 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
5144 | const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; |
5145 | if (a_desc->format == SG_VERTEXFORMAT_INVALID) { |
5146 | break; |
5147 | } |
5148 | SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); |
5149 | pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; |
5150 | } |
5151 | return SG_RESOURCESTATE_VALID; |
5152 | } |
5153 | |
/* dummy backend: no pipeline state object to destroy */
_SOKOL_PRIVATE void _sg_dummy_discard_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    _SOKOL_UNUSED(pip);
}
5158 | |
/* dummy backend: initialize common pass state and store the attachment image
   pointers; att_images is ordered: color attachments first, then the optional
   depth-stencil image at index SG_MAX_COLOR_ATTACHMENTS
*/
_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && desc);
    SOKOL_ASSERT(att_images && att_images[0]);

    _sg_pass_common_init(&pass->cmn, desc);

    /* store color attachment images (validated against the desc ids and
       required to be renderable color formats)
    */
    const sg_pass_attachment_desc* att_desc;
    for (int i = 0; i < pass->cmn.num_color_atts; i++) {
        att_desc = &desc->color_attachments[i];
        SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
        SOKOL_ASSERT(0 == pass->dmy.color_atts[i].image);
        SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format));
        pass->dmy.color_atts[i].image = att_images[i];
    }

    /* the depth-stencil attachment is optional */
    SOKOL_ASSERT(0 == pass->dmy.ds_att.image);
    att_desc = &desc->depth_stencil_attachment;
    if (att_desc->image.id != SG_INVALID_ID) {
        const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
        SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
        pass->dmy.ds_att.image = att_images[ds_img_index];
    }
    return SG_RESOURCESTATE_VALID;
}
5185 | |
/* dummy backend: no framebuffer resources to destroy */
_SOKOL_PRIVATE void _sg_dummy_discard_pass(_sg_pass_t* pass) {
    SOKOL_ASSERT(pass);
    _SOKOL_UNUSED(pass);
}
5190 | |
/* return the image bound as color attachment 'index' of a pass */
_SOKOL_PRIVATE _sg_image_t* _sg_dummy_pass_color_image(const _sg_pass_t* pass, int index) {
    SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
    /* NOTE: may return null */
    return pass->dmy.color_atts[index].image;
}
5196 | |
/* return the image bound as depth-stencil attachment of a pass */
_SOKOL_PRIVATE _sg_image_t* _sg_dummy_pass_ds_image(const _sg_pass_t* pass) {
    /* NOTE: may return null */
    SOKOL_ASSERT(pass);
    return pass->dmy.ds_att.image;
}
5202 | |
/* dummy backend: beginning a pass performs no rendering work */
_SOKOL_PRIVATE void _sg_dummy_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    SOKOL_ASSERT(action);
    _SOKOL_UNUSED(pass);
    _SOKOL_UNUSED(action);
    _SOKOL_UNUSED(w);
    _SOKOL_UNUSED(h);
}
5210 | |
/* dummy backend: ending a pass is a no-op */
_SOKOL_PRIVATE void _sg_dummy_end_pass(void) {
    /* empty */
}
5214 | |
/* dummy backend: frame commit is a no-op */
_SOKOL_PRIVATE void _sg_dummy_commit(void) {
    /* empty */
}
5218 | |
/* dummy backend: viewport state is ignored */
_SOKOL_PRIVATE void _sg_dummy_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
    _SOKOL_UNUSED(x);
    _SOKOL_UNUSED(y);
    _SOKOL_UNUSED(w);
    _SOKOL_UNUSED(h);
    _SOKOL_UNUSED(origin_top_left);
}
5226 | |
/* dummy backend: scissor state is ignored */
_SOKOL_PRIVATE void _sg_dummy_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
    _SOKOL_UNUSED(x);
    _SOKOL_UNUSED(y);
    _SOKOL_UNUSED(w);
    _SOKOL_UNUSED(h);
    _SOKOL_UNUSED(origin_top_left);
}
5234 | |
/* dummy backend: pipeline state is ignored */
_SOKOL_PRIVATE void _sg_dummy_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    _SOKOL_UNUSED(pip);
}
5239 | |
/* dummy backend: resource bindings are validated by the caller but otherwise ignored */
_SOKOL_PRIVATE void _sg_dummy_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(vbs && vb_offsets);
    SOKOL_ASSERT(vs_imgs);
    SOKOL_ASSERT(fs_imgs);
    _SOKOL_UNUSED(pip);
    _SOKOL_UNUSED(vbs); _SOKOL_UNUSED(vb_offsets); _SOKOL_UNUSED(num_vbs);
    _SOKOL_UNUSED(ib); _SOKOL_UNUSED(ib_offset);
    _SOKOL_UNUSED(vs_imgs); _SOKOL_UNUSED(num_vs_imgs);
    _SOKOL_UNUSED(fs_imgs); _SOKOL_UNUSED(num_fs_imgs);
}
5257 | |
/* dummy backend: uniform data is ignored */
_SOKOL_PRIVATE void _sg_dummy_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) {
    _SOKOL_UNUSED(stage_index);
    _SOKOL_UNUSED(ub_index);
    _SOKOL_UNUSED(data);
}
5263 | |
/* dummy backend: draw calls perform no work */
_SOKOL_PRIVATE void _sg_dummy_draw(int base_element, int num_elements, int num_instances) {
    _SOKOL_UNUSED(base_element);
    _SOKOL_UNUSED(num_elements);
    _SOKOL_UNUSED(num_instances);
}
5269 | |
5270 | _SOKOL_PRIVATE void _sg_dummy_update_buffer(_sg_buffer_t* buf, const sg_range* data) { |
5271 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
5272 | _SOKOL_UNUSED(data); |
5273 | if (++buf->cmn.active_slot >= buf->cmn.num_slots) { |
5274 | buf->cmn.active_slot = 0; |
5275 | } |
5276 | } |
5277 | |
5278 | _SOKOL_PRIVATE int _sg_dummy_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { |
5279 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
5280 | _SOKOL_UNUSED(data); |
5281 | if (new_frame) { |
5282 | if (++buf->cmn.active_slot >= buf->cmn.num_slots) { |
5283 | buf->cmn.active_slot = 0; |
5284 | } |
5285 | } |
5286 | /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ |
5287 | return _sg_roundup((int)data->size, 4); |
5288 | } |
5289 | |
5290 | _SOKOL_PRIVATE void _sg_dummy_update_image(_sg_image_t* img, const sg_image_data* data) { |
5291 | SOKOL_ASSERT(img && data); |
5292 | _SOKOL_UNUSED(data); |
5293 | if (++img->cmn.active_slot >= img->cmn.num_slots) { |
5294 | img->cmn.active_slot = 0; |
5295 | } |
5296 | } |
5297 | |
5298 | /*== GL BACKEND ==============================================================*/ |
5299 | #elif defined(_SOKOL_ANY_GL) |
5300 | |
5301 | /*=== OPTIONAL GL LOADER FOR WIN32 ===========================================*/ |
5302 | #if defined(_SOKOL_USE_WIN32_GL_LOADER) |
5303 | |
5304 | // X Macro list of GL function names and signatures |
5305 | #define _SG_GL_FUNCS \ |
5306 | _SG_XMACRO(glBindVertexArray, void, (GLuint array)) \ |
5307 | _SG_XMACRO(glFramebufferTextureLayer, void, (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer)) \ |
5308 | _SG_XMACRO(glGenFramebuffers, void, (GLsizei n, GLuint * framebuffers)) \ |
5309 | _SG_XMACRO(glBindFramebuffer, void, (GLenum target, GLuint framebuffer)) \ |
5310 | _SG_XMACRO(glBindRenderbuffer, void, (GLenum target, GLuint renderbuffer)) \ |
5311 | _SG_XMACRO(glGetStringi, const GLubyte *, (GLenum name, GLuint index)) \ |
5312 | _SG_XMACRO(glClearBufferfi, void, (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil)) \ |
5313 | _SG_XMACRO(glClearBufferfv, void, (GLenum buffer, GLint drawbuffer, const GLfloat * value)) \ |
5314 | _SG_XMACRO(glClearBufferuiv, void, (GLenum buffer, GLint drawbuffer, const GLuint * value)) \ |
5315 | _SG_XMACRO(glClearBufferiv, void, (GLenum buffer, GLint drawbuffer, const GLint * value)) \ |
5316 | _SG_XMACRO(glDeleteRenderbuffers, void, (GLsizei n, const GLuint * renderbuffers)) \ |
5317 | _SG_XMACRO(glUniform1fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ |
5318 | _SG_XMACRO(glUniform2fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ |
5319 | _SG_XMACRO(glUniform3fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ |
5320 | _SG_XMACRO(glUniform4fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ |
5321 | _SG_XMACRO(glUniform1iv, void, (GLint location, GLsizei count, const GLint * value)) \ |
5322 | _SG_XMACRO(glUniform2iv, void, (GLint location, GLsizei count, const GLint * value)) \ |
5323 | _SG_XMACRO(glUniform3iv, void, (GLint location, GLsizei count, const GLint * value)) \ |
5324 | _SG_XMACRO(glUniform4iv, void, (GLint location, GLsizei count, const GLint * value)) \ |
5325 | _SG_XMACRO(glUniformMatrix4fv, void, (GLint location, GLsizei count, GLboolean transpose, const GLfloat * value)) \ |
5326 | _SG_XMACRO(glUseProgram, void, (GLuint program)) \ |
5327 | _SG_XMACRO(glShaderSource, void, (GLuint shader, GLsizei count, const GLchar *const* string, const GLint * length)) \ |
5328 | _SG_XMACRO(glLinkProgram, void, (GLuint program)) \ |
5329 | _SG_XMACRO(glGetUniformLocation, GLint, (GLuint program, const GLchar * name)) \ |
5330 | _SG_XMACRO(glGetShaderiv, void, (GLuint shader, GLenum pname, GLint * params)) \ |
5331 | _SG_XMACRO(glGetProgramInfoLog, void, (GLuint program, GLsizei bufSize, GLsizei * length, GLchar * infoLog)) \ |
5332 | _SG_XMACRO(glGetAttribLocation, GLint, (GLuint program, const GLchar * name)) \ |
5333 | _SG_XMACRO(glDisableVertexAttribArray, void, (GLuint index)) \ |
5334 | _SG_XMACRO(glDeleteShader, void, (GLuint shader)) \ |
5335 | _SG_XMACRO(glDeleteProgram, void, (GLuint program)) \ |
5336 | _SG_XMACRO(glCompileShader, void, (GLuint shader)) \ |
5337 | _SG_XMACRO(glStencilFuncSeparate, void, (GLenum face, GLenum func, GLint ref, GLuint mask)) \ |
5338 | _SG_XMACRO(glStencilOpSeparate, void, (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass)) \ |
5339 | _SG_XMACRO(glRenderbufferStorageMultisample, void, (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height)) \ |
5340 | _SG_XMACRO(glDrawBuffers, void, (GLsizei n, const GLenum * bufs)) \ |
5341 | _SG_XMACRO(glVertexAttribDivisor, void, (GLuint index, GLuint divisor)) \ |
5342 | _SG_XMACRO(glBufferSubData, void, (GLenum target, GLintptr offset, GLsizeiptr size, const void * data)) \ |
5343 | _SG_XMACRO(glGenBuffers, void, (GLsizei n, GLuint * buffers)) \ |
5344 | _SG_XMACRO(glCheckFramebufferStatus, GLenum, (GLenum target)) \ |
5345 | _SG_XMACRO(glFramebufferRenderbuffer, void, (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer)) \ |
5346 | _SG_XMACRO(glCompressedTexImage2D, void, (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void * data)) \ |
5347 | _SG_XMACRO(glCompressedTexImage3D, void, (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void * data)) \ |
5348 | _SG_XMACRO(glActiveTexture, void, (GLenum texture)) \ |
5349 | _SG_XMACRO(glTexSubImage3D, void, (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void * pixels)) \ |
5350 | _SG_XMACRO(glRenderbufferStorage, void, (GLenum target, GLenum internalformat, GLsizei width, GLsizei height)) \ |
5351 | _SG_XMACRO(glGenTextures, void, (GLsizei n, GLuint * textures)) \ |
5352 | _SG_XMACRO(glPolygonOffset, void, (GLfloat factor, GLfloat units)) \ |
5353 | _SG_XMACRO(glDrawElements, void, (GLenum mode, GLsizei count, GLenum type, const void * indices)) \ |
5354 | _SG_XMACRO(glDeleteFramebuffers, void, (GLsizei n, const GLuint * framebuffers)) \ |
5355 | _SG_XMACRO(glBlendEquationSeparate, void, (GLenum modeRGB, GLenum modeAlpha)) \ |
5356 | _SG_XMACRO(glDeleteTextures, void, (GLsizei n, const GLuint * textures)) \ |
5357 | _SG_XMACRO(glGetProgramiv, void, (GLuint program, GLenum pname, GLint * params)) \ |
5358 | _SG_XMACRO(glBindTexture, void, (GLenum target, GLuint texture)) \ |
5359 | _SG_XMACRO(glTexImage3D, void, (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void * pixels)) \ |
5360 | _SG_XMACRO(glCreateShader, GLuint, (GLenum type)) \ |
5361 | _SG_XMACRO(glTexSubImage2D, void, (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void * pixels)) \ |
5362 | _SG_XMACRO(glClearDepth, void, (GLdouble depth)) \ |
5363 | _SG_XMACRO(glFramebufferTexture2D, void, (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level)) \ |
5364 | _SG_XMACRO(glCreateProgram, GLuint, (void)) \ |
5365 | _SG_XMACRO(glViewport, void, (GLint x, GLint y, GLsizei width, GLsizei height)) \ |
5366 | _SG_XMACRO(glDeleteBuffers, void, (GLsizei n, const GLuint * buffers)) \ |
5367 | _SG_XMACRO(glDrawArrays, void, (GLenum mode, GLint first, GLsizei count)) \ |
5368 | _SG_XMACRO(glDrawElementsInstanced, void, (GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount)) \ |
5369 | _SG_XMACRO(glVertexAttribPointer, void, (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void * pointer)) \ |
5370 | _SG_XMACRO(glUniform1i, void, (GLint location, GLint v0)) \ |
5371 | _SG_XMACRO(glDisable, void, (GLenum cap)) \ |
5372 | _SG_XMACRO(glColorMask, void, (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)) \ |
5373 | _SG_XMACRO(glColorMaski, void, (GLuint buf, GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)) \ |
5374 | _SG_XMACRO(glBindBuffer, void, (GLenum target, GLuint buffer)) \ |
5375 | _SG_XMACRO(glDeleteVertexArrays, void, (GLsizei n, const GLuint * arrays)) \ |
5376 | _SG_XMACRO(glDepthMask, void, (GLboolean flag)) \ |
5377 | _SG_XMACRO(glDrawArraysInstanced, void, (GLenum mode, GLint first, GLsizei count, GLsizei instancecount)) \ |
5378 | _SG_XMACRO(glClearStencil, void, (GLint s)) \ |
5379 | _SG_XMACRO(glScissor, void, (GLint x, GLint y, GLsizei width, GLsizei height)) \ |
5380 | _SG_XMACRO(glGenRenderbuffers, void, (GLsizei n, GLuint * renderbuffers)) \ |
5381 | _SG_XMACRO(glBufferData, void, (GLenum target, GLsizeiptr size, const void * data, GLenum usage)) \ |
5382 | _SG_XMACRO(glBlendFuncSeparate, void, (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha)) \ |
5383 | _SG_XMACRO(glTexParameteri, void, (GLenum target, GLenum pname, GLint param)) \ |
5384 | _SG_XMACRO(glGetIntegerv, void, (GLenum pname, GLint * data)) \ |
5385 | _SG_XMACRO(glEnable, void, (GLenum cap)) \ |
5386 | _SG_XMACRO(glBlitFramebuffer, void, (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter)) \ |
5387 | _SG_XMACRO(glStencilMask, void, (GLuint mask)) \ |
5388 | _SG_XMACRO(glAttachShader, void, (GLuint program, GLuint shader)) \ |
5389 | _SG_XMACRO(glGetError, GLenum, (void)) \ |
5390 | _SG_XMACRO(glClearColor, void, (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)) \ |
5391 | _SG_XMACRO(glBlendColor, void, (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)) \ |
5392 | _SG_XMACRO(glTexParameterf, void, (GLenum target, GLenum pname, GLfloat param)) \ |
5393 | _SG_XMACRO(glTexParameterfv, void, (GLenum target, GLenum pname, GLfloat* params)) \ |
5394 | _SG_XMACRO(glGetShaderInfoLog, void, (GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * infoLog)) \ |
5395 | _SG_XMACRO(glDepthFunc, void, (GLenum func)) \ |
5396 | _SG_XMACRO(glStencilOp , void, (GLenum fail, GLenum zfail, GLenum zpass)) \ |
5397 | _SG_XMACRO(glStencilFunc, void, (GLenum func, GLint ref, GLuint mask)) \ |
5398 | _SG_XMACRO(glEnableVertexAttribArray, void, (GLuint index)) \ |
5399 | _SG_XMACRO(glBlendFunc, void, (GLenum sfactor, GLenum dfactor)) \ |
5400 | _SG_XMACRO(glReadBuffer, void, (GLenum src)) \ |
5401 | _SG_XMACRO(glReadPixels, void, (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void * data)) \ |
5402 | _SG_XMACRO(glClear, void, (GLbitfield mask)) \ |
5403 | _SG_XMACRO(glTexImage2D, void, (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void * pixels)) \ |
5404 | _SG_XMACRO(glGenVertexArrays, void, (GLsizei n, GLuint * arrays)) \ |
5405 | _SG_XMACRO(glFrontFace, void, (GLenum mode)) \ |
5406 | _SG_XMACRO(glCullFace, void, (GLenum mode)) |
5407 | |
5408 | // generate GL function pointer typedefs |
5409 | #define _SG_XMACRO(name, ret, args) typedef ret (GL_APIENTRY* PFN_ ## name) args; |
5410 | _SG_GL_FUNCS |
5411 | #undef _SG_XMACRO |
5412 | |
5413 | // generate GL function pointers |
5414 | #define _SG_XMACRO(name, ret, args) static PFN_ ## name name; |
5415 | _SG_GL_FUNCS |
5416 | #undef _SG_XMACRO |
5417 | |
5418 | // helper function to lookup GL functions in GL DLL |
5419 | typedef PROC (WINAPI * _sg_wglGetProcAddress)(LPCSTR); |
5420 | _SOKOL_PRIVATE void* _sg_gl_getprocaddr(const char* name, _sg_wglGetProcAddress wgl_getprocaddress) { |
5421 | void* proc_addr = (void*) wgl_getprocaddress(name); |
5422 | if (0 == proc_addr) { |
5423 | proc_addr = (void*) GetProcAddress(_sg.gl.opengl32_dll, name); |
5424 | } |
5425 | SOKOL_ASSERT(proc_addr); |
5426 | return proc_addr; |
5427 | } |
5428 | |
// load opengl32.dll and populate all GL function pointers declared via _SG_GL_FUNCS
// NOTE(review): failures are only caught by asserts, which compile away in
// release builds — confirm this is the intended policy
_SOKOL_PRIVATE void _sg_gl_load_opengl(void) {
    SOKOL_ASSERT(0 == _sg.gl.opengl32_dll);
    _sg.gl.opengl32_dll = LoadLibraryA("opengl32.dll");
    SOKOL_ASSERT(_sg.gl.opengl32_dll);
    _sg_wglGetProcAddress wgl_getprocaddress = (_sg_wglGetProcAddress) GetProcAddress(_sg.gl.opengl32_dll, "wglGetProcAddress");
    SOKOL_ASSERT(wgl_getprocaddress);
    /* expand the X-macro list into one lookup per GL function */
    #define _SG_XMACRO(name, ret, args) name = (PFN_ ## name) _sg_gl_getprocaddr(#name, wgl_getprocaddress);
    _SG_GL_FUNCS
    #undef _SG_XMACRO
}
5440 | |
5441 | _SOKOL_PRIVATE void _sg_gl_unload_opengl(void) { |
5442 | SOKOL_ASSERT(_sg.gl.opengl32_dll); |
5443 | FreeLibrary(_sg.gl.opengl32_dll); |
5444 | _sg.gl.opengl32_dll = 0; |
5445 | } |
5446 | #endif // _SOKOL_USE_WIN32_GL_LOADER |
5447 | |
5448 | /*-- type translation --------------------------------------------------------*/ |
5449 | _SOKOL_PRIVATE GLenum _sg_gl_buffer_target(sg_buffer_type t) { |
5450 | switch (t) { |
5451 | case SG_BUFFERTYPE_VERTEXBUFFER: return GL_ARRAY_BUFFER; |
5452 | case SG_BUFFERTYPE_INDEXBUFFER: return GL_ELEMENT_ARRAY_BUFFER; |
5453 | default: SOKOL_UNREACHABLE; return 0; |
5454 | } |
5455 | } |
5456 | |
5457 | _SOKOL_PRIVATE GLenum _sg_gl_texture_target(sg_image_type t) { |
5458 | switch (t) { |
5459 | case SG_IMAGETYPE_2D: return GL_TEXTURE_2D; |
5460 | case SG_IMAGETYPE_CUBE: return GL_TEXTURE_CUBE_MAP; |
5461 | #if !defined(SOKOL_GLES2) |
5462 | case SG_IMAGETYPE_3D: return GL_TEXTURE_3D; |
5463 | case SG_IMAGETYPE_ARRAY: return GL_TEXTURE_2D_ARRAY; |
5464 | #endif |
5465 | default: SOKOL_UNREACHABLE; return 0; |
5466 | } |
5467 | } |
5468 | |
5469 | _SOKOL_PRIVATE GLenum _sg_gl_usage(sg_usage u) { |
5470 | switch (u) { |
5471 | case SG_USAGE_IMMUTABLE: return GL_STATIC_DRAW; |
5472 | case SG_USAGE_DYNAMIC: return GL_DYNAMIC_DRAW; |
5473 | case SG_USAGE_STREAM: return GL_STREAM_DRAW; |
5474 | default: SOKOL_UNREACHABLE; return 0; |
5475 | } |
5476 | } |
5477 | |
5478 | _SOKOL_PRIVATE GLenum _sg_gl_shader_stage(sg_shader_stage stage) { |
5479 | switch (stage) { |
5480 | case SG_SHADERSTAGE_VS: return GL_VERTEX_SHADER; |
5481 | case SG_SHADERSTAGE_FS: return GL_FRAGMENT_SHADER; |
5482 | default: SOKOL_UNREACHABLE; return 0; |
5483 | } |
5484 | } |
5485 | |
5486 | _SOKOL_PRIVATE GLint _sg_gl_vertexformat_size(sg_vertex_format fmt) { |
5487 | switch (fmt) { |
5488 | case SG_VERTEXFORMAT_FLOAT: return 1; |
5489 | case SG_VERTEXFORMAT_FLOAT2: return 2; |
5490 | case SG_VERTEXFORMAT_FLOAT3: return 3; |
5491 | case SG_VERTEXFORMAT_FLOAT4: return 4; |
5492 | case SG_VERTEXFORMAT_BYTE4: return 4; |
5493 | case SG_VERTEXFORMAT_BYTE4N: return 4; |
5494 | case SG_VERTEXFORMAT_UBYTE4: return 4; |
5495 | case SG_VERTEXFORMAT_UBYTE4N: return 4; |
5496 | case SG_VERTEXFORMAT_SHORT2: return 2; |
5497 | case SG_VERTEXFORMAT_SHORT2N: return 2; |
5498 | case SG_VERTEXFORMAT_USHORT2N: return 2; |
5499 | case SG_VERTEXFORMAT_SHORT4: return 4; |
5500 | case SG_VERTEXFORMAT_SHORT4N: return 4; |
5501 | case SG_VERTEXFORMAT_USHORT4N: return 4; |
5502 | case SG_VERTEXFORMAT_UINT10_N2: return 4; |
5503 | default: SOKOL_UNREACHABLE; return 0; |
5504 | } |
5505 | } |
5506 | |
5507 | _SOKOL_PRIVATE GLenum _sg_gl_vertexformat_type(sg_vertex_format fmt) { |
5508 | switch (fmt) { |
5509 | case SG_VERTEXFORMAT_FLOAT: |
5510 | case SG_VERTEXFORMAT_FLOAT2: |
5511 | case SG_VERTEXFORMAT_FLOAT3: |
5512 | case SG_VERTEXFORMAT_FLOAT4: |
5513 | return GL_FLOAT; |
5514 | case SG_VERTEXFORMAT_BYTE4: |
5515 | case SG_VERTEXFORMAT_BYTE4N: |
5516 | return GL_BYTE; |
5517 | case SG_VERTEXFORMAT_UBYTE4: |
5518 | case SG_VERTEXFORMAT_UBYTE4N: |
5519 | return GL_UNSIGNED_BYTE; |
5520 | case SG_VERTEXFORMAT_SHORT2: |
5521 | case SG_VERTEXFORMAT_SHORT2N: |
5522 | case SG_VERTEXFORMAT_SHORT4: |
5523 | case SG_VERTEXFORMAT_SHORT4N: |
5524 | return GL_SHORT; |
5525 | case SG_VERTEXFORMAT_USHORT2N: |
5526 | case SG_VERTEXFORMAT_USHORT4N: |
5527 | return GL_UNSIGNED_SHORT; |
5528 | case SG_VERTEXFORMAT_UINT10_N2: |
5529 | return GL_UNSIGNED_INT_2_10_10_10_REV; |
5530 | default: |
5531 | SOKOL_UNREACHABLE; return 0; |
5532 | } |
5533 | } |
5534 | |
5535 | _SOKOL_PRIVATE GLboolean _sg_gl_vertexformat_normalized(sg_vertex_format fmt) { |
5536 | switch (fmt) { |
5537 | case SG_VERTEXFORMAT_BYTE4N: |
5538 | case SG_VERTEXFORMAT_UBYTE4N: |
5539 | case SG_VERTEXFORMAT_SHORT2N: |
5540 | case SG_VERTEXFORMAT_USHORT2N: |
5541 | case SG_VERTEXFORMAT_SHORT4N: |
5542 | case SG_VERTEXFORMAT_USHORT4N: |
5543 | case SG_VERTEXFORMAT_UINT10_N2: |
5544 | return GL_TRUE; |
5545 | default: |
5546 | return GL_FALSE; |
5547 | } |
5548 | } |
5549 | |
5550 | _SOKOL_PRIVATE GLenum _sg_gl_primitive_type(sg_primitive_type t) { |
5551 | switch (t) { |
5552 | case SG_PRIMITIVETYPE_POINTS: return GL_POINTS; |
5553 | case SG_PRIMITIVETYPE_LINES: return GL_LINES; |
5554 | case SG_PRIMITIVETYPE_LINE_STRIP: return GL_LINE_STRIP; |
5555 | case SG_PRIMITIVETYPE_TRIANGLES: return GL_TRIANGLES; |
5556 | case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return GL_TRIANGLE_STRIP; |
5557 | default: SOKOL_UNREACHABLE; return 0; |
5558 | } |
5559 | } |
5560 | |
5561 | _SOKOL_PRIVATE GLenum _sg_gl_index_type(sg_index_type t) { |
5562 | switch (t) { |
5563 | case SG_INDEXTYPE_NONE: return 0; |
5564 | case SG_INDEXTYPE_UINT16: return GL_UNSIGNED_SHORT; |
5565 | case SG_INDEXTYPE_UINT32: return GL_UNSIGNED_INT; |
5566 | default: SOKOL_UNREACHABLE; return 0; |
5567 | } |
5568 | } |
5569 | |
5570 | _SOKOL_PRIVATE GLenum _sg_gl_compare_func(sg_compare_func cmp) { |
5571 | switch (cmp) { |
5572 | case SG_COMPAREFUNC_NEVER: return GL_NEVER; |
5573 | case SG_COMPAREFUNC_LESS: return GL_LESS; |
5574 | case SG_COMPAREFUNC_EQUAL: return GL_EQUAL; |
5575 | case SG_COMPAREFUNC_LESS_EQUAL: return GL_LEQUAL; |
5576 | case SG_COMPAREFUNC_GREATER: return GL_GREATER; |
5577 | case SG_COMPAREFUNC_NOT_EQUAL: return GL_NOTEQUAL; |
5578 | case SG_COMPAREFUNC_GREATER_EQUAL: return GL_GEQUAL; |
5579 | case SG_COMPAREFUNC_ALWAYS: return GL_ALWAYS; |
5580 | default: SOKOL_UNREACHABLE; return 0; |
5581 | } |
5582 | } |
5583 | |
5584 | _SOKOL_PRIVATE GLenum _sg_gl_stencil_op(sg_stencil_op op) { |
5585 | switch (op) { |
5586 | case SG_STENCILOP_KEEP: return GL_KEEP; |
5587 | case SG_STENCILOP_ZERO: return GL_ZERO; |
5588 | case SG_STENCILOP_REPLACE: return GL_REPLACE; |
5589 | case SG_STENCILOP_INCR_CLAMP: return GL_INCR; |
5590 | case SG_STENCILOP_DECR_CLAMP: return GL_DECR; |
5591 | case SG_STENCILOP_INVERT: return GL_INVERT; |
5592 | case SG_STENCILOP_INCR_WRAP: return GL_INCR_WRAP; |
5593 | case SG_STENCILOP_DECR_WRAP: return GL_DECR_WRAP; |
5594 | default: SOKOL_UNREACHABLE; return 0; |
5595 | } |
5596 | } |
5597 | |
5598 | _SOKOL_PRIVATE GLenum _sg_gl_blend_factor(sg_blend_factor f) { |
5599 | switch (f) { |
5600 | case SG_BLENDFACTOR_ZERO: return GL_ZERO; |
5601 | case SG_BLENDFACTOR_ONE: return GL_ONE; |
5602 | case SG_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR; |
5603 | case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR; |
5604 | case SG_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA; |
5605 | case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA; |
5606 | case SG_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR; |
5607 | case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return GL_ONE_MINUS_DST_COLOR; |
5608 | case SG_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA; |
5609 | case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA; |
5610 | case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return GL_SRC_ALPHA_SATURATE; |
5611 | case SG_BLENDFACTOR_BLEND_COLOR: return GL_CONSTANT_COLOR; |
5612 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR; |
5613 | case SG_BLENDFACTOR_BLEND_ALPHA: return GL_CONSTANT_ALPHA; |
5614 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA; |
5615 | default: SOKOL_UNREACHABLE; return 0; |
5616 | } |
5617 | } |
5618 | |
5619 | _SOKOL_PRIVATE GLenum _sg_gl_blend_op(sg_blend_op op) { |
5620 | switch (op) { |
5621 | case SG_BLENDOP_ADD: return GL_FUNC_ADD; |
5622 | case SG_BLENDOP_SUBTRACT: return GL_FUNC_SUBTRACT; |
5623 | case SG_BLENDOP_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT; |
5624 | default: SOKOL_UNREACHABLE; return 0; |
5625 | } |
5626 | } |
5627 | |
5628 | _SOKOL_PRIVATE GLenum _sg_gl_filter(sg_filter f) { |
5629 | switch (f) { |
5630 | case SG_FILTER_NEAREST: return GL_NEAREST; |
5631 | case SG_FILTER_LINEAR: return GL_LINEAR; |
5632 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: return GL_NEAREST_MIPMAP_NEAREST; |
5633 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: return GL_NEAREST_MIPMAP_LINEAR; |
5634 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: return GL_LINEAR_MIPMAP_NEAREST; |
5635 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: return GL_LINEAR_MIPMAP_LINEAR; |
5636 | default: SOKOL_UNREACHABLE; return 0; |
5637 | } |
5638 | } |
5639 | |
5640 | _SOKOL_PRIVATE GLenum _sg_gl_wrap(sg_wrap w) { |
5641 | switch (w) { |
5642 | case SG_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE; |
5643 | #if defined(SOKOL_GLCORE33) |
5644 | case SG_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER; |
5645 | #else |
5646 | case SG_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_EDGE; |
5647 | #endif |
5648 | case SG_WRAP_REPEAT: return GL_REPEAT; |
5649 | case SG_WRAP_MIRRORED_REPEAT: return GL_MIRRORED_REPEAT; |
5650 | default: SOKOL_UNREACHABLE; return 0; |
5651 | } |
5652 | } |
5653 | |
/* map sg_pixel_format to the per-component data 'type' argument of
   glTexImage2D(); only relevant for uncompressed formats (compressed
   uploads go through glCompressedTexImage2D which takes no type) */
_SOKOL_PRIVATE GLenum _sg_gl_teximage_type(sg_pixel_format fmt) {
    switch (fmt) {
        /* 8-bit unsigned components */
        case SG_PIXELFORMAT_R8:
        case SG_PIXELFORMAT_R8UI:
        case SG_PIXELFORMAT_RG8:
        case SG_PIXELFORMAT_RG8UI:
        case SG_PIXELFORMAT_RGBA8:
        case SG_PIXELFORMAT_RGBA8UI:
        case SG_PIXELFORMAT_BGRA8:
            return GL_UNSIGNED_BYTE;
        /* 8-bit signed components */
        case SG_PIXELFORMAT_R8SN:
        case SG_PIXELFORMAT_R8SI:
        case SG_PIXELFORMAT_RG8SN:
        case SG_PIXELFORMAT_RG8SI:
        case SG_PIXELFORMAT_RGBA8SN:
        case SG_PIXELFORMAT_RGBA8SI:
            return GL_BYTE;
        /* 16-bit unsigned components */
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16UI:
        case SG_PIXELFORMAT_RG16:
        case SG_PIXELFORMAT_RG16UI:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16UI:
            return GL_UNSIGNED_SHORT;
        /* 16-bit signed components */
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_R16SI:
        case SG_PIXELFORMAT_RG16SN:
        case SG_PIXELFORMAT_RG16SI:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_RGBA16SI:
            return GL_SHORT;
        /* 16-bit float components */
        case SG_PIXELFORMAT_R16F:
        case SG_PIXELFORMAT_RG16F:
        case SG_PIXELFORMAT_RGBA16F:
            return GL_HALF_FLOAT;
        /* 32-bit integer components */
        case SG_PIXELFORMAT_R32UI:
        case SG_PIXELFORMAT_RG32UI:
        case SG_PIXELFORMAT_RGBA32UI:
            return GL_UNSIGNED_INT;
        case SG_PIXELFORMAT_R32SI:
        case SG_PIXELFORMAT_RG32SI:
        case SG_PIXELFORMAT_RGBA32SI:
            return GL_INT;
        /* 32-bit float components */
        case SG_PIXELFORMAT_R32F:
        case SG_PIXELFORMAT_RG32F:
        case SG_PIXELFORMAT_RGBA32F:
            return GL_FLOAT;
        /* packed formats, not available on GLES2 */
        #if !defined(SOKOL_GLES2)
        case SG_PIXELFORMAT_RGB10A2:
            return GL_UNSIGNED_INT_2_10_10_10_REV;
        case SG_PIXELFORMAT_RG11B10F:
            return GL_UNSIGNED_INT_10F_11F_11F_REV;
        case SG_PIXELFORMAT_RGB9E5:
            return GL_UNSIGNED_INT_5_9_9_9_REV;
        #endif
        /* matches GL_DEPTH_COMPONENT16 (see _sg_gl_teximage_internal_format) */
        case SG_PIXELFORMAT_DEPTH:
            return GL_UNSIGNED_SHORT;
        /* matches GL_DEPTH24_STENCIL8 */
        case SG_PIXELFORMAT_DEPTH_STENCIL:
            return GL_UNSIGNED_INT_24_8;
        default:
            SOKOL_UNREACHABLE; return 0;
    }
}
5717 | |
/* map sg_pixel_format to the 'format' argument of glTexImage2D();
   compressed formats return their compressed-format enum, which is used
   by glCompressedTexImage2D instead */
_SOKOL_PRIVATE GLenum _sg_gl_teximage_format(sg_pixel_format fmt) {
    switch (fmt) {
        case SG_PIXELFORMAT_R8:
        case SG_PIXELFORMAT_R8SN:
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_R16F:
        case SG_PIXELFORMAT_R32F:
            /* GLES2 has no GL_RED; single-channel textures use GL_LUMINANCE */
            #if defined(SOKOL_GLES2)
            return GL_LUMINANCE;
            #else
            if (_sg.gl.gles2) {
                return GL_LUMINANCE;
            }
            else {
                return GL_RED;
            }
            #endif
        #if !defined(SOKOL_GLES2)
        case SG_PIXELFORMAT_R8UI:
        case SG_PIXELFORMAT_R8SI:
        case SG_PIXELFORMAT_R16UI:
        case SG_PIXELFORMAT_R16SI:
        case SG_PIXELFORMAT_R32UI:
        case SG_PIXELFORMAT_R32SI:
            return GL_RED_INTEGER;
        case SG_PIXELFORMAT_RG8:
        case SG_PIXELFORMAT_RG8SN:
        case SG_PIXELFORMAT_RG16:
        case SG_PIXELFORMAT_RG16SN:
        case SG_PIXELFORMAT_RG16F:
        case SG_PIXELFORMAT_RG32F:
            return GL_RG;
        case SG_PIXELFORMAT_RG8UI:
        case SG_PIXELFORMAT_RG8SI:
        case SG_PIXELFORMAT_RG16UI:
        case SG_PIXELFORMAT_RG16SI:
        case SG_PIXELFORMAT_RG32UI:
        case SG_PIXELFORMAT_RG32SI:
            return GL_RG_INTEGER;
        #endif
        case SG_PIXELFORMAT_RGBA8:
        case SG_PIXELFORMAT_RGBA8SN:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_RGBA16F:
        case SG_PIXELFORMAT_RGBA32F:
        case SG_PIXELFORMAT_RGB10A2:
            return GL_RGBA;
        #if !defined(SOKOL_GLES2)
        case SG_PIXELFORMAT_RGBA8UI:
        case SG_PIXELFORMAT_RGBA8SI:
        case SG_PIXELFORMAT_RGBA16UI:
        case SG_PIXELFORMAT_RGBA16SI:
        case SG_PIXELFORMAT_RGBA32UI:
        case SG_PIXELFORMAT_RGBA32SI:
            return GL_RGBA_INTEGER;
        #endif
        case SG_PIXELFORMAT_RG11B10F:
        case SG_PIXELFORMAT_RGB9E5:
            return GL_RGB;
        case SG_PIXELFORMAT_DEPTH:
            return GL_DEPTH_COMPONENT;
        case SG_PIXELFORMAT_DEPTH_STENCIL:
            return GL_DEPTH_STENCIL;
        case SG_PIXELFORMAT_BC1_RGBA:
            return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
        case SG_PIXELFORMAT_BC2_RGBA:
            return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
        case SG_PIXELFORMAT_BC3_RGBA:
            return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
        case SG_PIXELFORMAT_BC4_R:
            return GL_COMPRESSED_RED_RGTC1;
        case SG_PIXELFORMAT_BC4_RSN:
            return GL_COMPRESSED_SIGNED_RED_RGTC1;
        case SG_PIXELFORMAT_BC5_RG:
            return GL_COMPRESSED_RED_GREEN_RGTC2;
        case SG_PIXELFORMAT_BC5_RGSN:
            return GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2;
        case SG_PIXELFORMAT_BC6H_RGBF:
            return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB;
        case SG_PIXELFORMAT_BC6H_RGBUF:
            return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB;
        case SG_PIXELFORMAT_BC7_RGBA:
            return GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
            return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG;
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
            return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
            return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG;
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
            return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
        case SG_PIXELFORMAT_ETC2_RGB8:
            return GL_COMPRESSED_RGB8_ETC2;
        case SG_PIXELFORMAT_ETC2_RGB8A1:
            return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2;
        case SG_PIXELFORMAT_ETC2_RGBA8:
            return GL_COMPRESSED_RGBA8_ETC2_EAC;
        case SG_PIXELFORMAT_ETC2_RG11:
            return GL_COMPRESSED_RG11_EAC;
        case SG_PIXELFORMAT_ETC2_RG11SN:
            return GL_COMPRESSED_SIGNED_RG11_EAC;
        default:
            /* NOTE(review): SG_PIXELFORMAT_BGRA8 is accepted by
               _sg_gl_teximage_type() but has no case here and would hit
               this unreachable branch -- verify how BGRA8 images are
               uploaded when the BGRA extension is available */
            SOKOL_UNREACHABLE; return 0;
    }
}
5825 | |
/* map sg_pixel_format to the 'internalformat' argument of glTexImage2D();
   on a GLES2 context the internal format must be identical to the external
   format, so this delegates to _sg_gl_teximage_format() there */
_SOKOL_PRIVATE GLenum _sg_gl_teximage_internal_format(sg_pixel_format fmt) {
#if defined(SOKOL_GLES2)
    return _sg_gl_teximage_format(fmt);
#else
    if (_sg.gl.gles2) {
        /* runtime GLES2 fallback on a GLES3-capable build */
        return _sg_gl_teximage_format(fmt);
    }
    else {
        switch (fmt) {
            case SG_PIXELFORMAT_R8:         return GL_R8;
            case SG_PIXELFORMAT_R8SN:       return GL_R8_SNORM;
            case SG_PIXELFORMAT_R8UI:       return GL_R8UI;
            case SG_PIXELFORMAT_R8SI:       return GL_R8I;
            /* 16-bit normalized formats are desktop-GL only */
            #if !defined(SOKOL_GLES3)
            case SG_PIXELFORMAT_R16:        return GL_R16;
            case SG_PIXELFORMAT_R16SN:      return GL_R16_SNORM;
            #endif
            case SG_PIXELFORMAT_R16UI:      return GL_R16UI;
            case SG_PIXELFORMAT_R16SI:      return GL_R16I;
            case SG_PIXELFORMAT_R16F:       return GL_R16F;
            case SG_PIXELFORMAT_RG8:        return GL_RG8;
            case SG_PIXELFORMAT_RG8SN:      return GL_RG8_SNORM;
            case SG_PIXELFORMAT_RG8UI:      return GL_RG8UI;
            case SG_PIXELFORMAT_RG8SI:      return GL_RG8I;
            case SG_PIXELFORMAT_R32UI:      return GL_R32UI;
            case SG_PIXELFORMAT_R32SI:      return GL_R32I;
            case SG_PIXELFORMAT_R32F:       return GL_R32F;
            #if !defined(SOKOL_GLES3)
            case SG_PIXELFORMAT_RG16:       return GL_RG16;
            case SG_PIXELFORMAT_RG16SN:     return GL_RG16_SNORM;
            #endif
            case SG_PIXELFORMAT_RG16UI:     return GL_RG16UI;
            case SG_PIXELFORMAT_RG16SI:     return GL_RG16I;
            case SG_PIXELFORMAT_RG16F:      return GL_RG16F;
            case SG_PIXELFORMAT_RGBA8:      return GL_RGBA8;
            case SG_PIXELFORMAT_RGBA8SN:    return GL_RGBA8_SNORM;
            case SG_PIXELFORMAT_RGBA8UI:    return GL_RGBA8UI;
            case SG_PIXELFORMAT_RGBA8SI:    return GL_RGBA8I;
            case SG_PIXELFORMAT_RGB10A2:    return GL_RGB10_A2;
            case SG_PIXELFORMAT_RG11B10F:   return GL_R11F_G11F_B10F;
            case SG_PIXELFORMAT_RGB9E5:     return GL_RGB9_E5;
            case SG_PIXELFORMAT_RG32UI:     return GL_RG32UI;
            case SG_PIXELFORMAT_RG32SI:     return GL_RG32I;
            case SG_PIXELFORMAT_RG32F:      return GL_RG32F;
            #if !defined(SOKOL_GLES3)
            case SG_PIXELFORMAT_RGBA16:     return GL_RGBA16;
            case SG_PIXELFORMAT_RGBA16SN:   return GL_RGBA16_SNORM;
            #endif
            case SG_PIXELFORMAT_RGBA16UI:   return GL_RGBA16UI;
            case SG_PIXELFORMAT_RGBA16SI:   return GL_RGBA16I;
            case SG_PIXELFORMAT_RGBA16F:    return GL_RGBA16F;
            case SG_PIXELFORMAT_RGBA32UI:   return GL_RGBA32UI;
            case SG_PIXELFORMAT_RGBA32SI:   return GL_RGBA32I;
            case SG_PIXELFORMAT_RGBA32F:    return GL_RGBA32F;
            case SG_PIXELFORMAT_DEPTH:      return GL_DEPTH_COMPONENT16;
            case SG_PIXELFORMAT_DEPTH_STENCIL:      return GL_DEPTH24_STENCIL8;
            /* compressed formats: internal format == external format */
            case SG_PIXELFORMAT_BC1_RGBA:           return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
            case SG_PIXELFORMAT_BC2_RGBA:           return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
            case SG_PIXELFORMAT_BC3_RGBA:           return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
            case SG_PIXELFORMAT_BC4_R:              return GL_COMPRESSED_RED_RGTC1;
            case SG_PIXELFORMAT_BC4_RSN:            return GL_COMPRESSED_SIGNED_RED_RGTC1;
            case SG_PIXELFORMAT_BC5_RG:             return GL_COMPRESSED_RED_GREEN_RGTC2;
            case SG_PIXELFORMAT_BC5_RGSN:           return GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2;
            case SG_PIXELFORMAT_BC6H_RGBF:          return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB;
            case SG_PIXELFORMAT_BC6H_RGBUF:         return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB;
            case SG_PIXELFORMAT_BC7_RGBA:           return GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
            case SG_PIXELFORMAT_PVRTC_RGB_2BPP:     return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG;
            case SG_PIXELFORMAT_PVRTC_RGB_4BPP:     return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
            case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:    return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG;
            case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:    return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
            case SG_PIXELFORMAT_ETC2_RGB8:          return GL_COMPRESSED_RGB8_ETC2;
            case SG_PIXELFORMAT_ETC2_RGB8A1:        return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2;
            case SG_PIXELFORMAT_ETC2_RGBA8:         return GL_COMPRESSED_RGBA8_ETC2_EAC;
            case SG_PIXELFORMAT_ETC2_RG11:          return GL_COMPRESSED_RG11_EAC;
            case SG_PIXELFORMAT_ETC2_RG11SN:        return GL_COMPRESSED_SIGNED_RG11_EAC;
            default: SOKOL_UNREACHABLE; return 0;
        }
    }
#endif
}
5906 | |
5907 | _SOKOL_PRIVATE GLenum _sg_gl_cubeface_target(int face_index) { |
5908 | switch (face_index) { |
5909 | case 0: return GL_TEXTURE_CUBE_MAP_POSITIVE_X; |
5910 | case 1: return GL_TEXTURE_CUBE_MAP_NEGATIVE_X; |
5911 | case 2: return GL_TEXTURE_CUBE_MAP_POSITIVE_Y; |
5912 | case 3: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Y; |
5913 | case 4: return GL_TEXTURE_CUBE_MAP_POSITIVE_Z; |
5914 | case 5: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Z; |
5915 | default: SOKOL_UNREACHABLE; return 0; |
5916 | } |
5917 | } |
5918 | |
5919 | _SOKOL_PRIVATE GLenum _sg_gl_depth_attachment_format(sg_pixel_format fmt) { |
5920 | switch (fmt) { |
5921 | case SG_PIXELFORMAT_DEPTH: return GL_DEPTH_COMPONENT16; |
5922 | case SG_PIXELFORMAT_DEPTH_STENCIL: return GL_DEPTH24_STENCIL8; |
5923 | default: SOKOL_UNREACHABLE; return 0; |
5924 | } |
5925 | } |
5926 | |
/* see: https://www.khronos.org/registry/OpenGL-Refpages/es3.0/html/glTexImage2D.xhtml */
/* initialize the capability flags of the basic (non-float, non-compressed)
   pixel formats; the _sg_pixelformat_* helpers set different flag
   combinations (presumably decoded from their suffixes: sf = sample+filter,
   srm = sample+render+msaa, all = everything, etc. -- confirm against
   their definitions) */
_SOKOL_PRIVATE void _sg_gl_init_pixelformats(bool has_bgra) {
    /* R8: full support on GLES3/desktop, sample+filter only on GLES2 */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]);
    }
    else {
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8]);
    }
    #else
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8]);
    #endif
    /* formats that require a GLES3/desktop context */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8SN]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]);
        /* 16-bit normalized formats only exist on desktop GL */
        #if !defined(SOKOL_GLES3)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16SN]);
        #endif
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG8SN]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]);
        #if !defined(SOKOL_GLES3)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16SN]);
        #endif
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]);
    }
    #endif
    /* RGBA8 is supported everywhere */
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]);
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]);
    }
    #endif
    /* BGRA8 only when the extension was detected by the caller */
    if (has_bgra) {
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]);
    }
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG11B10F]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGB9E5]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
        #if !defined(SOKOL_GLES3)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]);
        #endif
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
    }
    #endif
    // FIXME: WEBGL_depth_texture extension?
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]);
}
5996 | |
/* FIXME: OES_half_float_blend */
/* initialize capability flags of the 16-bit-float pixel formats, based on
   whether half-float render targets and linear filtering were detected;
   NOTE: the braces of the if/else below intentionally pair across the
   #if/#endif lines -- on SOKOL_GLES2 builds only the GLES2 path at the
   bottom is compiled (unconditionally) */
_SOKOL_PRIVATE void _sg_gl_init_pixelformats_half_float(bool has_colorbuffer_half_float, bool has_texture_half_float_linear) {
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        if (has_texture_half_float_linear) {
            if (has_colorbuffer_half_float) {
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                /* filterable but not renderable */
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
        }
        else {
            if (has_colorbuffer_half_float) {
                /* renderable but not filterable */
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                /* sample-only */
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
        }
    }
    else {
    #endif
        /* GLES2 can only render to RGBA, and there's no RG format */
        if (has_texture_half_float_linear) {
            if (has_colorbuffer_half_float) {
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R16F]);
        }
        else {
            if (has_colorbuffer_half_float) {
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R16F]);
        }
    #if !defined(SOKOL_GLES2)
    }
    #endif
}
6051 | |
/* initialize capability flags of the 32-bit-float pixel formats, based on
   detected support for float render targets, linear filtering and float
   blending; same brace-across-#if structure as the half-float variant */
_SOKOL_PRIVATE void _sg_gl_init_pixelformats_float(bool has_colorbuffer_float, bool has_texture_float_linear, bool has_float_blend) {
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        if (has_texture_float_linear) {
            if (has_colorbuffer_float) {
                if (has_float_blend) {
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R32F]);
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
                else {
                    /* renderable + filterable, but no blending */
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_R32F]);
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
            }
            else {
                /* filterable but not renderable */
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R32F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
        }
        else {
            if (has_colorbuffer_float) {
                /* renderable but not filterable */
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_R32F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            else {
                /* sample-only */
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R32F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
        }
    }
    else {
    #endif
        /* GLES2 can only render to RGBA, and there's no RG format */
        if (has_texture_float_linear) {
            if (has_colorbuffer_float) {
                if (has_float_blend) {
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
                else {
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
            }
            else {
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R32F]);
        }
        else {
            if (has_colorbuffer_float) {
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            else {
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R32F]);
        }
    #if !defined(SOKOL_GLES2)
    }
    #endif
}
6117 | |
6118 | _SOKOL_PRIVATE void _sg_gl_init_pixelformats_s3tc(void) { |
6119 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]); |
6120 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]); |
6121 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]); |
6122 | } |
6123 | |
6124 | _SOKOL_PRIVATE void _sg_gl_init_pixelformats_rgtc(void) { |
6125 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]); |
6126 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]); |
6127 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]); |
6128 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]); |
6129 | } |
6130 | |
6131 | _SOKOL_PRIVATE void _sg_gl_init_pixelformats_bptc(void) { |
6132 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]); |
6133 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]); |
6134 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]); |
6135 | } |
6136 | |
6137 | _SOKOL_PRIVATE void _sg_gl_init_pixelformats_pvrtc(void) { |
6138 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_2BPP]); |
6139 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_4BPP]); |
6140 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_2BPP]); |
6141 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_4BPP]); |
6142 | } |
6143 | |
6144 | _SOKOL_PRIVATE void _sg_gl_init_pixelformats_etc2(void) { |
6145 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8]); |
6146 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8A1]); |
6147 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGBA8]); |
6148 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11]); |
6149 | _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11SN]); |
6150 | } |
6151 | |
/* query GL implementation limits and store them in _sg.limits / _sg.gl;
   a valid GL context must be current */
_SOKOL_PRIVATE void _sg_gl_init_limits(void) {
    _SG_GL_CHECK_ERROR();
    GLint gl_int;
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_int);
    _SG_GL_CHECK_ERROR();
    /* GL has no separate query for array-texture dimensions, reuse 2D limit */
    _sg.limits.max_image_size_2d = gl_int;
    _sg.limits.max_image_size_array = gl_int;
    glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &gl_int);
    _SG_GL_CHECK_ERROR();
    _sg.limits.max_image_size_cube = gl_int;
    glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &gl_int);
    _SG_GL_CHECK_ERROR();
    /* clamp to sokol's own compile-time vertex attribute limit */
    if (gl_int > SG_MAX_VERTEX_ATTRIBUTES) {
        gl_int = SG_MAX_VERTEX_ATTRIBUTES;
    }
    _sg.limits.max_vertex_attrs = gl_int;
    glGetIntegerv(GL_MAX_VERTEX_UNIFORM_VECTORS, &gl_int);
    _SG_GL_CHECK_ERROR();
    _sg.limits.gl_max_vertex_uniform_vectors = gl_int;
    /* 3D and array texture limits only exist on GLES3/desktop contexts */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &gl_int);
        _SG_GL_CHECK_ERROR();
        _sg.limits.max_image_size_3d = gl_int;
        glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &gl_int);
        _SG_GL_CHECK_ERROR();
        _sg.limits.max_image_array_layers = gl_int;
    }
    #endif
    /* anisotropy limit requires the EXT_texture_filter_anisotropic extension */
    if (_sg.gl.ext_anisotropic) {
        glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_int);
        _SG_GL_CHECK_ERROR();
        _sg.gl.max_anisotropy = gl_int;
    }
    else {
        _sg.gl.max_anisotropy = 1;
    }
    glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &gl_int);
    _SG_GL_CHECK_ERROR();
    _sg.gl.max_combined_texture_image_units = gl_int;
}
6193 | |
#if defined(SOKOL_GLCORE33)
/* initialize feature flags, extension-based caps and the pixel format
   capability table for the desktop GL 3.3 Core Profile backend
*/
_SOKOL_PRIVATE void _sg_gl_init_caps_glcore33(void) {
    _sg.backend = SG_BACKEND_GLCORE33;

    _sg.features.origin_top_left = false;
    _sg.features.instancing = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    _sg.features.image_clamp_to_border = true;
    _sg.features.mrt_independent_blend_state = false;
    _sg.features.mrt_independent_write_mask = true;

    /* scan the indexed extension list for compressed-format and
       anisotropic-filtering support
    */
    bool has_s3tc = false;      /* BC1..BC3 */
    bool has_rgtc = false;      /* BC4 and BC5 */
    bool has_bptc = false;      /* BC6H and BC7 */
    bool has_pvrtc = false;
    bool has_etc2 = false;
    GLint num_ext = 0;
    glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext);
    for (GLint ext_index = 0; ext_index < num_ext; ext_index++) {
        const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, (GLuint)ext_index);
        if (0 == ext) {
            continue;
        }
        if (strstr(ext, "_texture_compression_s3tc")) {
            has_s3tc = true;
        }
        else if (strstr(ext, "_texture_compression_rgtc")) {
            has_rgtc = true;
        }
        else if (strstr(ext, "_texture_compression_bptc")) {
            has_bptc = true;
        }
        else if (strstr(ext, "_texture_compression_pvrtc")) {
            has_pvrtc = true;
        }
        else if (strstr(ext, "_ES3_compatibility")) {
            has_etc2 = true;
        }
        else if (strstr(ext, "_texture_filter_anisotropic")) {
            _sg.gl.ext_anisotropic = true;
        }
    }

    /* limits */
    _sg_gl_init_limits();

    /* pixel formats: float/half-float rendering is guaranteed on GL core
       profile, BGRA is deliberately not exposed
    */
    const bool has_bgra = false; /* not a bug */
    const bool has_colorbuffer_float = true;
    const bool has_colorbuffer_half_float = true;
    const bool has_texture_float_linear = true; /* FIXME??? */
    const bool has_texture_half_float_linear = true;
    const bool has_float_blend = true;
    _sg_gl_init_pixelformats(has_bgra);
    _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend);
    _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear);
    if (has_s3tc) {
        _sg_gl_init_pixelformats_s3tc();
    }
    if (has_rgtc) {
        _sg_gl_init_pixelformats_rgtc();
    }
    if (has_bptc) {
        _sg_gl_init_pixelformats_bptc();
    }
    if (has_pvrtc) {
        _sg_gl_init_pixelformats_pvrtc();
    }
    if (has_etc2) {
        _sg_gl_init_pixelformats_etc2();
    }
}
#endif
6270 | |
#if defined(SOKOL_GLES3)
/* initialize feature flags, extension-based caps and the pixel format
   capability table for the GLES3 / WebGL2 backend
*/
_SOKOL_PRIVATE void _sg_gl_init_caps_gles3(void) {
    _sg.backend = SG_BACKEND_GLES3;

    _sg.features.origin_top_left = false;
    _sg.features.instancing = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    _sg.features.image_clamp_to_border = false;
    _sg.features.mrt_independent_blend_state = false;
    _sg.features.mrt_independent_write_mask = false;

    /* scan the extension list; most caps are optional on GLES3 */
    bool has_s3tc = false;  /* BC1..BC3 */
    bool has_rgtc = false;  /* BC4 and BC5 */
    bool has_bptc = false;  /* BC6H and BC7 */
    bool has_pvrtc = false;
    /* ETC2 is core in GLES3, but browsers may not expose it without the
       WebGL extension, so start pessimistic on Emscripten
    */
    #if defined(__EMSCRIPTEN__)
    bool has_etc2 = false;
    #else
    bool has_etc2 = true;
    #endif
    bool has_colorbuffer_float = false;
    bool has_colorbuffer_half_float = false;
    bool has_texture_float_linear = false;
    bool has_float_blend = false;
    GLint num_ext = 0;
    glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext);
    for (int i = 0; i < num_ext; i++) {
        const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, (GLuint)i);
        if (ext) {
            /* both the GL-style and WebGL-style extension names are checked */
            if (strstr(ext, "_texture_compression_s3tc")) {
                has_s3tc = true;
            }
            else if (strstr(ext, "_compressed_texture_s3tc")) {
                has_s3tc = true;
            }
            else if (strstr(ext, "_texture_compression_rgtc")) {
                has_rgtc = true;
            }
            else if (strstr(ext, "_texture_compression_bptc")) {
                has_bptc = true;
            }
            else if (strstr(ext, "_texture_compression_pvrtc")) {
                has_pvrtc = true;
            }
            else if (strstr(ext, "_compressed_texture_pvrtc")) {
                has_pvrtc = true;
            }
            else if (strstr(ext, "_compressed_texture_etc")) {
                has_etc2 = true;
            }
            else if (strstr(ext, "_color_buffer_float")) {
                has_colorbuffer_float = true;
            }
            else if (strstr(ext, "_color_buffer_half_float")) {
                has_colorbuffer_half_float = true;
            }
            else if (strstr(ext, "_texture_float_linear")) {
                has_texture_float_linear = true;
            }
            else if (strstr(ext, "_float_blend")) {
                has_float_blend = true;
            }
            else if (strstr(ext, "_texture_filter_anisotropic")) {
                _sg.gl.ext_anisotropic = true;
            }
        }
    }

    /* on WebGL2, color_buffer_float also includes 16-bit formats
       see: https://developer.mozilla.org/en-US/docs/Web/API/EXT_color_buffer_float
    */
    #if defined(__EMSCRIPTEN__)
    has_colorbuffer_half_float = has_colorbuffer_float;
    #endif

    /* limits */
    _sg_gl_init_limits();

    /* pixel formats */
    const bool has_texture_half_float_linear = true;
    const bool has_bgra = false; /* not a bug */
    _sg_gl_init_pixelformats(has_bgra);
    _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend);
    _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear);
    if (has_s3tc) {
        _sg_gl_init_pixelformats_s3tc();
    }
    if (has_rgtc) {
        _sg_gl_init_pixelformats_rgtc();
    }
    if (has_bptc) {
        _sg_gl_init_pixelformats_bptc();
    }
    if (has_pvrtc) {
        _sg_gl_init_pixelformats_pvrtc();
    }
    if (has_etc2) {
        _sg_gl_init_pixelformats_etc2();
    }
}
#endif
6375 | |
#if defined(SOKOL_GLES3) || defined(SOKOL_GLES2)
/* initialize feature flags, extension-based caps and the pixel format
   capability table for the GLES2 / WebGL1 backend (also used as the
   fallback when a GLES3 backend runs with force_gles2)
*/
_SOKOL_PRIVATE void _sg_gl_init_caps_gles2(void) {
    _sg.backend = SG_BACKEND_GLES2;

    bool has_s3tc = false;  /* BC1..BC3 */
    bool has_rgtc = false;  /* BC4 and BC5 */
    bool has_bptc = false;  /* BC6H and BC7 */
    bool has_pvrtc = false;
    bool has_etc2 = false;
    bool has_texture_float = false;
    bool has_texture_float_linear = false;
    bool has_colorbuffer_float = false;
    bool has_float_blend = false;
    bool has_instancing = false;
    /* GLES2 only offers the single-string extension list */
    const char* ext = (const char*) glGetString(GL_EXTENSIONS);
    if (ext) {
        has_s3tc = strstr(ext, "_texture_compression_s3tc") || strstr(ext, "_compressed_texture_s3tc");
        has_rgtc = strstr(ext, "_texture_compression_rgtc");
        has_bptc = strstr(ext, "_texture_compression_bptc");
        has_pvrtc = strstr(ext, "_texture_compression_pvrtc") || strstr(ext, "_compressed_texture_pvrtc");
        has_etc2 = strstr(ext, "_compressed_texture_etc");
        has_texture_float = strstr(ext, "_texture_float");
        has_texture_float_linear = strstr(ext, "_texture_float_linear");
        has_colorbuffer_float = strstr(ext, "_color_buffer_float");
        has_float_blend = strstr(ext, "_float_blend");
        /* don't bother with half_float support on WebGL1
            has_texture_half_float = strstr(ext, "_texture_half_float");
            has_texture_half_float_linear = strstr(ext, "_texture_half_float_linear");
            has_colorbuffer_half_float = strstr(ext, "_color_buffer_half_float");
        */
        has_instancing = strstr(ext, "_instanced_arrays");
        /* FIX: previously searched for "ext_anisotropic", which can never
           occur in the actual extension name "GL_EXT_texture_filter_anisotropic"
           (strstr is case-sensitive), so anisotropic filtering was always
           reported as unsupported; use the same search string as the
           GL3.3/GLES3 code paths
        */
        _sg.gl.ext_anisotropic = 0 != strstr(ext, "_texture_filter_anisotropic");
    }

    _sg.features.origin_top_left = false;
    /* instancing is only available through an extension on GLES2 */
    #if defined(_SOKOL_GL_INSTANCING_ENABLED)
    _sg.features.instancing = has_instancing;
    #endif
    _sg.features.multiple_render_targets = false;
    _sg.features.msaa_render_targets = false;
    _sg.features.imagetype_3d = false;
    _sg.features.imagetype_array = false;
    _sg.features.image_clamp_to_border = false;
    _sg.features.mrt_independent_blend_state = false;
    _sg.features.mrt_independent_write_mask = false;

    /* limits */
    _sg_gl_init_limits();

    /* pixel formats: float formats are opt-in via extensions, half-float
       support is intentionally not exposed on this backend
    */
    const bool has_bgra = false; /* not a bug */
    const bool has_texture_half_float = false;
    const bool has_texture_half_float_linear = false;
    const bool has_colorbuffer_half_float = false;
    _sg_gl_init_pixelformats(has_bgra);
    if (has_texture_float) {
        _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend);
    }
    if (has_texture_half_float) {
        _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear);
    }
    if (has_s3tc) {
        _sg_gl_init_pixelformats_s3tc();
    }
    if (has_rgtc) {
        _sg_gl_init_pixelformats_rgtc();
    }
    if (has_bptc) {
        _sg_gl_init_pixelformats_bptc();
    }
    if (has_pvrtc) {
        _sg_gl_init_pixelformats_pvrtc();
    }
    if (has_etc2) {
        _sg_gl_init_pixelformats_etc2();
    }
    /* GLES2 doesn't allow multi-sampled render targets at all */
    for (int i = 0; i < _SG_PIXELFORMAT_NUM; i++) {
        _sg.formats[i].msaa = false;
    }
}
#endif
6458 | |
6459 | /*-- state cache implementation ----------------------------------------------*/ |
6460 | _SOKOL_PRIVATE void _sg_gl_cache_clear_buffer_bindings(bool force) { |
6461 | if (force || (_sg.gl.cache.vertex_buffer != 0)) { |
6462 | glBindBuffer(GL_ARRAY_BUFFER, 0); |
6463 | _sg.gl.cache.vertex_buffer = 0; |
6464 | } |
6465 | if (force || (_sg.gl.cache.index_buffer != 0)) { |
6466 | glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); |
6467 | _sg.gl.cache.index_buffer = 0; |
6468 | } |
6469 | } |
6470 | |
6471 | _SOKOL_PRIVATE void _sg_gl_cache_bind_buffer(GLenum target, GLuint buffer) { |
6472 | SOKOL_ASSERT((GL_ARRAY_BUFFER == target) || (GL_ELEMENT_ARRAY_BUFFER == target)); |
6473 | if (target == GL_ARRAY_BUFFER) { |
6474 | if (_sg.gl.cache.vertex_buffer != buffer) { |
6475 | _sg.gl.cache.vertex_buffer = buffer; |
6476 | glBindBuffer(target, buffer); |
6477 | } |
6478 | } |
6479 | else { |
6480 | if (_sg.gl.cache.index_buffer != buffer) { |
6481 | _sg.gl.cache.index_buffer = buffer; |
6482 | glBindBuffer(target, buffer); |
6483 | } |
6484 | } |
6485 | } |
6486 | |
6487 | _SOKOL_PRIVATE void _sg_gl_cache_store_buffer_binding(GLenum target) { |
6488 | if (target == GL_ARRAY_BUFFER) { |
6489 | _sg.gl.cache.stored_vertex_buffer = _sg.gl.cache.vertex_buffer; |
6490 | } |
6491 | else { |
6492 | _sg.gl.cache.stored_index_buffer = _sg.gl.cache.index_buffer; |
6493 | } |
6494 | } |
6495 | |
6496 | _SOKOL_PRIVATE void _sg_gl_cache_restore_buffer_binding(GLenum target) { |
6497 | if (target == GL_ARRAY_BUFFER) { |
6498 | if (_sg.gl.cache.stored_vertex_buffer != 0) { |
6499 | /* we only care restoring valid ids */ |
6500 | _sg_gl_cache_bind_buffer(target, _sg.gl.cache.stored_vertex_buffer); |
6501 | _sg.gl.cache.stored_vertex_buffer = 0; |
6502 | } |
6503 | } |
6504 | else { |
6505 | if (_sg.gl.cache.stored_index_buffer != 0) { |
6506 | /* we only care restoring valid ids */ |
6507 | _sg_gl_cache_bind_buffer(target, _sg.gl.cache.stored_index_buffer); |
6508 | _sg.gl.cache.stored_index_buffer = 0; |
6509 | } |
6510 | } |
6511 | } |
6512 | |
6513 | /* called when _sg_gl_deinit_buffer() */ |
6514 | _SOKOL_PRIVATE void _sg_gl_cache_invalidate_buffer(GLuint buf) { |
6515 | if (buf == _sg.gl.cache.vertex_buffer) { |
6516 | _sg.gl.cache.vertex_buffer = 0; |
6517 | glBindBuffer(GL_ARRAY_BUFFER, 0); |
6518 | } |
6519 | if (buf == _sg.gl.cache.index_buffer) { |
6520 | _sg.gl.cache.index_buffer = 0; |
6521 | glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); |
6522 | } |
6523 | if (buf == _sg.gl.cache.stored_vertex_buffer) { |
6524 | _sg.gl.cache.stored_vertex_buffer = 0; |
6525 | } |
6526 | if (buf == _sg.gl.cache.stored_index_buffer) { |
6527 | _sg.gl.cache.stored_index_buffer = 0; |
6528 | } |
6529 | for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { |
6530 | if (buf == _sg.gl.cache.attrs[i].gl_vbuf) { |
6531 | _sg.gl.cache.attrs[i].gl_vbuf = 0; |
6532 | } |
6533 | } |
6534 | } |
6535 | |
6536 | _SOKOL_PRIVATE void _sg_gl_cache_active_texture(GLenum texture) { |
6537 | if (_sg.gl.cache.cur_active_texture != texture) { |
6538 | _sg.gl.cache.cur_active_texture = texture; |
6539 | glActiveTexture(texture); |
6540 | } |
6541 | } |
6542 | |
6543 | _SOKOL_PRIVATE void _sg_gl_cache_clear_texture_bindings(bool force) { |
6544 | for (int i = 0; (i < SG_MAX_SHADERSTAGE_IMAGES) && (i < _sg.gl.max_combined_texture_image_units); i++) { |
6545 | if (force || (_sg.gl.cache.textures[i].texture != 0)) { |
6546 | GLenum gl_texture_slot = (GLenum) (GL_TEXTURE0 + i); |
6547 | glActiveTexture(gl_texture_slot); |
6548 | glBindTexture(GL_TEXTURE_2D, 0); |
6549 | glBindTexture(GL_TEXTURE_CUBE_MAP, 0); |
6550 | #if !defined(SOKOL_GLES2) |
6551 | if (!_sg.gl.gles2) { |
6552 | glBindTexture(GL_TEXTURE_3D, 0); |
6553 | glBindTexture(GL_TEXTURE_2D_ARRAY, 0); |
6554 | } |
6555 | #endif |
6556 | _sg.gl.cache.textures[i].target = 0; |
6557 | _sg.gl.cache.textures[i].texture = 0; |
6558 | _sg.gl.cache.cur_active_texture = gl_texture_slot; |
6559 | } |
6560 | } |
6561 | } |
6562 | |
6563 | _SOKOL_PRIVATE void _sg_gl_cache_bind_texture(int slot_index, GLenum target, GLuint texture) { |
6564 | /* it's valid to call this function with target=0 and/or texture=0 |
6565 | target=0 will unbind the previous binding, texture=0 will clear |
6566 | the new binding |
6567 | */ |
6568 | SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES); |
6569 | if (slot_index >= _sg.gl.max_combined_texture_image_units) { |
6570 | return; |
6571 | } |
6572 | _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[slot_index]; |
6573 | if ((slot->target != target) || (slot->texture != texture)) { |
6574 | _sg_gl_cache_active_texture((GLenum)(GL_TEXTURE0 + slot_index)); |
6575 | /* if the target has changed, clear the previous binding on that target */ |
6576 | if ((target != slot->target) && (slot->target != 0)) { |
6577 | glBindTexture(slot->target, 0); |
6578 | } |
6579 | /* apply new binding (texture can be 0 to unbind) */ |
6580 | if (target != 0) { |
6581 | glBindTexture(target, texture); |
6582 | } |
6583 | slot->target = target; |
6584 | slot->texture = texture; |
6585 | } |
6586 | } |
6587 | |
6588 | _SOKOL_PRIVATE void _sg_gl_cache_store_texture_binding(int slot_index) { |
6589 | SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES); |
6590 | _sg.gl.cache.stored_texture = _sg.gl.cache.textures[slot_index]; |
6591 | } |
6592 | |
6593 | _SOKOL_PRIVATE void _sg_gl_cache_restore_texture_binding(int slot_index) { |
6594 | SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES); |
6595 | _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.stored_texture; |
6596 | if (slot->texture != 0) { |
6597 | /* we only care restoring valid ids */ |
6598 | SOKOL_ASSERT(slot->target != 0); |
6599 | _sg_gl_cache_bind_texture(slot_index, slot->target, slot->texture); |
6600 | slot->target = 0; |
6601 | slot->texture = 0; |
6602 | } |
6603 | } |
6604 | |
6605 | /* called from _sg_gl_destroy_texture() */ |
6606 | _SOKOL_PRIVATE void _sg_gl_cache_invalidate_texture(GLuint tex) { |
6607 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
6608 | _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[i]; |
6609 | if (tex == slot->texture) { |
6610 | _sg_gl_cache_active_texture((GLenum)(GL_TEXTURE0 + i)); |
6611 | glBindTexture(slot->target, 0); |
6612 | slot->target = 0; |
6613 | slot->texture = 0; |
6614 | } |
6615 | } |
6616 | if (tex == _sg.gl.cache.stored_texture.texture) { |
6617 | _sg.gl.cache.stored_texture.target = 0; |
6618 | _sg.gl.cache.stored_texture.texture = 0; |
6619 | } |
6620 | } |
6621 | |
6622 | /* called from _sg_gl_discard_shader() */ |
6623 | _SOKOL_PRIVATE void _sg_gl_cache_invalidate_program(GLuint prog) { |
6624 | if (prog == _sg.gl.cache.prog) { |
6625 | _sg.gl.cache.prog = 0; |
6626 | glUseProgram(0); |
6627 | } |
6628 | } |
6629 | |
6630 | /* called from _sg_gl_discard_pipeline() */ |
6631 | _SOKOL_PRIVATE void _sg_gl_cache_invalidate_pipeline(_sg_pipeline_t* pip) { |
6632 | if (pip == _sg.gl.cache.cur_pipeline) { |
6633 | _sg.gl.cache.cur_pipeline = 0; |
6634 | _sg.gl.cache.cur_pipeline_id.id = SG_INVALID_ID; |
6635 | } |
6636 | } |
6637 | |
/* reset the GL state cache and force the actual GL state into a known
   default configuration; must be called whenever GL state may have been
   changed behind sokol-gfx's back (e.g. after sg_reset_state_cache() or
   a context switch); a no-op when no context is active
*/
_SOKOL_PRIVATE void _sg_gl_reset_state_cache(void) {
    if (_sg.gl.cur_context) {
        _SG_GL_CHECK_ERROR();
#if !defined(SOKOL_GLES2)
        if (!_sg.gl.gles2) {
            /* re-bind the per-context vertex array object first, since all
               following vertex-attribute state lands in the VAO */
            glBindVertexArray(_sg.gl.cur_context->vao);
            _SG_GL_CHECK_ERROR();
        }
#endif
        _sg_clear(&_sg.gl.cache, sizeof(_sg.gl.cache));
        _sg_gl_cache_clear_buffer_bindings(true);
        _SG_GL_CHECK_ERROR();
        _sg_gl_cache_clear_texture_bindings(true);
        _SG_GL_CHECK_ERROR();
        /* disable all vertex attributes; -1 marks the cache slots as unused */
        for (int i = 0; i < _sg.limits.max_vertex_attrs; i++) {
            _sg_gl_attr_t* attr = &_sg.gl.cache.attrs[i].gl_attr;
            attr->vb_index = -1;
            attr->divisor = -1;
            glDisableVertexAttribArray((GLuint)i);
            _SG_GL_CHECK_ERROR();
        }
        _sg.gl.cache.cur_primitive_type = GL_TRIANGLES;

        /* shader program */
        glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&_sg.gl.cache.prog);
        _SG_GL_CHECK_ERROR();

        /* depth and stencil state */
        _sg.gl.cache.depth.compare = SG_COMPAREFUNC_ALWAYS;
        _sg.gl.cache.stencil.front.compare = SG_COMPAREFUNC_ALWAYS;
        _sg.gl.cache.stencil.front.fail_op = SG_STENCILOP_KEEP;
        _sg.gl.cache.stencil.front.depth_fail_op = SG_STENCILOP_KEEP;
        _sg.gl.cache.stencil.front.pass_op = SG_STENCILOP_KEEP;
        _sg.gl.cache.stencil.back.compare = SG_COMPAREFUNC_ALWAYS;
        _sg.gl.cache.stencil.back.fail_op = SG_STENCILOP_KEEP;
        _sg.gl.cache.stencil.back.depth_fail_op = SG_STENCILOP_KEEP;
        _sg.gl.cache.stencil.back.pass_op = SG_STENCILOP_KEEP;
        /* the GL calls below must mirror the cache defaults set above */
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GL_ALWAYS);
        glDepthMask(GL_FALSE);
        glDisable(GL_STENCIL_TEST);
        glStencilFunc(GL_ALWAYS, 0, 0);
        glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
        glStencilMask(0);

        /* blend state */
        _sg.gl.cache.blend.src_factor_rgb = SG_BLENDFACTOR_ONE;
        _sg.gl.cache.blend.dst_factor_rgb = SG_BLENDFACTOR_ZERO;
        _sg.gl.cache.blend.op_rgb = SG_BLENDOP_ADD;
        _sg.gl.cache.blend.src_factor_alpha = SG_BLENDFACTOR_ONE;
        _sg.gl.cache.blend.dst_factor_alpha = SG_BLENDFACTOR_ZERO;
        _sg.gl.cache.blend.op_alpha = SG_BLENDOP_ADD;
        glDisable(GL_BLEND);
        glBlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO);
        glBlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD);
        glBlendColor(0.0f, 0.0f, 0.0f, 0.0f);

        /* standalone state */
        for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            _sg.gl.cache.color_write_mask[i] = SG_COLORMASK_RGBA;
        }
        _sg.gl.cache.cull_mode = SG_CULLMODE_NONE;
        _sg.gl.cache.face_winding = SG_FACEWINDING_CW;
        _sg.gl.cache.sample_count = 1;
        glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        glPolygonOffset(0.0f, 0.0f);
        glDisable(GL_POLYGON_OFFSET_FILL);
        glDisable(GL_CULL_FACE);
        glFrontFace(GL_CW);
        glCullFace(GL_BACK);
        glEnable(GL_SCISSOR_TEST);
        glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
        glEnable(GL_DITHER);
        glDisable(GL_POLYGON_OFFSET_FILL);
#if defined(SOKOL_GLCORE33)
        /* desktop-GL-only state, doesn't exist on GLES */
        glEnable(GL_MULTISAMPLE);
        glEnable(GL_PROGRAM_POINT_SIZE);
#endif
    }
}
6718 | |
/* one-time GL backend initialization: resolve the GLES2 fallback flag,
   optionally load GL function pointers (Win32), clear any pre-existing
   GL error state, and initialize caps/limits for the compiled backend
*/
_SOKOL_PRIVATE void _sg_gl_setup_backend(const sg_desc* desc) {
    /* assumes that _sg.gl is already zero-initialized */
    _sg.gl.valid = true;
#if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
    /* a GLES3 build can be forced down to the GLES2 feature set */
    _sg.gl.gles2 = desc->context.gl.force_gles2;
#else
    _SOKOL_UNUSED(desc);
    _sg.gl.gles2 = false;
#endif

#if defined(_SOKOL_USE_WIN32_GL_LOADER)
    _sg_gl_load_opengl();
#endif

    /* clear initial GL error state */
#if defined(SOKOL_DEBUG)
    while (glGetError() != GL_NO_ERROR);
#endif
#if defined(SOKOL_GLCORE33)
    _sg_gl_init_caps_glcore33();
#elif defined(SOKOL_GLES3)
    if (_sg.gl.gles2) {
        _sg_gl_init_caps_gles2();
    }
    else {
        _sg_gl_init_caps_gles3();
    }
#else
    _sg_gl_init_caps_gles2();
#endif
}
6750 | |
6751 | _SOKOL_PRIVATE void _sg_gl_discard_backend(void) { |
6752 | SOKOL_ASSERT(_sg.gl.valid); |
6753 | _sg.gl.valid = false; |
6754 | #if defined(_SOKOL_USE_WIN32_GL_LOADER) |
6755 | _sg_gl_unload_opengl(); |
6756 | #endif |
6757 | } |
6758 | |
6759 | _SOKOL_PRIVATE void _sg_gl_activate_context(_sg_context_t* ctx) { |
6760 | SOKOL_ASSERT(_sg.gl.valid); |
6761 | /* NOTE: ctx can be 0 to unset the current context */ |
6762 | _sg.gl.cur_context = ctx; |
6763 | _sg_gl_reset_state_cache(); |
6764 | } |
6765 | |
/*-- GL backend resource creation and destruction ----------------------------*/
/* capture the externally-created default framebuffer and (on non-GLES2)
   create and bind one vertex array object for the lifetime of the context
*/
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    SOKOL_ASSERT(0 == ctx->default_framebuffer);
    _SG_GL_CHECK_ERROR();
    /* whatever framebuffer is bound at context-creation time is treated as
       the default framebuffer for render passes without attachments */
    glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&ctx->default_framebuffer);
    _SG_GL_CHECK_ERROR();
#if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        SOKOL_ASSERT(0 == ctx->vao);
        glGenVertexArrays(1, &ctx->vao);
        glBindVertexArray(ctx->vao);
        _SG_GL_CHECK_ERROR();
    }
#endif
    return SG_RESOURCESTATE_VALID;
}
6783 | |
6784 | _SOKOL_PRIVATE void _sg_gl_discard_context(_sg_context_t* ctx) { |
6785 | SOKOL_ASSERT(ctx); |
6786 | #if !defined(SOKOL_GLES2) |
6787 | if (!_sg.gl.gles2) { |
6788 | if (ctx->vao) { |
6789 | glDeleteVertexArrays(1, &ctx->vao); |
6790 | } |
6791 | _SG_GL_CHECK_ERROR(); |
6792 | } |
6793 | #else |
6794 | _SOKOL_UNUSED(ctx); |
6795 | #endif |
6796 | } |
6797 | |
/* create GL buffer objects for all update slots (double/triple buffering),
   or adopt externally injected buffers from desc->gl_buffers[];
   immutable buffers are filled from desc->data
*/
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    _SG_GL_CHECK_ERROR();
    _sg_buffer_common_init(&buf->cmn, desc);
    /* a non-zero first entry in gl_buffers[] marks externally-owned buffers */
    buf->gl.ext_buffers = (0 != desc->gl_buffers[0]);
    GLenum gl_target = _sg_gl_buffer_target(buf->cmn.type);
    GLenum gl_usage  = _sg_gl_usage(buf->cmn.usage);
    for (int slot = 0; slot < buf->cmn.num_slots; slot++) {
        GLuint gl_buf = 0;
        if (buf->gl.ext_buffers) {
            SOKOL_ASSERT(desc->gl_buffers[slot]);
            gl_buf = desc->gl_buffers[slot];
        }
        else {
            glGenBuffers(1, &gl_buf);
            SOKOL_ASSERT(gl_buf);
            /* save/restore the app's buffer binding around the upload so the
               state cache stays consistent */
            _sg_gl_cache_store_buffer_binding(gl_target);
            _sg_gl_cache_bind_buffer(gl_target, gl_buf);
            /* allocate storage first, then upload initial data separately */
            glBufferData(gl_target, buf->cmn.size, 0, gl_usage);
            if (buf->cmn.usage == SG_USAGE_IMMUTABLE) {
                SOKOL_ASSERT(desc->data.ptr);
                glBufferSubData(gl_target, 0, buf->cmn.size, desc->data.ptr);
            }
            _sg_gl_cache_restore_buffer_binding(gl_target);
        }
        buf->gl.buf[slot] = gl_buf;
    }
    _SG_GL_CHECK_ERROR();
    return SG_RESOURCESTATE_VALID;
}
6828 | |
6829 | _SOKOL_PRIVATE void _sg_gl_discard_buffer(_sg_buffer_t* buf) { |
6830 | SOKOL_ASSERT(buf); |
6831 | _SG_GL_CHECK_ERROR(); |
6832 | for (int slot = 0; slot < buf->cmn.num_slots; slot++) { |
6833 | if (buf->gl.buf[slot]) { |
6834 | _sg_gl_cache_invalidate_buffer(buf->gl.buf[slot]); |
6835 | if (!buf->gl.ext_buffers) { |
6836 | glDeleteBuffers(1, &buf->gl.buf[slot]); |
6837 | } |
6838 | } |
6839 | } |
6840 | _SG_GL_CHECK_ERROR(); |
6841 | } |
6842 | |
6843 | _SOKOL_PRIVATE bool _sg_gl_supported_texture_format(sg_pixel_format fmt) { |
6844 | const int fmt_index = (int) fmt; |
6845 | SOKOL_ASSERT((fmt_index > SG_PIXELFORMAT_NONE) && (fmt_index < _SG_PIXELFORMAT_NUM)); |
6846 | return _sg.formats[fmt_index].sample; |
6847 | } |
6848 | |
/* create a GL image resource; depending on the image desc this creates:
   - a renderbuffer for depth/stencil render-target formats,
   - an additional MSAA renderbuffer for multisampled color render targets,
   - one or more GL texture objects with sampler state and initial data,
     or adopts externally injected textures from desc->gl_textures[]
*/
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    _SG_GL_CHECK_ERROR();
    _sg_image_common_init(&img->cmn, desc);
    /* a non-zero first entry in gl_textures[] marks externally-owned textures */
    img->gl.ext_textures = (0 != desc->gl_textures[0]);

    /* check if texture format is supported */
    if (!_sg_gl_supported_texture_format(img->cmn.pixel_format)) {
        SG_LOG("texture format not supported by GL context\n");
        return SG_RESOURCESTATE_FAILED;
    }
    /* check for optional texture types */
    if ((img->cmn.type == SG_IMAGETYPE_3D) && !_sg.features.imagetype_3d) {
        SG_LOG("3D textures not supported by GL context\n");
        return SG_RESOURCESTATE_FAILED;
    }
    if ((img->cmn.type == SG_IMAGETYPE_ARRAY) && !_sg.features.imagetype_array) {
        SG_LOG("array textures not supported by GL context\n");
        return SG_RESOURCESTATE_FAILED;
    }

#if !defined(SOKOL_GLES2)
    /* MSAA is never available on the GLES2 feature set */
    bool msaa = false;
    if (!_sg.gl.gles2) {
        msaa = (img->cmn.sample_count > 1) && (_sg.features.msaa_render_targets);
    }
#endif

    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* special case depth-stencil-buffer? */
        SOKOL_ASSERT((img->cmn.usage == SG_USAGE_IMMUTABLE) && (img->cmn.num_slots == 1));
        SOKOL_ASSERT(!img->gl.ext_textures);   /* cannot provide external texture for depth images */
        /* depth images are renderbuffers, not textures (cannot be sampled) */
        glGenRenderbuffers(1, &img->gl.depth_render_buffer);
        glBindRenderbuffer(GL_RENDERBUFFER, img->gl.depth_render_buffer);
        GLenum gl_depth_format = _sg_gl_depth_attachment_format(img->cmn.pixel_format);
#if !defined(SOKOL_GLES2)
        if (!_sg.gl.gles2 && msaa) {
            glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->cmn.sample_count, gl_depth_format, img->cmn.width, img->cmn.height);
        }
        else
#endif
        {
            glRenderbufferStorage(GL_RENDERBUFFER, gl_depth_format, img->cmn.width, img->cmn.height);
        }
    }
    else {
        /* regular color texture */
        img->gl.target = _sg_gl_texture_target(img->cmn.type);
        const GLenum gl_internal_format = _sg_gl_teximage_internal_format(img->cmn.pixel_format);

        /* if this is a MSAA render target, need to create a separate render buffer */
#if !defined(SOKOL_GLES2)
        if (!_sg.gl.gles2 && img->cmn.render_target && msaa) {
            glGenRenderbuffers(1, &img->gl.msaa_render_buffer);
            glBindRenderbuffer(GL_RENDERBUFFER, img->gl.msaa_render_buffer);
            glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->cmn.sample_count, gl_internal_format, img->cmn.width, img->cmn.height);
        }
#endif

        if (img->gl.ext_textures) {
            /* inject externally GL textures */
            for (int slot = 0; slot < img->cmn.num_slots; slot++) {
                SOKOL_ASSERT(desc->gl_textures[slot]);
                img->gl.tex[slot] = desc->gl_textures[slot];
            }
            /* the texture target can optionally be overridden by the app */
            if (desc->gl_texture_target) {
                img->gl.target = (GLenum)desc->gl_texture_target;
            }
        }
        else {
            /* create our own GL texture(s) */
            const GLenum gl_format = _sg_gl_teximage_format(img->cmn.pixel_format);
            const bool is_compressed = _sg_is_compressed_pixel_format(img->cmn.pixel_format);
            for (int slot = 0; slot < img->cmn.num_slots; slot++) {
                glGenTextures(1, &img->gl.tex[slot]);
                SOKOL_ASSERT(img->gl.tex[slot]);
                /* temporarily bind through slot 0 of the state cache and
                   restore the app's binding afterwards */
                _sg_gl_cache_store_texture_binding(0);
                _sg_gl_cache_bind_texture(0, img->gl.target, img->gl.tex[slot]);
                /* sampler state is baked into the texture object (GL has no
                   separate sampler objects in this code path) */
                GLenum gl_min_filter = _sg_gl_filter(img->cmn.min_filter);
                GLenum gl_mag_filter = _sg_gl_filter(img->cmn.mag_filter);
                glTexParameteri(img->gl.target, GL_TEXTURE_MIN_FILTER, (GLint)gl_min_filter);
                glTexParameteri(img->gl.target, GL_TEXTURE_MAG_FILTER, (GLint)gl_mag_filter);
                if (_sg.gl.ext_anisotropic && (img->cmn.max_anisotropy > 1)) {
                    /* clamp the requested anisotropy level to the hardware limit */
                    GLint max_aniso = (GLint) img->cmn.max_anisotropy;
                    if (max_aniso > _sg.gl.max_anisotropy) {
                        max_aniso = _sg.gl.max_anisotropy;
                    }
                    glTexParameteri(img->gl.target, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_aniso);
                }
                if (img->cmn.type == SG_IMAGETYPE_CUBE) {
                    /* cubemaps always use clamp-to-edge to avoid seams */
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
                }
                else {
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, (GLint)_sg_gl_wrap(img->cmn.wrap_u));
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, (GLint)_sg_gl_wrap(img->cmn.wrap_v));
#if !defined(SOKOL_GLES2)
                    if (!_sg.gl.gles2 && (img->cmn.type == SG_IMAGETYPE_3D)) {
                        glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_R, (GLint)_sg_gl_wrap(img->cmn.wrap_w));
                    }
#endif
#if defined(SOKOL_GLCORE33)
                    /* border color is desktop-GL-only */
                    float border[4];
                    switch (img->cmn.border_color) {
                        case SG_BORDERCOLOR_TRANSPARENT_BLACK:
                            border[0] = 0.0f; border[1] = 0.0f; border[2] = 0.0f; border[3] = 0.0f;
                            break;
                        case SG_BORDERCOLOR_OPAQUE_WHITE:
                            border[0] = 1.0f; border[1] = 1.0f; border[2] = 1.0f; border[3] = 1.0f;
                            break;
                        default:
                            /* SG_BORDERCOLOR_OPAQUE_BLACK (and zero-initialized default) */
                            border[0] = 0.0f; border[1] = 0.0f; border[2] = 0.0f; border[3] = 1.0f;
                            break;
                    }
                    glTexParameterfv(img->gl.target, GL_TEXTURE_BORDER_COLOR, border);
#endif
                }
#if !defined(SOKOL_GLES2)
                if (!_sg.gl.gles2) {
                    /* GL spec has strange defaults for mipmap min/max lod: -1000 to +1000 */
                    const float min_lod = _sg_clamp(desc->min_lod, 0.0f, 1000.0f);
                    const float max_lod = _sg_clamp(desc->max_lod, 0.0f, 1000.0f);
                    glTexParameterf(img->gl.target, GL_TEXTURE_MIN_LOD, min_lod);
                    glTexParameterf(img->gl.target, GL_TEXTURE_MAX_LOD, max_lod);
                }
#endif
                /* upload initial content for each cube face and mipmap level */
                const int num_faces = img->cmn.type == SG_IMAGETYPE_CUBE ? 6 : 1;
                int data_index = 0;
                for (int face_index = 0; face_index < num_faces; face_index++) {
                    for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, data_index++) {
                        GLenum gl_img_target = img->gl.target;
                        if (SG_IMAGETYPE_CUBE == img->cmn.type) {
                            gl_img_target = _sg_gl_cubeface_target(face_index);
                        }
                        const GLvoid* data_ptr = desc->data.subimage[face_index][mip_index].ptr;
                        /* mip dimensions halve per level but never drop below 1 */
                        int mip_width = img->cmn.width >> mip_index;
                        if (mip_width == 0) {
                            mip_width = 1;
                        }
                        int mip_height = img->cmn.height >> mip_index;
                        if (mip_height == 0) {
                            mip_height = 1;
                        }
                        if ((SG_IMAGETYPE_2D == img->cmn.type) || (SG_IMAGETYPE_CUBE == img->cmn.type)) {
                            if (is_compressed) {
                                const GLsizei data_size = (GLsizei) desc->data.subimage[face_index][mip_index].size;
                                glCompressedTexImage2D(gl_img_target, mip_index, gl_internal_format,
                                    mip_width, mip_height, 0, data_size, data_ptr);
                            }
                            else {
                                const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format);
                                glTexImage2D(gl_img_target, mip_index, (GLint)gl_internal_format,
                                    mip_width, mip_height, 0, gl_format, gl_type, data_ptr);
                            }
                        }
#if !defined(SOKOL_GLES2)
                        else if (!_sg.gl.gles2 && ((SG_IMAGETYPE_3D == img->cmn.type) || (SG_IMAGETYPE_ARRAY == img->cmn.type))) {
                            /* 3D textures halve depth per mip level, array layer count stays constant */
                            int mip_depth = img->cmn.num_slices;
                            if (SG_IMAGETYPE_3D == img->cmn.type) {
                                mip_depth >>= mip_index;
                            }
                            if (mip_depth == 0) {
                                mip_depth = 1;
                            }
                            if (is_compressed) {
                                const GLsizei data_size = (GLsizei) desc->data.subimage[face_index][mip_index].size;
                                glCompressedTexImage3D(gl_img_target, mip_index, gl_internal_format,
                                    mip_width, mip_height, mip_depth, 0, data_size, data_ptr);
                            }
                            else {
                                const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format);
                                glTexImage3D(gl_img_target, mip_index, (GLint)gl_internal_format,
                                    mip_width, mip_height, mip_depth, 0, gl_format, gl_type, data_ptr);
                            }
                        }
#endif
                    }
                }
                _sg_gl_cache_restore_texture_binding(0);
            }
        }
    }
    _SG_GL_CHECK_ERROR();
    return SG_RESOURCESTATE_VALID;
}
7034 | |
7035 | _SOKOL_PRIVATE void _sg_gl_discard_image(_sg_image_t* img) { |
7036 | SOKOL_ASSERT(img); |
7037 | _SG_GL_CHECK_ERROR(); |
7038 | for (int slot = 0; slot < img->cmn.num_slots; slot++) { |
7039 | if (img->gl.tex[slot]) { |
7040 | _sg_gl_cache_invalidate_texture(img->gl.tex[slot]); |
7041 | if (!img->gl.ext_textures) { |
7042 | glDeleteTextures(1, &img->gl.tex[slot]); |
7043 | } |
7044 | } |
7045 | } |
7046 | if (img->gl.depth_render_buffer) { |
7047 | glDeleteRenderbuffers(1, &img->gl.depth_render_buffer); |
7048 | } |
7049 | if (img->gl.msaa_render_buffer) { |
7050 | glDeleteRenderbuffers(1, &img->gl.msaa_render_buffer); |
7051 | } |
7052 | _SG_GL_CHECK_ERROR(); |
7053 | } |
7054 | |
7055 | _SOKOL_PRIVATE GLuint _sg_gl_compile_shader(sg_shader_stage stage, const char* src) { |
7056 | SOKOL_ASSERT(src); |
7057 | _SG_GL_CHECK_ERROR(); |
7058 | GLuint gl_shd = glCreateShader(_sg_gl_shader_stage(stage)); |
7059 | glShaderSource(gl_shd, 1, &src, 0); |
7060 | glCompileShader(gl_shd); |
7061 | GLint compile_status = 0; |
7062 | glGetShaderiv(gl_shd, GL_COMPILE_STATUS, &compile_status); |
7063 | if (!compile_status) { |
7064 | /* compilation failed, log error and delete shader */ |
7065 | GLint log_len = 0; |
7066 | glGetShaderiv(gl_shd, GL_INFO_LOG_LENGTH, &log_len); |
7067 | if (log_len > 0) { |
7068 | GLchar* log_buf = (GLchar*) _sg_malloc((size_t)log_len); |
7069 | glGetShaderInfoLog(gl_shd, log_len, &log_len, log_buf); |
7070 | SG_LOG(log_buf); |
7071 | _sg_free(log_buf); |
7072 | } |
7073 | glDeleteShader(gl_shd); |
7074 | gl_shd = 0; |
7075 | } |
7076 | _SG_GL_CHECK_ERROR(); |
7077 | return gl_shd; |
7078 | } |
7079 | |
7080 | _SOKOL_PRIVATE sg_resource_state _sg_gl_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { |
7081 | SOKOL_ASSERT(shd && desc); |
7082 | SOKOL_ASSERT(!shd->gl.prog); |
7083 | _SG_GL_CHECK_ERROR(); |
7084 | |
7085 | _sg_shader_common_init(&shd->cmn, desc); |
7086 | |
7087 | /* copy vertex attribute names over, these are required for GLES2, and optional for GLES3 and GL3.x */ |
7088 | for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { |
7089 | _sg_strcpy(&shd->gl.attrs[i].name, desc->attrs[i].name); |
7090 | } |
7091 | |
7092 | GLuint gl_vs = _sg_gl_compile_shader(SG_SHADERSTAGE_VS, desc->vs.source); |
7093 | GLuint gl_fs = _sg_gl_compile_shader(SG_SHADERSTAGE_FS, desc->fs.source); |
7094 | if (!(gl_vs && gl_fs)) { |
7095 | return SG_RESOURCESTATE_FAILED; |
7096 | } |
7097 | GLuint gl_prog = glCreateProgram(); |
7098 | glAttachShader(gl_prog, gl_vs); |
7099 | glAttachShader(gl_prog, gl_fs); |
7100 | glLinkProgram(gl_prog); |
7101 | glDeleteShader(gl_vs); |
7102 | glDeleteShader(gl_fs); |
7103 | _SG_GL_CHECK_ERROR(); |
7104 | |
7105 | GLint link_status; |
7106 | glGetProgramiv(gl_prog, GL_LINK_STATUS, &link_status); |
7107 | if (!link_status) { |
7108 | GLint log_len = 0; |
7109 | glGetProgramiv(gl_prog, GL_INFO_LOG_LENGTH, &log_len); |
7110 | if (log_len > 0) { |
7111 | GLchar* log_buf = (GLchar*) _sg_malloc((size_t)log_len); |
7112 | glGetProgramInfoLog(gl_prog, log_len, &log_len, log_buf); |
7113 | SG_LOG(log_buf); |
7114 | _sg_free(log_buf); |
7115 | } |
7116 | glDeleteProgram(gl_prog); |
7117 | return SG_RESOURCESTATE_FAILED; |
7118 | } |
7119 | shd->gl.prog = gl_prog; |
7120 | |
7121 | /* resolve uniforms */ |
7122 | _SG_GL_CHECK_ERROR(); |
7123 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
7124 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs; |
7125 | _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index]; |
7126 | for (int ub_index = 0; ub_index < shd->cmn.stage[stage_index].num_uniform_blocks; ub_index++) { |
7127 | const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; |
7128 | SOKOL_ASSERT(ub_desc->size > 0); |
7129 | _sg_gl_uniform_block_t* ub = &gl_stage->uniform_blocks[ub_index]; |
7130 | SOKOL_ASSERT(ub->num_uniforms == 0); |
7131 | uint32_t cur_uniform_offset = 0; |
7132 | for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) { |
7133 | const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index]; |
7134 | if (u_desc->type == SG_UNIFORMTYPE_INVALID) { |
7135 | break; |
7136 | } |
7137 | const uint32_t u_align = _sg_uniform_alignment(u_desc->type, u_desc->array_count, ub_desc->layout); |
7138 | const uint32_t u_size = _sg_uniform_size(u_desc->type, u_desc->array_count, ub_desc->layout); |
7139 | cur_uniform_offset = _sg_align_u32(cur_uniform_offset, u_align); |
7140 | _sg_gl_uniform_t* u = &ub->uniforms[u_index]; |
7141 | u->type = u_desc->type; |
7142 | u->count = (uint16_t) u_desc->array_count; |
7143 | u->offset = (uint16_t) cur_uniform_offset; |
7144 | cur_uniform_offset += u_size; |
7145 | if (u_desc->name) { |
7146 | u->gl_loc = glGetUniformLocation(gl_prog, u_desc->name); |
7147 | } |
7148 | else { |
7149 | u->gl_loc = u_index; |
7150 | } |
7151 | ub->num_uniforms++; |
7152 | } |
7153 | if (ub_desc->layout == SG_UNIFORMLAYOUT_STD140) { |
7154 | cur_uniform_offset = _sg_align_u32(cur_uniform_offset, 16); |
7155 | } |
7156 | SOKOL_ASSERT(ub_desc->size == (size_t)cur_uniform_offset); |
7157 | _SOKOL_UNUSED(cur_uniform_offset); |
7158 | } |
7159 | } |
7160 | |
7161 | /* resolve image locations */ |
7162 | _SG_GL_CHECK_ERROR(); |
7163 | GLuint cur_prog = 0; |
7164 | glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&cur_prog); |
7165 | glUseProgram(gl_prog); |
7166 | int gl_tex_slot = 0; |
7167 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
7168 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs; |
7169 | _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index]; |
7170 | for (int img_index = 0; img_index < shd->cmn.stage[stage_index].num_images; img_index++) { |
7171 | const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; |
7172 | SOKOL_ASSERT(img_desc->image_type != _SG_IMAGETYPE_DEFAULT); |
7173 | _sg_gl_shader_image_t* gl_img = &gl_stage->images[img_index]; |
7174 | GLint gl_loc = img_index; |
7175 | if (img_desc->name) { |
7176 | gl_loc = glGetUniformLocation(gl_prog, img_desc->name); |
7177 | } |
7178 | if (gl_loc != -1) { |
7179 | gl_img->gl_tex_slot = gl_tex_slot++; |
7180 | glUniform1i(gl_loc, gl_img->gl_tex_slot); |
7181 | } |
7182 | else { |
7183 | gl_img->gl_tex_slot = -1; |
7184 | } |
7185 | } |
7186 | } |
7187 | /* it's legal to call glUseProgram with 0 */ |
7188 | glUseProgram(cur_prog); |
7189 | _SG_GL_CHECK_ERROR(); |
7190 | return SG_RESOURCESTATE_VALID; |
7191 | } |
7192 | |
7193 | _SOKOL_PRIVATE void _sg_gl_discard_shader(_sg_shader_t* shd) { |
7194 | SOKOL_ASSERT(shd); |
7195 | _SG_GL_CHECK_ERROR(); |
7196 | if (shd->gl.prog) { |
7197 | _sg_gl_cache_invalidate_program(shd->gl.prog); |
7198 | glDeleteProgram(shd->gl.prog); |
7199 | } |
7200 | _SG_GL_CHECK_ERROR(); |
7201 | } |
7202 | |
/* create a pipeline object: copy the render state from desc, and resolve
   the vertex attribute layout against the shader's GL program;
   always returns SG_RESOURCESTATE_VALID */
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(pip && shd && desc);
    SOKOL_ASSERT(!pip->shader && pip->cmn.shader_id.id == SG_INVALID_ID);
    SOKOL_ASSERT(desc->shader.id == shd->slot.id);
    SOKOL_ASSERT(shd->gl.prog);
    pip->shader = shd;
    _sg_pipeline_common_init(&pip->cmn, desc);
    pip->gl.primitive_type = desc->primitive_type;
    pip->gl.depth = desc->depth;
    pip->gl.stencil = desc->stencil;
    // FIXME: blend color and write mask per draw-buffer-attachment (requires GL4)
    pip->gl.blend = desc->colors[0].blend;
    for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
        pip->gl.color_write_mask[i] = desc->colors[i].write_mask;
    }
    pip->gl.cull_mode = desc->cull_mode;
    pip->gl.face_winding = desc->face_winding;
    pip->gl.sample_count = desc->sample_count;
    pip->gl.alpha_to_coverage_enabled = desc->alpha_to_coverage_enabled;

    /* resolve vertex attributes */
    /* mark all attribute slots as unused (vb_index == -1) */
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        pip->gl.attrs[attr_index].vb_index = -1;
    }
    for (int attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) {
        const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
        /* the attribute list is terminated by the first invalid format */
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS);
        const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index];
        const sg_vertex_step step_func = l_desc->step_func;
        const int step_rate = l_desc->step_rate;
        /* default to the attribute's declaration order, but prefer the
           GL location looked up by name when a name was provided */
        GLint attr_loc = attr_index;
        if (!_sg_strempty(&shd->gl.attrs[attr_index].name)) {
            attr_loc = glGetAttribLocation(pip->shader->gl.prog, _sg_strptr(&shd->gl.attrs[attr_index].name));
        }
        SOKOL_ASSERT(attr_loc < (GLint)_sg.limits.max_vertex_attrs);
        /* attr_loc is -1 when the attribute doesn't exist in the linked program */
        if (attr_loc != -1) {
            _sg_gl_attr_t* gl_attr = &pip->gl.attrs[attr_loc];
            SOKOL_ASSERT(gl_attr->vb_index == -1);
            gl_attr->vb_index = (int8_t) a_desc->buffer_index;
            if (step_func == SG_VERTEXSTEP_PER_VERTEX) {
                gl_attr->divisor = 0;
            }
            else {
                /* per-instance attribute: a non-zero divisor implies instanced rendering */
                gl_attr->divisor = (int8_t) step_rate;
                pip->cmn.use_instanced_draw = true;
            }
            /* NOTE: stride is expected to have been patched to a non-zero
               default before this function is called */
            SOKOL_ASSERT(l_desc->stride > 0);
            gl_attr->stride = (uint8_t) l_desc->stride;
            gl_attr->offset = a_desc->offset;
            gl_attr->size = (uint8_t) _sg_gl_vertexformat_size(a_desc->format);
            gl_attr->type = _sg_gl_vertexformat_type(a_desc->format);
            gl_attr->normalized = _sg_gl_vertexformat_normalized(a_desc->format);
            pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
        }
        else {
            /* not a hard error, the attribute may have been optimized out */
            SG_LOG("Vertex attribute not found in shader: ");
            SG_LOG(_sg_strptr(&shd->gl.attrs[attr_index].name));
        }
    }
    return SG_RESOURCESTATE_VALID;
}
7267 | |
7268 | _SOKOL_PRIVATE void _sg_gl_discard_pipeline(_sg_pipeline_t* pip) { |
7269 | SOKOL_ASSERT(pip); |
7270 | _sg_gl_cache_invalidate_pipeline(pip); |
7271 | } |
7272 | |
7273 | /* |
7274 | _sg_create_pass |
7275 | |
7276 | att_imgs must point to a _sg_image* att_imgs[SG_MAX_COLOR_ATTACHMENTS+1] array, |
7277 | first entries are the color attachment images (or nullptr), last entry |
7278 | is the depth-stencil image (or nullptr). |
7279 | */ |
7280 | _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { |
7281 | SOKOL_ASSERT(pass && att_images && desc); |
7282 | SOKOL_ASSERT(att_images && att_images[0]); |
7283 | _SG_GL_CHECK_ERROR(); |
7284 | |
7285 | _sg_pass_common_init(&pass->cmn, desc); |
7286 | |
7287 | /* copy image pointers */ |
7288 | const sg_pass_attachment_desc* att_desc; |
7289 | for (int i = 0; i < pass->cmn.num_color_atts; i++) { |
7290 | att_desc = &desc->color_attachments[i]; |
7291 | SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); |
7292 | SOKOL_ASSERT(0 == pass->gl.color_atts[i].image); |
7293 | SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); |
7294 | SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format)); |
7295 | pass->gl.color_atts[i].image = att_images[i]; |
7296 | } |
7297 | SOKOL_ASSERT(0 == pass->gl.ds_att.image); |
7298 | att_desc = &desc->depth_stencil_attachment; |
7299 | if (att_desc->image.id != SG_INVALID_ID) { |
7300 | const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; |
7301 | SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); |
7302 | SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format)); |
7303 | pass->gl.ds_att.image = att_images[ds_img_index]; |
7304 | } |
7305 | |
7306 | /* store current framebuffer binding (restored at end of function) */ |
7307 | GLuint gl_orig_fb; |
7308 | glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&gl_orig_fb); |
7309 | |
7310 | /* create a framebuffer object */ |
7311 | glGenFramebuffers(1, &pass->gl.fb); |
7312 | glBindFramebuffer(GL_FRAMEBUFFER, pass->gl.fb); |
7313 | |
7314 | /* attach msaa render buffer or textures */ |
7315 | const bool is_msaa = (0 != att_images[0]->gl.msaa_render_buffer); |
7316 | if (is_msaa) { |
7317 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
7318 | const _sg_image_t* att_img = pass->gl.color_atts[i].image; |
7319 | if (att_img) { |
7320 | const GLuint gl_render_buffer = att_img->gl.msaa_render_buffer; |
7321 | SOKOL_ASSERT(gl_render_buffer); |
7322 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, (GLenum)(GL_COLOR_ATTACHMENT0+i), GL_RENDERBUFFER, gl_render_buffer); |
7323 | } |
7324 | } |
7325 | } |
7326 | else { |
7327 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
7328 | const _sg_image_t* att_img = pass->gl.color_atts[i].image; |
7329 | const int mip_level = pass->cmn.color_atts[i].mip_level; |
7330 | const int slice = pass->cmn.color_atts[i].slice; |
7331 | if (att_img) { |
7332 | const GLuint gl_tex = att_img->gl.tex[0]; |
7333 | SOKOL_ASSERT(gl_tex); |
7334 | const GLenum gl_att = (GLenum)(GL_COLOR_ATTACHMENT0 + i); |
7335 | switch (att_img->cmn.type) { |
7336 | case SG_IMAGETYPE_2D: |
7337 | glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, GL_TEXTURE_2D, gl_tex, mip_level); |
7338 | break; |
7339 | case SG_IMAGETYPE_CUBE: |
7340 | glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, _sg_gl_cubeface_target(slice), gl_tex, mip_level); |
7341 | break; |
7342 | default: |
7343 | /* 3D- or array-texture */ |
7344 | #if !defined(SOKOL_GLES2) |
7345 | if (!_sg.gl.gles2) { |
7346 | glFramebufferTextureLayer(GL_FRAMEBUFFER, gl_att, gl_tex, mip_level, slice); |
7347 | } |
7348 | #endif |
7349 | break; |
7350 | } |
7351 | } |
7352 | } |
7353 | } |
7354 | |
7355 | /* attach depth-stencil buffer to framebuffer */ |
7356 | if (pass->gl.ds_att.image) { |
7357 | const GLuint gl_render_buffer = pass->gl.ds_att.image->gl.depth_render_buffer; |
7358 | SOKOL_ASSERT(gl_render_buffer); |
7359 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer); |
7360 | if (_sg_is_depth_stencil_format(pass->gl.ds_att.image->cmn.pixel_format)) { |
7361 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer); |
7362 | } |
7363 | } |
7364 | |
7365 | /* check if framebuffer is complete */ |
7366 | if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { |
7367 | SG_LOG("Framebuffer completeness check failed!\n"); |
7368 | return SG_RESOURCESTATE_FAILED; |
7369 | } |
7370 | |
7371 | /* setup color attachments for the framebuffer */ |
7372 | #if !defined(SOKOL_GLES2) |
7373 | if (!_sg.gl.gles2) { |
7374 | GLenum att[SG_MAX_COLOR_ATTACHMENTS] = { |
7375 | GL_COLOR_ATTACHMENT0, |
7376 | GL_COLOR_ATTACHMENT1, |
7377 | GL_COLOR_ATTACHMENT2, |
7378 | GL_COLOR_ATTACHMENT3 |
7379 | }; |
7380 | glDrawBuffers(pass->cmn.num_color_atts, att); |
7381 | } |
7382 | #endif |
7383 | |
7384 | /* create MSAA resolve framebuffers if necessary */ |
7385 | if (is_msaa) { |
7386 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
7387 | _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[i]; |
7388 | _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; |
7389 | if (gl_att->image) { |
7390 | SOKOL_ASSERT(0 == gl_att->gl_msaa_resolve_buffer); |
7391 | glGenFramebuffers(1, &gl_att->gl_msaa_resolve_buffer); |
7392 | glBindFramebuffer(GL_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer); |
7393 | const GLuint gl_tex = gl_att->image->gl.tex[0]; |
7394 | SOKOL_ASSERT(gl_tex); |
7395 | switch (gl_att->image->cmn.type) { |
7396 | case SG_IMAGETYPE_2D: |
7397 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, |
7398 | GL_TEXTURE_2D, gl_tex, cmn_att->mip_level); |
7399 | break; |
7400 | case SG_IMAGETYPE_CUBE: |
7401 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, |
7402 | _sg_gl_cubeface_target(cmn_att->slice), gl_tex, cmn_att->mip_level); |
7403 | break; |
7404 | default: |
7405 | #if !defined(SOKOL_GLES2) |
7406 | if (!_sg.gl.gles2) { |
7407 | glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, gl_tex, cmn_att->mip_level, cmn_att->slice); |
7408 | } |
7409 | #endif |
7410 | break; |
7411 | } |
7412 | /* check if framebuffer is complete */ |
7413 | if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { |
7414 | SG_LOG("Framebuffer completeness check failed (msaa resolve buffer)!\n"); |
7415 | return SG_RESOURCESTATE_FAILED; |
7416 | } |
7417 | /* setup color attachments for the framebuffer */ |
7418 | #if !defined(SOKOL_GLES2) |
7419 | if (!_sg.gl.gles2) { |
7420 | const GLenum gl_draw_bufs = GL_COLOR_ATTACHMENT0; |
7421 | glDrawBuffers(1, &gl_draw_bufs); |
7422 | } |
7423 | #endif |
7424 | } |
7425 | } |
7426 | } |
7427 | |
7428 | /* restore original framebuffer binding */ |
7429 | glBindFramebuffer(GL_FRAMEBUFFER, gl_orig_fb); |
7430 | _SG_GL_CHECK_ERROR(); |
7431 | return SG_RESOURCESTATE_VALID; |
7432 | } |
7433 | |
7434 | _SOKOL_PRIVATE void _sg_gl_discard_pass(_sg_pass_t* pass) { |
7435 | SOKOL_ASSERT(pass); |
7436 | SOKOL_ASSERT(pass != _sg.gl.cur_pass); |
7437 | _SG_GL_CHECK_ERROR(); |
7438 | if (0 != pass->gl.fb) { |
7439 | glDeleteFramebuffers(1, &pass->gl.fb); |
7440 | } |
7441 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
7442 | if (pass->gl.color_atts[i].gl_msaa_resolve_buffer) { |
7443 | glDeleteFramebuffers(1, &pass->gl.color_atts[i].gl_msaa_resolve_buffer); |
7444 | } |
7445 | } |
7446 | if (pass->gl.ds_att.gl_msaa_resolve_buffer) { |
7447 | glDeleteFramebuffers(1, &pass->gl.ds_att.gl_msaa_resolve_buffer); |
7448 | } |
7449 | _SG_GL_CHECK_ERROR(); |
7450 | } |
7451 | |
7452 | _SOKOL_PRIVATE _sg_image_t* _sg_gl_pass_color_image(const _sg_pass_t* pass, int index) { |
7453 | SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); |
7454 | /* NOTE: may return null */ |
7455 | return pass->gl.color_atts[index].image; |
7456 | } |
7457 | |
7458 | _SOKOL_PRIVATE _sg_image_t* _sg_gl_pass_ds_image(const _sg_pass_t* pass) { |
7459 | /* NOTE: may return null */ |
7460 | SOKOL_ASSERT(pass); |
7461 | return pass->gl.ds_att.image; |
7462 | } |
7463 | |
/* begin a render pass: bind the pass framebuffer (or the context's default
   framebuffer when pass is 0), set viewport/scissor to cover the whole
   target, and perform the clear operations requested in 'action';
   w/h are the render target dimensions in pixels */
_SOKOL_PRIVATE void _sg_gl_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    /* FIXME: what if a texture used as render target is still bound, should we
       unbind all currently bound textures in begin pass? */
    SOKOL_ASSERT(action);
    SOKOL_ASSERT(!_sg.gl.in_pass);
    _SG_GL_CHECK_ERROR();
    _sg.gl.in_pass = true;
    _sg.gl.cur_pass = pass; /* can be 0 */
    if (pass) {
        _sg.gl.cur_pass_id.id = pass->slot.id;
    }
    else {
        _sg.gl.cur_pass_id.id = SG_INVALID_ID;
    }
    _sg.gl.cur_pass_width = w;
    _sg.gl.cur_pass_height = h;

    /* number of color attachments (the default pass always has exactly one) */
    const int num_color_atts = pass ? pass->cmn.num_color_atts : 1;

    /* bind the render pass framebuffer */
    if (pass) {
        /* offscreen pass */
        SOKOL_ASSERT(pass->gl.fb);
        glBindFramebuffer(GL_FRAMEBUFFER, pass->gl.fb);
    }
    else {
        /* default pass */
        SOKOL_ASSERT(_sg.gl.cur_context);
        glBindFramebuffer(GL_FRAMEBUFFER, _sg.gl.cur_context->default_framebuffer);
    }
    glViewport(0, 0, w, h);
    glScissor(0, 0, w, h);

    /* clear color and depth-stencil attachments if needed */
    bool clear_color = false;
    for (int i = 0; i < num_color_atts; i++) {
        if (SG_ACTION_CLEAR == action->colors[i].action) {
            clear_color = true;
            break;
        }
    }
    const bool clear_depth = (action->depth.action == SG_ACTION_CLEAR);
    const bool clear_stencil = (action->stencil.action == SG_ACTION_CLEAR);

    /* GL clear operations are affected by the current write masks and
       depth func, so those may need to be forced to permissive values
       first (going through the state cache to avoid redundant GL calls) */
    bool need_pip_cache_flush = false;
    if (clear_color) {
        bool need_color_mask_flush = false;
        // NOTE: not a bug to iterate over all possible color attachments
        for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            if (SG_COLORMASK_RGBA != _sg.gl.cache.color_write_mask[i]) {
                need_pip_cache_flush = true;
                need_color_mask_flush = true;
                _sg.gl.cache.color_write_mask[i] = SG_COLORMASK_RGBA;
            }
        }
        if (need_color_mask_flush) {
            glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        }
    }
    if (clear_depth) {
        if (!_sg.gl.cache.depth.write_enabled) {
            need_pip_cache_flush = true;
            _sg.gl.cache.depth.write_enabled = true;
            glDepthMask(GL_TRUE);
        }
        if (_sg.gl.cache.depth.compare != SG_COMPAREFUNC_ALWAYS) {
            need_pip_cache_flush = true;
            _sg.gl.cache.depth.compare = SG_COMPAREFUNC_ALWAYS;
            glDepthFunc(GL_ALWAYS);
        }
    }
    if (clear_stencil) {
        if (_sg.gl.cache.stencil.write_mask != 0xFF) {
            need_pip_cache_flush = true;
            _sg.gl.cache.stencil.write_mask = 0xFF;
            glStencilMask(0xFF);
        }
    }
    if (need_pip_cache_flush) {
        /* we messed with the state cache directly, need to clear cached
           pipeline to force re-evaluation in next sg_apply_pipeline() */
        _sg.gl.cache.cur_pipeline = 0;
        _sg.gl.cache.cur_pipeline_id.id = SG_INVALID_ID;
    }
    /* per-attachment clears (glClearBuffer*) are only possible for
       offscreen passes on GL3/GLES3; the default pass and GLES2 use the
       single glClear path instead */
    bool use_mrt_clear = (0 != pass);
    #if defined(SOKOL_GLES2)
    use_mrt_clear = false;
    #else
    if (_sg.gl.gles2) {
        use_mrt_clear = false;
    }
    #endif
    if (!use_mrt_clear) {
        GLbitfield clear_mask = 0;
        if (clear_color) {
            clear_mask |= GL_COLOR_BUFFER_BIT;
            /* only the first color attachment's clear value can be used here */
            const sg_color c = action->colors[0].value;
            glClearColor(c.r, c.g, c.b, c.a);
        }
        if (clear_depth) {
            clear_mask |= GL_DEPTH_BUFFER_BIT;
            /* glClearDepth (double) on desktop GL, glClearDepthf on GLES */
            #ifdef SOKOL_GLCORE33
            glClearDepth(action->depth.value);
            #else
            glClearDepthf(action->depth.value);
            #endif
        }
        if (clear_stencil) {
            clear_mask |= GL_STENCIL_BUFFER_BIT;
            glClearStencil(action->stencil.value);
        }
        if (0 != clear_mask) {
            glClear(clear_mask);
        }
    }
    #if !defined SOKOL_GLES2
    else {
        /* MRT clear path: clear each color attachment with its own value */
        SOKOL_ASSERT(pass);
        for (int i = 0; i < num_color_atts; i++) {
            if (action->colors[i].action == SG_ACTION_CLEAR) {
                glClearBufferfv(GL_COLOR, i, &action->colors[i].value.r);
            }
        }
        if (pass->gl.ds_att.image) {
            if (clear_depth && clear_stencil) {
                glClearBufferfi(GL_DEPTH_STENCIL, 0, action->depth.value, action->stencil.value);
            }
            else if (clear_depth) {
                glClearBufferfv(GL_DEPTH, 0, &action->depth.value);
            }
            else if (clear_stencil) {
                GLint val = (GLint) action->stencil.value;
                glClearBufferiv(GL_STENCIL, 0, &val);
            }
        }
    }
    #endif
    _SG_GL_CHECK_ERROR();
}
7604 | |
/* finish the current render pass: perform the MSAA-resolve blit for
   offscreen MSAA passes, reset the current-pass tracking state, and
   rebind the context's default framebuffer */
_SOKOL_PRIVATE void _sg_gl_end_pass(void) {
    SOKOL_ASSERT(_sg.gl.in_pass);
    _SG_GL_CHECK_ERROR();

    /* if this was an offscreen pass, and MSAA rendering was used, need
       to resolve into the pass images */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2 && _sg.gl.cur_pass) {
        /* check if the pass object is still valid */
        const _sg_pass_t* pass = _sg.gl.cur_pass;
        SOKOL_ASSERT(pass->slot.id == _sg.gl.cur_pass_id.id);
        /* a resolve framebuffer on the first attachment implies MSAA was used */
        bool is_msaa = (0 != _sg.gl.cur_pass->gl.color_atts[0].gl_msaa_resolve_buffer);
        if (is_msaa) {
            SOKOL_ASSERT(pass->gl.fb);
            glBindFramebuffer(GL_READ_FRAMEBUFFER, pass->gl.fb);
            SOKOL_ASSERT(pass->gl.color_atts[0].image);
            const int w = pass->gl.color_atts[0].image->cmn.width;
            const int h = pass->gl.color_atts[0].image->cmn.height;
            /* blit each MSAA color attachment into its resolve framebuffer
               (which has the attachment texture as color target) */
            for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) {
                const _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[att_index];
                if (gl_att->image) {
                    SOKOL_ASSERT(gl_att->gl_msaa_resolve_buffer);
                    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer);
                    glReadBuffer((GLenum)(GL_COLOR_ATTACHMENT0 + att_index));
                    glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
                }
                else {
                    /* attachments are packed, first empty slot ends the list */
                    break;
                }
            }
        }
    }
    #endif
    _sg.gl.cur_pass = 0;
    _sg.gl.cur_pass_id.id = SG_INVALID_ID;
    _sg.gl.cur_pass_width = 0;
    _sg.gl.cur_pass_height = 0;

    SOKOL_ASSERT(_sg.gl.cur_context);
    glBindFramebuffer(GL_FRAMEBUFFER, _sg.gl.cur_context->default_framebuffer);
    _sg.gl.in_pass = false;
    _SG_GL_CHECK_ERROR();
}
7648 | |
7649 | _SOKOL_PRIVATE void _sg_gl_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { |
7650 | SOKOL_ASSERT(_sg.gl.in_pass); |
7651 | y = origin_top_left ? (_sg.gl.cur_pass_height - (y+h)) : y; |
7652 | glViewport(x, y, w, h); |
7653 | } |
7654 | |
7655 | _SOKOL_PRIVATE void _sg_gl_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { |
7656 | SOKOL_ASSERT(_sg.gl.in_pass); |
7657 | y = origin_top_left ? (_sg.gl.cur_pass_height - (y+h)) : y; |
7658 | glScissor(x, y, w, h); |
7659 | } |
7660 | |
7661 | _SOKOL_PRIVATE void _sg_gl_apply_pipeline(_sg_pipeline_t* pip) { |
7662 | SOKOL_ASSERT(pip); |
7663 | SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); |
7664 | _SG_GL_CHECK_ERROR(); |
7665 | if ((_sg.gl.cache.cur_pipeline != pip) || (_sg.gl.cache.cur_pipeline_id.id != pip->slot.id)) { |
7666 | _sg.gl.cache.cur_pipeline = pip; |
7667 | _sg.gl.cache.cur_pipeline_id.id = pip->slot.id; |
7668 | _sg.gl.cache.cur_primitive_type = _sg_gl_primitive_type(pip->gl.primitive_type); |
7669 | _sg.gl.cache.cur_index_type = _sg_gl_index_type(pip->cmn.index_type); |
7670 | |
7671 | /* update depth state */ |
7672 | { |
7673 | const sg_depth_state* state_ds = &pip->gl.depth; |
7674 | sg_depth_state* cache_ds = &_sg.gl.cache.depth; |
7675 | if (state_ds->compare != cache_ds->compare) { |
7676 | cache_ds->compare = state_ds->compare; |
7677 | glDepthFunc(_sg_gl_compare_func(state_ds->compare)); |
7678 | } |
7679 | if (state_ds->write_enabled != cache_ds->write_enabled) { |
7680 | cache_ds->write_enabled = state_ds->write_enabled; |
7681 | glDepthMask(state_ds->write_enabled); |
7682 | } |
7683 | if (!_sg_fequal(state_ds->bias, cache_ds->bias, 0.000001f) || |
7684 | !_sg_fequal(state_ds->bias_slope_scale, cache_ds->bias_slope_scale, 0.000001f)) |
7685 | { |
7686 | /* according to ANGLE's D3D11 backend: |
7687 | D3D11 SlopeScaledDepthBias ==> GL polygonOffsetFactor |
7688 | D3D11 DepthBias ==> GL polygonOffsetUnits |
7689 | DepthBiasClamp has no meaning on GL |
7690 | */ |
7691 | cache_ds->bias = state_ds->bias; |
7692 | cache_ds->bias_slope_scale = state_ds->bias_slope_scale; |
7693 | glPolygonOffset(state_ds->bias_slope_scale, state_ds->bias); |
7694 | bool po_enabled = true; |
7695 | if (_sg_fequal(state_ds->bias, 0.0f, 0.000001f) && |
7696 | _sg_fequal(state_ds->bias_slope_scale, 0.0f, 0.000001f)) |
7697 | { |
7698 | po_enabled = false; |
7699 | } |
7700 | if (po_enabled != _sg.gl.cache.polygon_offset_enabled) { |
7701 | _sg.gl.cache.polygon_offset_enabled = po_enabled; |
7702 | if (po_enabled) { |
7703 | glEnable(GL_POLYGON_OFFSET_FILL); |
7704 | } |
7705 | else { |
7706 | glDisable(GL_POLYGON_OFFSET_FILL); |
7707 | } |
7708 | } |
7709 | } |
7710 | } |
7711 | |
7712 | /* update stencil state */ |
7713 | { |
7714 | const sg_stencil_state* state_ss = &pip->gl.stencil; |
7715 | sg_stencil_state* cache_ss = &_sg.gl.cache.stencil; |
7716 | if (state_ss->enabled != cache_ss->enabled) { |
7717 | cache_ss->enabled = state_ss->enabled; |
7718 | if (state_ss->enabled) { |
7719 | glEnable(GL_STENCIL_TEST); |
7720 | } |
7721 | else { |
7722 | glDisable(GL_STENCIL_TEST); |
7723 | } |
7724 | } |
7725 | if (state_ss->write_mask != cache_ss->write_mask) { |
7726 | cache_ss->write_mask = state_ss->write_mask; |
7727 | glStencilMask(state_ss->write_mask); |
7728 | } |
7729 | for (int i = 0; i < 2; i++) { |
7730 | const sg_stencil_face_state* state_sfs = (i==0)? &state_ss->front : &state_ss->back; |
7731 | sg_stencil_face_state* cache_sfs = (i==0)? &cache_ss->front : &cache_ss->back; |
7732 | GLenum gl_face = (i==0)? GL_FRONT : GL_BACK; |
7733 | if ((state_sfs->compare != cache_sfs->compare) || |
7734 | (state_ss->read_mask != cache_ss->read_mask) || |
7735 | (state_ss->ref != cache_ss->ref)) |
7736 | { |
7737 | cache_sfs->compare = state_sfs->compare; |
7738 | glStencilFuncSeparate(gl_face, |
7739 | _sg_gl_compare_func(state_sfs->compare), |
7740 | state_ss->ref, |
7741 | state_ss->read_mask); |
7742 | } |
7743 | if ((state_sfs->fail_op != cache_sfs->fail_op) || |
7744 | (state_sfs->depth_fail_op != cache_sfs->depth_fail_op) || |
7745 | (state_sfs->pass_op != cache_sfs->pass_op)) |
7746 | { |
7747 | cache_sfs->fail_op = state_sfs->fail_op; |
7748 | cache_sfs->depth_fail_op = state_sfs->depth_fail_op; |
7749 | cache_sfs->pass_op = state_sfs->pass_op; |
7750 | glStencilOpSeparate(gl_face, |
7751 | _sg_gl_stencil_op(state_sfs->fail_op), |
7752 | _sg_gl_stencil_op(state_sfs->depth_fail_op), |
7753 | _sg_gl_stencil_op(state_sfs->pass_op)); |
7754 | } |
7755 | } |
7756 | cache_ss->read_mask = state_ss->read_mask; |
7757 | cache_ss->ref = state_ss->ref; |
7758 | } |
7759 | |
7760 | /* update blend state |
7761 | FIXME: separate blend state per color attachment not support, needs GL4 |
7762 | */ |
7763 | { |
7764 | const sg_blend_state* state_bs = &pip->gl.blend; |
7765 | sg_blend_state* cache_bs = &_sg.gl.cache.blend; |
7766 | if (state_bs->enabled != cache_bs->enabled) { |
7767 | cache_bs->enabled = state_bs->enabled; |
7768 | if (state_bs->enabled) { |
7769 | glEnable(GL_BLEND); |
7770 | } |
7771 | else { |
7772 | glDisable(GL_BLEND); |
7773 | } |
7774 | } |
7775 | if ((state_bs->src_factor_rgb != cache_bs->src_factor_rgb) || |
7776 | (state_bs->dst_factor_rgb != cache_bs->dst_factor_rgb) || |
7777 | (state_bs->src_factor_alpha != cache_bs->src_factor_alpha) || |
7778 | (state_bs->dst_factor_alpha != cache_bs->dst_factor_alpha)) |
7779 | { |
7780 | cache_bs->src_factor_rgb = state_bs->src_factor_rgb; |
7781 | cache_bs->dst_factor_rgb = state_bs->dst_factor_rgb; |
7782 | cache_bs->src_factor_alpha = state_bs->src_factor_alpha; |
7783 | cache_bs->dst_factor_alpha = state_bs->dst_factor_alpha; |
7784 | glBlendFuncSeparate(_sg_gl_blend_factor(state_bs->src_factor_rgb), |
7785 | _sg_gl_blend_factor(state_bs->dst_factor_rgb), |
7786 | _sg_gl_blend_factor(state_bs->src_factor_alpha), |
7787 | _sg_gl_blend_factor(state_bs->dst_factor_alpha)); |
7788 | } |
7789 | if ((state_bs->op_rgb != cache_bs->op_rgb) || (state_bs->op_alpha != cache_bs->op_alpha)) { |
7790 | cache_bs->op_rgb = state_bs->op_rgb; |
7791 | cache_bs->op_alpha = state_bs->op_alpha; |
7792 | glBlendEquationSeparate(_sg_gl_blend_op(state_bs->op_rgb), _sg_gl_blend_op(state_bs->op_alpha)); |
7793 | } |
7794 | } |
7795 | |
7796 | /* standalone state */ |
7797 | for (GLuint i = 0; i < (GLuint)pip->cmn.color_attachment_count; i++) { |
7798 | if (pip->gl.color_write_mask[i] != _sg.gl.cache.color_write_mask[i]) { |
7799 | const sg_color_mask cm = pip->gl.color_write_mask[i]; |
7800 | _sg.gl.cache.color_write_mask[i] = cm; |
7801 | #ifdef SOKOL_GLCORE33 |
7802 | glColorMaski(i, |
7803 | (cm & SG_COLORMASK_R) != 0, |
7804 | (cm & SG_COLORMASK_G) != 0, |
7805 | (cm & SG_COLORMASK_B) != 0, |
7806 | (cm & SG_COLORMASK_A) != 0); |
7807 | #else |
7808 | if (0 == i) { |
7809 | glColorMask((cm & SG_COLORMASK_R) != 0, |
7810 | (cm & SG_COLORMASK_G) != 0, |
7811 | (cm & SG_COLORMASK_B) != 0, |
7812 | (cm & SG_COLORMASK_A) != 0); |
7813 | } |
7814 | #endif |
7815 | } |
7816 | } |
7817 | |
7818 | if (!_sg_fequal(pip->cmn.blend_color.r, _sg.gl.cache.blend_color.r, 0.0001f) || |
7819 | !_sg_fequal(pip->cmn.blend_color.g, _sg.gl.cache.blend_color.g, 0.0001f) || |
7820 | !_sg_fequal(pip->cmn.blend_color.b, _sg.gl.cache.blend_color.b, 0.0001f) || |
7821 | !_sg_fequal(pip->cmn.blend_color.a, _sg.gl.cache.blend_color.a, 0.0001f)) |
7822 | { |
7823 | sg_color c = pip->cmn.blend_color; |
7824 | _sg.gl.cache.blend_color = c; |
7825 | glBlendColor(c.r, c.g, c.b, c.a); |
7826 | } |
7827 | if (pip->gl.cull_mode != _sg.gl.cache.cull_mode) { |
7828 | _sg.gl.cache.cull_mode = pip->gl.cull_mode; |
7829 | if (SG_CULLMODE_NONE == pip->gl.cull_mode) { |
7830 | glDisable(GL_CULL_FACE); |
7831 | } |
7832 | else { |
7833 | glEnable(GL_CULL_FACE); |
7834 | GLenum gl_mode = (SG_CULLMODE_FRONT == pip->gl.cull_mode) ? GL_FRONT : GL_BACK; |
7835 | glCullFace(gl_mode); |
7836 | } |
7837 | } |
7838 | if (pip->gl.face_winding != _sg.gl.cache.face_winding) { |
7839 | _sg.gl.cache.face_winding = pip->gl.face_winding; |
7840 | GLenum gl_winding = (SG_FACEWINDING_CW == pip->gl.face_winding) ? GL_CW : GL_CCW; |
7841 | glFrontFace(gl_winding); |
7842 | } |
7843 | if (pip->gl.alpha_to_coverage_enabled != _sg.gl.cache.alpha_to_coverage_enabled) { |
7844 | _sg.gl.cache.alpha_to_coverage_enabled = pip->gl.alpha_to_coverage_enabled; |
7845 | if (pip->gl.alpha_to_coverage_enabled) { |
7846 | glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE); |
7847 | } |
7848 | else { |
7849 | glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); |
7850 | } |
7851 | } |
7852 | #ifdef SOKOL_GLCORE33 |
7853 | if (pip->gl.sample_count != _sg.gl.cache.sample_count) { |
7854 | _sg.gl.cache.sample_count = pip->gl.sample_count; |
7855 | if (pip->gl.sample_count > 1) { |
7856 | glEnable(GL_MULTISAMPLE); |
7857 | } |
7858 | else { |
7859 | glDisable(GL_MULTISAMPLE); |
7860 | } |
7861 | } |
7862 | #endif |
7863 | |
7864 | /* bind shader program */ |
7865 | if (pip->shader->gl.prog != _sg.gl.cache.prog) { |
7866 | _sg.gl.cache.prog = pip->shader->gl.prog; |
7867 | glUseProgram(pip->shader->gl.prog); |
7868 | } |
7869 | } |
7870 | _SG_GL_CHECK_ERROR(); |
7871 | } |
7872 | |
7873 | _SOKOL_PRIVATE void _sg_gl_apply_bindings( |
7874 | _sg_pipeline_t* pip, |
7875 | _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, |
7876 | _sg_buffer_t* ib, int ib_offset, |
7877 | _sg_image_t** vs_imgs, int num_vs_imgs, |
7878 | _sg_image_t** fs_imgs, int num_fs_imgs) |
7879 | { |
7880 | SOKOL_ASSERT(pip); |
7881 | _SOKOL_UNUSED(num_fs_imgs); |
7882 | _SOKOL_UNUSED(num_vs_imgs); |
7883 | _SOKOL_UNUSED(num_vbs); |
7884 | _SG_GL_CHECK_ERROR(); |
7885 | |
7886 | /* bind textures */ |
7887 | _SG_GL_CHECK_ERROR(); |
7888 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
7889 | const _sg_shader_stage_t* stage = &pip->shader->cmn.stage[stage_index]; |
7890 | const _sg_gl_shader_stage_t* gl_stage = &pip->shader->gl.stage[stage_index]; |
7891 | _sg_image_t** imgs = (stage_index == SG_SHADERSTAGE_VS)? vs_imgs : fs_imgs; |
7892 | SOKOL_ASSERT(((stage_index == SG_SHADERSTAGE_VS)? num_vs_imgs : num_fs_imgs) == stage->num_images); |
7893 | for (int img_index = 0; img_index < stage->num_images; img_index++) { |
7894 | const _sg_gl_shader_image_t* gl_shd_img = &gl_stage->images[img_index]; |
7895 | if (gl_shd_img->gl_tex_slot != -1) { |
7896 | _sg_image_t* img = imgs[img_index]; |
7897 | const GLuint gl_tex = img->gl.tex[img->cmn.active_slot]; |
7898 | SOKOL_ASSERT(img && img->gl.target); |
7899 | SOKOL_ASSERT((gl_shd_img->gl_tex_slot != -1) && gl_tex); |
7900 | _sg_gl_cache_bind_texture(gl_shd_img->gl_tex_slot, img->gl.target, gl_tex); |
7901 | } |
7902 | } |
7903 | } |
7904 | _SG_GL_CHECK_ERROR(); |
7905 | |
7906 | /* index buffer (can be 0) */ |
7907 | const GLuint gl_ib = ib ? ib->gl.buf[ib->cmn.active_slot] : 0; |
7908 | _sg_gl_cache_bind_buffer(GL_ELEMENT_ARRAY_BUFFER, gl_ib); |
7909 | _sg.gl.cache.cur_ib_offset = ib_offset; |
7910 | |
7911 | /* vertex attributes */ |
7912 | for (GLuint attr_index = 0; attr_index < (GLuint)_sg.limits.max_vertex_attrs; attr_index++) { |
7913 | _sg_gl_attr_t* attr = &pip->gl.attrs[attr_index]; |
7914 | _sg_gl_cache_attr_t* cache_attr = &_sg.gl.cache.attrs[attr_index]; |
7915 | bool cache_attr_dirty = false; |
7916 | int vb_offset = 0; |
7917 | GLuint gl_vb = 0; |
7918 | if (attr->vb_index >= 0) { |
7919 | /* attribute is enabled */ |
7920 | SOKOL_ASSERT(attr->vb_index < num_vbs); |
7921 | _sg_buffer_t* vb = vbs[attr->vb_index]; |
7922 | SOKOL_ASSERT(vb); |
7923 | gl_vb = vb->gl.buf[vb->cmn.active_slot]; |
7924 | vb_offset = vb_offsets[attr->vb_index] + attr->offset; |
7925 | if ((gl_vb != cache_attr->gl_vbuf) || |
7926 | (attr->size != cache_attr->gl_attr.size) || |
7927 | (attr->type != cache_attr->gl_attr.type) || |
7928 | (attr->normalized != cache_attr->gl_attr.normalized) || |
7929 | (attr->stride != cache_attr->gl_attr.stride) || |
7930 | (vb_offset != cache_attr->gl_attr.offset) || |
7931 | (cache_attr->gl_attr.divisor != attr->divisor)) |
7932 | { |
7933 | _sg_gl_cache_bind_buffer(GL_ARRAY_BUFFER, gl_vb); |
7934 | glVertexAttribPointer(attr_index, attr->size, attr->type, |
7935 | attr->normalized, attr->stride, |
7936 | (const GLvoid*)(GLintptr)vb_offset); |
7937 | #if defined(_SOKOL_GL_INSTANCING_ENABLED) |
7938 | if (_sg.features.instancing) { |
7939 | glVertexAttribDivisor(attr_index, (GLuint)attr->divisor); |
7940 | } |
7941 | #endif |
7942 | cache_attr_dirty = true; |
7943 | } |
7944 | if (cache_attr->gl_attr.vb_index == -1) { |
7945 | glEnableVertexAttribArray(attr_index); |
7946 | cache_attr_dirty = true; |
7947 | } |
7948 | } |
7949 | else { |
7950 | /* attribute is disabled */ |
7951 | if (cache_attr->gl_attr.vb_index != -1) { |
7952 | glDisableVertexAttribArray(attr_index); |
7953 | cache_attr_dirty = true; |
7954 | } |
7955 | } |
7956 | if (cache_attr_dirty) { |
7957 | cache_attr->gl_attr = *attr; |
7958 | cache_attr->gl_attr.offset = vb_offset; |
7959 | cache_attr->gl_vbuf = gl_vb; |
7960 | } |
7961 | } |
7962 | _SG_GL_CHECK_ERROR(); |
7963 | } |
7964 | |
7965 | _SOKOL_PRIVATE void _sg_gl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { |
7966 | SOKOL_ASSERT(_sg.gl.cache.cur_pipeline); |
7967 | SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->slot.id == _sg.gl.cache.cur_pipeline_id.id); |
7968 | SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->slot.id == _sg.gl.cache.cur_pipeline->cmn.shader_id.id); |
7969 | SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks > ub_index); |
7970 | SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size == data->size); |
7971 | const _sg_gl_shader_stage_t* gl_stage = &_sg.gl.cache.cur_pipeline->shader->gl.stage[stage_index]; |
7972 | const _sg_gl_uniform_block_t* gl_ub = &gl_stage->uniform_blocks[ub_index]; |
7973 | for (int u_index = 0; u_index < gl_ub->num_uniforms; u_index++) { |
7974 | const _sg_gl_uniform_t* u = &gl_ub->uniforms[u_index]; |
7975 | SOKOL_ASSERT(u->type != SG_UNIFORMTYPE_INVALID); |
7976 | if (u->gl_loc == -1) { |
7977 | continue; |
7978 | } |
7979 | GLfloat* fptr = (GLfloat*) (((uint8_t*)data->ptr) + u->offset); |
7980 | GLint* iptr = (GLint*) (((uint8_t*)data->ptr) + u->offset); |
7981 | switch (u->type) { |
7982 | case SG_UNIFORMTYPE_INVALID: |
7983 | break; |
7984 | case SG_UNIFORMTYPE_FLOAT: |
7985 | glUniform1fv(u->gl_loc, u->count, fptr); |
7986 | break; |
7987 | case SG_UNIFORMTYPE_FLOAT2: |
7988 | glUniform2fv(u->gl_loc, u->count, fptr); |
7989 | break; |
7990 | case SG_UNIFORMTYPE_FLOAT3: |
7991 | glUniform3fv(u->gl_loc, u->count, fptr); |
7992 | break; |
7993 | case SG_UNIFORMTYPE_FLOAT4: |
7994 | glUniform4fv(u->gl_loc, u->count, fptr); |
7995 | break; |
7996 | case SG_UNIFORMTYPE_INT: |
7997 | glUniform1iv(u->gl_loc, u->count, iptr); |
7998 | break; |
7999 | case SG_UNIFORMTYPE_INT2: |
8000 | glUniform2iv(u->gl_loc, u->count, iptr); |
8001 | break; |
8002 | case SG_UNIFORMTYPE_INT3: |
8003 | glUniform3iv(u->gl_loc, u->count, iptr); |
8004 | break; |
8005 | case SG_UNIFORMTYPE_INT4: |
8006 | glUniform4iv(u->gl_loc, u->count, iptr); |
8007 | break; |
8008 | case SG_UNIFORMTYPE_MAT4: |
8009 | glUniformMatrix4fv(u->gl_loc, u->count, GL_FALSE, fptr); |
8010 | break; |
8011 | default: |
8012 | SOKOL_UNREACHABLE; |
8013 | break; |
8014 | } |
8015 | } |
8016 | } |
8017 | |
8018 | _SOKOL_PRIVATE void _sg_gl_draw(int base_element, int num_elements, int num_instances) { |
8019 | SOKOL_ASSERT(_sg.gl.cache.cur_pipeline); |
8020 | const GLenum i_type = _sg.gl.cache.cur_index_type; |
8021 | const GLenum p_type = _sg.gl.cache.cur_primitive_type; |
8022 | if (0 != i_type) { |
8023 | /* indexed rendering */ |
8024 | const int i_size = (i_type == GL_UNSIGNED_SHORT) ? 2 : 4; |
8025 | const int ib_offset = _sg.gl.cache.cur_ib_offset; |
8026 | const GLvoid* indices = (const GLvoid*)(GLintptr)(base_element*i_size+ib_offset); |
8027 | if (_sg.gl.cache.cur_pipeline->cmn.use_instanced_draw) { |
8028 | if (_sg.features.instancing) { |
8029 | glDrawElementsInstanced(p_type, num_elements, i_type, indices, num_instances); |
8030 | } |
8031 | } |
8032 | else { |
8033 | glDrawElements(p_type, num_elements, i_type, indices); |
8034 | } |
8035 | } |
8036 | else { |
8037 | /* non-indexed rendering */ |
8038 | if (_sg.gl.cache.cur_pipeline->cmn.use_instanced_draw) { |
8039 | if (_sg.features.instancing) { |
8040 | glDrawArraysInstanced(p_type, base_element, num_elements, num_instances); |
8041 | } |
8042 | } |
8043 | else { |
8044 | glDrawArrays(p_type, base_element, num_elements); |
8045 | } |
8046 | } |
8047 | } |
8048 | |
8049 | _SOKOL_PRIVATE void _sg_gl_commit(void) { |
8050 | SOKOL_ASSERT(!_sg.gl.in_pass); |
8051 | /* "soft" clear bindings (only those that are actually bound) */ |
8052 | _sg_gl_cache_clear_buffer_bindings(false); |
8053 | _sg_gl_cache_clear_texture_bindings(false); |
8054 | } |
8055 | |
8056 | _SOKOL_PRIVATE void _sg_gl_update_buffer(_sg_buffer_t* buf, const sg_range* data) { |
8057 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
8058 | /* only one update per buffer per frame allowed */ |
8059 | if (++buf->cmn.active_slot >= buf->cmn.num_slots) { |
8060 | buf->cmn.active_slot = 0; |
8061 | } |
8062 | GLenum gl_tgt = _sg_gl_buffer_target(buf->cmn.type); |
8063 | SOKOL_ASSERT(buf->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES); |
8064 | GLuint gl_buf = buf->gl.buf[buf->cmn.active_slot]; |
8065 | SOKOL_ASSERT(gl_buf); |
8066 | _SG_GL_CHECK_ERROR(); |
8067 | _sg_gl_cache_store_buffer_binding(gl_tgt); |
8068 | _sg_gl_cache_bind_buffer(gl_tgt, gl_buf); |
8069 | glBufferSubData(gl_tgt, 0, (GLsizeiptr)data->size, data->ptr); |
8070 | _sg_gl_cache_restore_buffer_binding(gl_tgt); |
8071 | _SG_GL_CHECK_ERROR(); |
8072 | } |
8073 | |
8074 | _SOKOL_PRIVATE int _sg_gl_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { |
8075 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
8076 | if (new_frame) { |
8077 | if (++buf->cmn.active_slot >= buf->cmn.num_slots) { |
8078 | buf->cmn.active_slot = 0; |
8079 | } |
8080 | } |
8081 | GLenum gl_tgt = _sg_gl_buffer_target(buf->cmn.type); |
8082 | SOKOL_ASSERT(buf->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES); |
8083 | GLuint gl_buf = buf->gl.buf[buf->cmn.active_slot]; |
8084 | SOKOL_ASSERT(gl_buf); |
8085 | _SG_GL_CHECK_ERROR(); |
8086 | _sg_gl_cache_store_buffer_binding(gl_tgt); |
8087 | _sg_gl_cache_bind_buffer(gl_tgt, gl_buf); |
8088 | glBufferSubData(gl_tgt, buf->cmn.append_pos, (GLsizeiptr)data->size, data->ptr); |
8089 | _sg_gl_cache_restore_buffer_binding(gl_tgt); |
8090 | _SG_GL_CHECK_ERROR(); |
8091 | /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ |
8092 | return _sg_roundup((int)data->size, 4); |
8093 | } |
8094 | |
/* upload new pixel data for all faces and mip levels of a dynamic/stream image;
   rotates to the next inflight texture slot first (only one update per image
   per frame allowed) */
_SOKOL_PRIVATE void _sg_gl_update_image(_sg_image_t* img, const sg_image_data* data) {
    SOKOL_ASSERT(img && data);
    /* only one update per image per frame allowed */
    if (++img->cmn.active_slot >= img->cmn.num_slots) {
        img->cmn.active_slot = 0;
    }
    SOKOL_ASSERT(img->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES);
    SOKOL_ASSERT(0 != img->gl.tex[img->cmn.active_slot]);
    /* save the texture binding of slot 0, bind the target texture, restore at the end */
    _sg_gl_cache_store_texture_binding(0);
    _sg_gl_cache_bind_texture(0, img->gl.target, img->gl.tex[img->cmn.active_slot]);
    const GLenum gl_img_format = _sg_gl_teximage_format(img->cmn.pixel_format);
    const GLenum gl_img_type = _sg_gl_teximage_type(img->cmn.pixel_format);
    const int num_faces = img->cmn.type == SG_IMAGETYPE_CUBE ? 6 : 1;
    const int num_mips = img->cmn.num_mipmaps;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int mip_index = 0; mip_index < num_mips; mip_index++) {
            /* cubemaps upload through per-face targets instead of the texture target */
            GLenum gl_img_target = img->gl.target;
            if (SG_IMAGETYPE_CUBE == img->cmn.type) {
                gl_img_target = _sg_gl_cubeface_target(face_index);
            }
            const GLvoid* data_ptr = data->subimage[face_index][mip_index].ptr;
            /* mip dimensions halve per level but never drop below 1 */
            int mip_width = img->cmn.width >> mip_index;
            if (mip_width == 0) {
                mip_width = 1;
            }
            int mip_height = img->cmn.height >> mip_index;
            if (mip_height == 0) {
                mip_height = 1;
            }
            if ((SG_IMAGETYPE_2D == img->cmn.type) || (SG_IMAGETYPE_CUBE == img->cmn.type)) {
                glTexSubImage2D(gl_img_target, mip_index,
                    0, 0,
                    mip_width, mip_height,
                    gl_img_format, gl_img_type,
                    data_ptr);
            }
            #if !defined(SOKOL_GLES2)
            /* 3D and array textures need glTexSubImage3D, not available on GLES2 */
            else if (!_sg.gl.gles2 && ((SG_IMAGETYPE_3D == img->cmn.type) || (SG_IMAGETYPE_ARRAY == img->cmn.type))) {
                int mip_depth = img->cmn.num_slices >> mip_index;
                if (mip_depth == 0) {
                    mip_depth = 1;
                }
                glTexSubImage3D(gl_img_target, mip_index,
                    0, 0, 0,
                    mip_width, mip_height, mip_depth,
                    gl_img_format, gl_img_type,
                    data_ptr);

            }
            #endif
        }
    }
    _sg_gl_cache_restore_texture_binding(0);
}
8149 | |
8150 | /*== D3D11 BACKEND IMPLEMENTATION ============================================*/ |
8151 | #elif defined(SOKOL_D3D11) |
8152 | |
/* COM refcounting helpers usable from both C and C++: C++ calls the member
   function directly, plain C must go through the explicit vtable pointer */
#if defined(__cplusplus)
#define _sg_d3d11_AddRef(self) (self)->AddRef()
#else
#define _sg_d3d11_AddRef(self) (self)->lpVtbl->AddRef(self)
#endif

#if defined(__cplusplus)
#define _sg_d3d11_Release(self) (self)->Release()
#else
#define _sg_d3d11_Release(self) (self)->lpVtbl->Release(self)
#endif
8164 | |
8165 | /*-- D3D11 C/C++ wrappers ----------------------------------------------------*/ |
/* NOTE: each of the following wrappers forwards its arguments unchanged to a
   COM method; the #if distinguishes the C++ member-function call syntax from
   the C vtable call syntax (which needs 'self' passed explicitly) */
static inline HRESULT _sg_d3d11_CheckFormatSupport(ID3D11Device* self, DXGI_FORMAT Format, UINT* pFormatSupport) {
    #if defined(__cplusplus)
        return self->CheckFormatSupport(Format, pFormatSupport);
    #else
        return self->lpVtbl->CheckFormatSupport(self, Format, pFormatSupport);
    #endif
}

static inline void _sg_d3d11_OMSetRenderTargets(ID3D11DeviceContext* self, UINT NumViews, ID3D11RenderTargetView* const* ppRenderTargetViews, ID3D11DepthStencilView *pDepthStencilView) {
    #if defined(__cplusplus)
        self->OMSetRenderTargets(NumViews, ppRenderTargetViews, pDepthStencilView);
    #else
        self->lpVtbl->OMSetRenderTargets(self, NumViews, ppRenderTargetViews, pDepthStencilView);
    #endif
}

static inline void _sg_d3d11_RSSetState(ID3D11DeviceContext* self, ID3D11RasterizerState* pRasterizerState) {
    #if defined(__cplusplus)
        self->RSSetState(pRasterizerState);
    #else
        self->lpVtbl->RSSetState(self, pRasterizerState);
    #endif
}

static inline void _sg_d3d11_OMSetDepthStencilState(ID3D11DeviceContext* self, ID3D11DepthStencilState* pDepthStencilState, UINT StencilRef) {
    #if defined(__cplusplus)
        self->OMSetDepthStencilState(pDepthStencilState, StencilRef);
    #else
        self->lpVtbl->OMSetDepthStencilState(self, pDepthStencilState, StencilRef);
    #endif
}

static inline void _sg_d3d11_OMSetBlendState(ID3D11DeviceContext* self, ID3D11BlendState* pBlendState, const FLOAT BlendFactor[4], UINT SampleMask) {
    #if defined(__cplusplus)
        self->OMSetBlendState(pBlendState, BlendFactor, SampleMask);
    #else
        self->lpVtbl->OMSetBlendState(self, pBlendState, BlendFactor, SampleMask);
    #endif
}

static inline void _sg_d3d11_IASetVertexBuffers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumBuffers, ID3D11Buffer* const* ppVertexBuffers, const UINT* pStrides, const UINT* pOffsets) {
    #if defined(__cplusplus)
        self->IASetVertexBuffers(StartSlot, NumBuffers, ppVertexBuffers, pStrides, pOffsets);
    #else
        self->lpVtbl->IASetVertexBuffers(self, StartSlot, NumBuffers, ppVertexBuffers, pStrides, pOffsets);
    #endif
}

static inline void _sg_d3d11_IASetIndexBuffer(ID3D11DeviceContext* self, ID3D11Buffer* pIndexBuffer, DXGI_FORMAT Format, UINT Offset) {
    #if defined(__cplusplus)
        self->IASetIndexBuffer(pIndexBuffer, Format, Offset);
    #else
        self->lpVtbl->IASetIndexBuffer(self, pIndexBuffer, Format, Offset);
    #endif
}

static inline void _sg_d3d11_IASetInputLayout(ID3D11DeviceContext* self, ID3D11InputLayout* pInputLayout) {
    #if defined(__cplusplus)
        self->IASetInputLayout(pInputLayout);
    #else
        self->lpVtbl->IASetInputLayout(self, pInputLayout);
    #endif
}

static inline void _sg_d3d11_VSSetShader(ID3D11DeviceContext* self, ID3D11VertexShader* pVertexShader, ID3D11ClassInstance* const* ppClassInstances, UINT NumClassInstances) {
    #if defined(__cplusplus)
        self->VSSetShader(pVertexShader, ppClassInstances, NumClassInstances);
    #else
        self->lpVtbl->VSSetShader(self, pVertexShader, ppClassInstances, NumClassInstances);
    #endif
}

static inline void _sg_d3d11_PSSetShader(ID3D11DeviceContext* self, ID3D11PixelShader* pPixelShader, ID3D11ClassInstance* const* ppClassInstances, UINT NumClassInstances) {
    #if defined(__cplusplus)
        self->PSSetShader(pPixelShader, ppClassInstances, NumClassInstances);
    #else
        self->lpVtbl->PSSetShader(self, pPixelShader, ppClassInstances, NumClassInstances);
    #endif
}

static inline void _sg_d3d11_VSSetConstantBuffers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumBuffers, ID3D11Buffer* const* ppConstantBuffers) {
    #if defined(__cplusplus)
        self->VSSetConstantBuffers(StartSlot, NumBuffers, ppConstantBuffers);
    #else
        self->lpVtbl->VSSetConstantBuffers(self, StartSlot, NumBuffers, ppConstantBuffers);
    #endif
}

static inline void _sg_d3d11_PSSetConstantBuffers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumBuffers, ID3D11Buffer* const* ppConstantBuffers) {
    #if defined(__cplusplus)
        self->PSSetConstantBuffers(StartSlot, NumBuffers, ppConstantBuffers);
    #else
        self->lpVtbl->PSSetConstantBuffers(self, StartSlot, NumBuffers, ppConstantBuffers);
    #endif
}

static inline void _sg_d3d11_VSSetShaderResources(ID3D11DeviceContext* self, UINT StartSlot, UINT NumViews, ID3D11ShaderResourceView* const* ppShaderResourceViews) {
    #if defined(__cplusplus)
        self->VSSetShaderResources(StartSlot, NumViews, ppShaderResourceViews);
    #else
        self->lpVtbl->VSSetShaderResources(self, StartSlot, NumViews, ppShaderResourceViews);
    #endif
}

static inline void _sg_d3d11_PSSetShaderResources(ID3D11DeviceContext* self, UINT StartSlot, UINT NumViews, ID3D11ShaderResourceView* const* ppShaderResourceViews) {
    #if defined(__cplusplus)
        self->PSSetShaderResources(StartSlot, NumViews, ppShaderResourceViews);
    #else
        self->lpVtbl->PSSetShaderResources(self, StartSlot, NumViews, ppShaderResourceViews);
    #endif
}

static inline void _sg_d3d11_VSSetSamplers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumSamplers, ID3D11SamplerState* const* ppSamplers) {
    #if defined(__cplusplus)
        self->VSSetSamplers(StartSlot, NumSamplers, ppSamplers);
    #else
        self->lpVtbl->VSSetSamplers(self, StartSlot, NumSamplers, ppSamplers);
    #endif
}

static inline void _sg_d3d11_PSSetSamplers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumSamplers, ID3D11SamplerState* const* ppSamplers) {
    #if defined(__cplusplus)
        self->PSSetSamplers(StartSlot, NumSamplers, ppSamplers);
    #else
        self->lpVtbl->PSSetSamplers(self, StartSlot, NumSamplers, ppSamplers);
    #endif
}
8293 | |
/* C/C++ agnostic wrappers for D3D11 resource creation and blob accessors;
   each forwards its arguments unchanged (see the calling-convention note on
   the wrappers above) */
static inline HRESULT _sg_d3d11_CreateBuffer(ID3D11Device* self, const D3D11_BUFFER_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Buffer** ppBuffer) {
    #if defined(__cplusplus)
        return self->CreateBuffer(pDesc, pInitialData, ppBuffer);
    #else
        return self->lpVtbl->CreateBuffer(self, pDesc, pInitialData, ppBuffer);
    #endif
}

static inline HRESULT _sg_d3d11_CreateTexture2D(ID3D11Device* self, const D3D11_TEXTURE2D_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Texture2D** ppTexture2D) {
    #if defined(__cplusplus)
        return self->CreateTexture2D(pDesc, pInitialData, ppTexture2D);
    #else
        return self->lpVtbl->CreateTexture2D(self, pDesc, pInitialData, ppTexture2D);
    #endif
}

static inline HRESULT _sg_d3d11_CreateShaderResourceView(ID3D11Device* self, ID3D11Resource* pResource, const D3D11_SHADER_RESOURCE_VIEW_DESC* pDesc, ID3D11ShaderResourceView** ppSRView) {
    #if defined(__cplusplus)
        return self->CreateShaderResourceView(pResource, pDesc, ppSRView);
    #else
        return self->lpVtbl->CreateShaderResourceView(self, pResource, pDesc, ppSRView);
    #endif
}

static inline void _sg_d3d11_GetResource(ID3D11View* self, ID3D11Resource** ppResource) {
    #if defined(__cplusplus)
        self->GetResource(ppResource);
    #else
        self->lpVtbl->GetResource(self, ppResource);
    #endif
}

static inline HRESULT _sg_d3d11_CreateTexture3D(ID3D11Device* self, const D3D11_TEXTURE3D_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Texture3D** ppTexture3D) {
    #if defined(__cplusplus)
        return self->CreateTexture3D(pDesc, pInitialData, ppTexture3D);
    #else
        return self->lpVtbl->CreateTexture3D(self, pDesc, pInitialData, ppTexture3D);
    #endif
}

static inline HRESULT _sg_d3d11_CreateSamplerState(ID3D11Device* self, const D3D11_SAMPLER_DESC* pSamplerDesc, ID3D11SamplerState** ppSamplerState) {
    #if defined(__cplusplus)
        return self->CreateSamplerState(pSamplerDesc, ppSamplerState);
    #else
        return self->lpVtbl->CreateSamplerState(self, pSamplerDesc, ppSamplerState);
    #endif
}

static inline LPVOID _sg_d3d11_GetBufferPointer(ID3D10Blob* self) {
    #if defined(__cplusplus)
        return self->GetBufferPointer();
    #else
        return self->lpVtbl->GetBufferPointer(self);
    #endif
}

static inline SIZE_T _sg_d3d11_GetBufferSize(ID3D10Blob* self) {
    #if defined(__cplusplus)
        return self->GetBufferSize();
    #else
        return self->lpVtbl->GetBufferSize(self);
    #endif
}

static inline HRESULT _sg_d3d11_CreateVertexShader(ID3D11Device* self, const void* pShaderBytecode, SIZE_T BytecodeLength, ID3D11ClassLinkage* pClassLinkage, ID3D11VertexShader** ppVertexShader) {
    #if defined(__cplusplus)
        return self->CreateVertexShader(pShaderBytecode, BytecodeLength, pClassLinkage, ppVertexShader);
    #else
        return self->lpVtbl->CreateVertexShader(self, pShaderBytecode, BytecodeLength, pClassLinkage, ppVertexShader);
    #endif
}

static inline HRESULT _sg_d3d11_CreatePixelShader(ID3D11Device* self, const void* pShaderBytecode, SIZE_T BytecodeLength, ID3D11ClassLinkage* pClassLinkage, ID3D11PixelShader** ppPixelShader) {
    #if defined(__cplusplus)
        return self->CreatePixelShader(pShaderBytecode, BytecodeLength, pClassLinkage, ppPixelShader);
    #else
        return self->lpVtbl->CreatePixelShader(self, pShaderBytecode, BytecodeLength, pClassLinkage, ppPixelShader);
    #endif
}

static inline HRESULT _sg_d3d11_CreateInputLayout(ID3D11Device* self, const D3D11_INPUT_ELEMENT_DESC* pInputElementDescs, UINT NumElements, const void* pShaderBytecodeWithInputSignature, SIZE_T BytecodeLength, ID3D11InputLayout **ppInputLayout) {
    #if defined(__cplusplus)
        return self->CreateInputLayout(pInputElementDescs, NumElements, pShaderBytecodeWithInputSignature, BytecodeLength, ppInputLayout);
    #else
        return self->lpVtbl->CreateInputLayout(self, pInputElementDescs, NumElements, pShaderBytecodeWithInputSignature, BytecodeLength, ppInputLayout);
    #endif
}

static inline HRESULT _sg_d3d11_CreateRasterizerState(ID3D11Device* self, const D3D11_RASTERIZER_DESC* pRasterizerDesc, ID3D11RasterizerState** ppRasterizerState) {
    #if defined(__cplusplus)
        return self->CreateRasterizerState(pRasterizerDesc, ppRasterizerState);
    #else
        return self->lpVtbl->CreateRasterizerState(self, pRasterizerDesc, ppRasterizerState);
    #endif
}

static inline HRESULT _sg_d3d11_CreateDepthStencilState(ID3D11Device* self, const D3D11_DEPTH_STENCIL_DESC* pDepthStencilDesc, ID3D11DepthStencilState** ppDepthStencilState) {
    #if defined(__cplusplus)
        return self->CreateDepthStencilState(pDepthStencilDesc, ppDepthStencilState);
    #else
        return self->lpVtbl->CreateDepthStencilState(self, pDepthStencilDesc, ppDepthStencilState);
    #endif
}

static inline HRESULT _sg_d3d11_CreateBlendState(ID3D11Device* self, const D3D11_BLEND_DESC* pBlendStateDesc, ID3D11BlendState** ppBlendState) {
    #if defined(__cplusplus)
        return self->CreateBlendState(pBlendStateDesc, ppBlendState);
    #else
        return self->lpVtbl->CreateBlendState(self, pBlendStateDesc, ppBlendState);
    #endif
}

static inline HRESULT _sg_d3d11_CreateRenderTargetView(ID3D11Device* self, ID3D11Resource *pResource, const D3D11_RENDER_TARGET_VIEW_DESC* pDesc, ID3D11RenderTargetView** ppRTView) {
    #if defined(__cplusplus)
        return self->CreateRenderTargetView(pResource, pDesc, ppRTView);
    #else
        return self->lpVtbl->CreateRenderTargetView(self, pResource, pDesc, ppRTView);
    #endif
}

static inline HRESULT _sg_d3d11_CreateDepthStencilView(ID3D11Device* self, ID3D11Resource* pResource, const D3D11_DEPTH_STENCIL_VIEW_DESC* pDesc, ID3D11DepthStencilView** ppDepthStencilView) {
    #if defined(__cplusplus)
        return self->CreateDepthStencilView(pResource, pDesc, ppDepthStencilView);
    #else
        return self->lpVtbl->CreateDepthStencilView(self, pResource, pDesc, ppDepthStencilView);
    #endif
}
8421 | |
/* C/C++ agnostic wrappers for D3D11 rendering commands (viewport/scissor,
   clears, resource updates, draw calls); each forwards its arguments
   unchanged (see the calling-convention note on the wrappers above) */
static inline void _sg_d3d11_RSSetViewports(ID3D11DeviceContext* self, UINT NumViewports, const D3D11_VIEWPORT* pViewports) {
    #if defined(__cplusplus)
        self->RSSetViewports(NumViewports, pViewports);
    #else
        self->lpVtbl->RSSetViewports(self, NumViewports, pViewports);
    #endif
}

static inline void _sg_d3d11_RSSetScissorRects(ID3D11DeviceContext* self, UINT NumRects, const D3D11_RECT* pRects) {
    #if defined(__cplusplus)
        self->RSSetScissorRects(NumRects, pRects);
    #else
        self->lpVtbl->RSSetScissorRects(self, NumRects, pRects);
    #endif
}

static inline void _sg_d3d11_ClearRenderTargetView(ID3D11DeviceContext* self, ID3D11RenderTargetView* pRenderTargetView, const FLOAT ColorRGBA[4]) {
    #if defined(__cplusplus)
        self->ClearRenderTargetView(pRenderTargetView, ColorRGBA);
    #else
        self->lpVtbl->ClearRenderTargetView(self, pRenderTargetView, ColorRGBA);
    #endif
}

static inline void _sg_d3d11_ClearDepthStencilView(ID3D11DeviceContext* self, ID3D11DepthStencilView* pDepthStencilView, UINT ClearFlags, FLOAT Depth, UINT8 Stencil) {
    #if defined(__cplusplus)
        self->ClearDepthStencilView(pDepthStencilView, ClearFlags, Depth, Stencil);
    #else
        self->lpVtbl->ClearDepthStencilView(self, pDepthStencilView, ClearFlags, Depth, Stencil);
    #endif
}

static inline void _sg_d3d11_ResolveSubresource(ID3D11DeviceContext* self, ID3D11Resource* pDstResource, UINT DstSubresource, ID3D11Resource* pSrcResource, UINT SrcSubresource, DXGI_FORMAT Format) {
    #if defined(__cplusplus)
        self->ResolveSubresource(pDstResource, DstSubresource, pSrcResource, SrcSubresource, Format);
    #else
        self->lpVtbl->ResolveSubresource(self, pDstResource, DstSubresource, pSrcResource, SrcSubresource, Format);
    #endif
}

static inline void _sg_d3d11_IASetPrimitiveTopology(ID3D11DeviceContext* self, D3D11_PRIMITIVE_TOPOLOGY Topology) {
    #if defined(__cplusplus)
        self->IASetPrimitiveTopology(Topology);
    #else
        self->lpVtbl->IASetPrimitiveTopology(self, Topology);
    #endif
}

static inline void _sg_d3d11_UpdateSubresource(ID3D11DeviceContext* self, ID3D11Resource* pDstResource, UINT DstSubresource, const D3D11_BOX* pDstBox, const void* pSrcData, UINT SrcRowPitch, UINT SrcDepthPitch) {
    #if defined(__cplusplus)
        self->UpdateSubresource(pDstResource, DstSubresource, pDstBox, pSrcData, SrcRowPitch, SrcDepthPitch);
    #else
        self->lpVtbl->UpdateSubresource(self, pDstResource, DstSubresource, pDstBox, pSrcData, SrcRowPitch, SrcDepthPitch);
    #endif
}

static inline void _sg_d3d11_DrawIndexed(ID3D11DeviceContext* self, UINT IndexCount, UINT StartIndexLocation, INT BaseVertexLocation) {
    #if defined(__cplusplus)
        self->DrawIndexed(IndexCount, StartIndexLocation, BaseVertexLocation);
    #else
        self->lpVtbl->DrawIndexed(self, IndexCount, StartIndexLocation, BaseVertexLocation);
    #endif
}

static inline void _sg_d3d11_DrawIndexedInstanced(ID3D11DeviceContext* self, UINT IndexCountPerInstance, UINT InstanceCount, UINT StartIndexLocation, INT BaseVertexLocation, UINT StartInstanceLocation) {
    #if defined(__cplusplus)
        self->DrawIndexedInstanced(IndexCountPerInstance, InstanceCount, StartIndexLocation, BaseVertexLocation, StartInstanceLocation);
    #else
        self->lpVtbl->DrawIndexedInstanced(self, IndexCountPerInstance, InstanceCount, StartIndexLocation, BaseVertexLocation, StartInstanceLocation);
    #endif
}

static inline void _sg_d3d11_Draw(ID3D11DeviceContext* self, UINT VertexCount, UINT StartVertexLocation) {
    #if defined(__cplusplus)
        self->Draw(VertexCount, StartVertexLocation);
    #else
        self->lpVtbl->Draw(self, VertexCount, StartVertexLocation);
    #endif
}
8501 | |
8502 | static inline void _sg_d3d11_DrawInstanced(ID3D11DeviceContext* self, UINT VertexCountPerInstance, UINT InstanceCount, UINT StartVertexLocation, UINT StartInstanceLocation) { |
8503 | #if defined(__cplusplus) |
8504 | self->DrawInstanced(VertexCountPerInstance, InstanceCount, StartVertexLocation, StartInstanceLocation); |
8505 | #else |
8506 | self->lpVtbl->DrawInstanced(self, VertexCountPerInstance, InstanceCount, StartVertexLocation, StartInstanceLocation); |
8507 | #endif |
8508 | } |
8509 | |
8510 | static inline HRESULT _sg_d3d11_Map(ID3D11DeviceContext* self, ID3D11Resource* pResource, UINT Subresource, D3D11_MAP MapType, UINT MapFlags, D3D11_MAPPED_SUBRESOURCE* pMappedResource) { |
8511 | #if defined(__cplusplus) |
8512 | return self->Map(pResource, Subresource, MapType, MapFlags, pMappedResource); |
8513 | #else |
8514 | return self->lpVtbl->Map(self, pResource, Subresource, MapType, MapFlags, pMappedResource); |
8515 | #endif |
8516 | } |
8517 | |
8518 | static inline void _sg_d3d11_Unmap(ID3D11DeviceContext* self, ID3D11Resource* pResource, UINT Subresource) { |
8519 | #if defined(__cplusplus) |
8520 | self->Unmap(pResource, Subresource); |
8521 | #else |
8522 | self->lpVtbl->Unmap(self, pResource, Subresource); |
8523 | #endif |
8524 | } |
8525 | |
8526 | static inline void _sg_d3d11_ClearState(ID3D11DeviceContext* self) { |
8527 | #if defined(__cplusplus) |
8528 | self->ClearState(); |
8529 | #else |
8530 | self->lpVtbl->ClearState(self); |
8531 | #endif |
8532 | } |
8533 | |
8534 | /*-- enum translation functions ----------------------------------------------*/ |
8535 | _SOKOL_PRIVATE D3D11_USAGE _sg_d3d11_usage(sg_usage usg) { |
8536 | switch (usg) { |
8537 | case SG_USAGE_IMMUTABLE: |
8538 | return D3D11_USAGE_IMMUTABLE; |
8539 | case SG_USAGE_DYNAMIC: |
8540 | case SG_USAGE_STREAM: |
8541 | return D3D11_USAGE_DYNAMIC; |
8542 | default: |
8543 | SOKOL_UNREACHABLE; |
8544 | return (D3D11_USAGE) 0; |
8545 | } |
8546 | } |
8547 | |
8548 | _SOKOL_PRIVATE UINT _sg_d3d11_cpu_access_flags(sg_usage usg) { |
8549 | switch (usg) { |
8550 | case SG_USAGE_IMMUTABLE: |
8551 | return 0; |
8552 | case SG_USAGE_DYNAMIC: |
8553 | case SG_USAGE_STREAM: |
8554 | return D3D11_CPU_ACCESS_WRITE; |
8555 | default: |
8556 | SOKOL_UNREACHABLE; |
8557 | return 0; |
8558 | } |
8559 | } |
8560 | |
8561 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_pixel_format(sg_pixel_format fmt) { |
8562 | switch (fmt) { |
8563 | case SG_PIXELFORMAT_R8: return DXGI_FORMAT_R8_UNORM; |
8564 | case SG_PIXELFORMAT_R8SN: return DXGI_FORMAT_R8_SNORM; |
8565 | case SG_PIXELFORMAT_R8UI: return DXGI_FORMAT_R8_UINT; |
8566 | case SG_PIXELFORMAT_R8SI: return DXGI_FORMAT_R8_SINT; |
8567 | case SG_PIXELFORMAT_R16: return DXGI_FORMAT_R16_UNORM; |
8568 | case SG_PIXELFORMAT_R16SN: return DXGI_FORMAT_R16_SNORM; |
8569 | case SG_PIXELFORMAT_R16UI: return DXGI_FORMAT_R16_UINT; |
8570 | case SG_PIXELFORMAT_R16SI: return DXGI_FORMAT_R16_SINT; |
8571 | case SG_PIXELFORMAT_R16F: return DXGI_FORMAT_R16_FLOAT; |
8572 | case SG_PIXELFORMAT_RG8: return DXGI_FORMAT_R8G8_UNORM; |
8573 | case SG_PIXELFORMAT_RG8SN: return DXGI_FORMAT_R8G8_SNORM; |
8574 | case SG_PIXELFORMAT_RG8UI: return DXGI_FORMAT_R8G8_UINT; |
8575 | case SG_PIXELFORMAT_RG8SI: return DXGI_FORMAT_R8G8_SINT; |
8576 | case SG_PIXELFORMAT_R32UI: return DXGI_FORMAT_R32_UINT; |
8577 | case SG_PIXELFORMAT_R32SI: return DXGI_FORMAT_R32_SINT; |
8578 | case SG_PIXELFORMAT_R32F: return DXGI_FORMAT_R32_FLOAT; |
8579 | case SG_PIXELFORMAT_RG16: return DXGI_FORMAT_R16G16_UNORM; |
8580 | case SG_PIXELFORMAT_RG16SN: return DXGI_FORMAT_R16G16_SNORM; |
8581 | case SG_PIXELFORMAT_RG16UI: return DXGI_FORMAT_R16G16_UINT; |
8582 | case SG_PIXELFORMAT_RG16SI: return DXGI_FORMAT_R16G16_SINT; |
8583 | case SG_PIXELFORMAT_RG16F: return DXGI_FORMAT_R16G16_FLOAT; |
8584 | case SG_PIXELFORMAT_RGBA8: return DXGI_FORMAT_R8G8B8A8_UNORM; |
8585 | case SG_PIXELFORMAT_RGBA8SN: return DXGI_FORMAT_R8G8B8A8_SNORM; |
8586 | case SG_PIXELFORMAT_RGBA8UI: return DXGI_FORMAT_R8G8B8A8_UINT; |
8587 | case SG_PIXELFORMAT_RGBA8SI: return DXGI_FORMAT_R8G8B8A8_SINT; |
8588 | case SG_PIXELFORMAT_BGRA8: return DXGI_FORMAT_B8G8R8A8_UNORM; |
8589 | case SG_PIXELFORMAT_RGB10A2: return DXGI_FORMAT_R10G10B10A2_UNORM; |
8590 | case SG_PIXELFORMAT_RG11B10F: return DXGI_FORMAT_R11G11B10_FLOAT; |
8591 | case SG_PIXELFORMAT_RGB9E5: return DXGI_FORMAT_R9G9B9E5_SHAREDEXP; |
8592 | case SG_PIXELFORMAT_RG32UI: return DXGI_FORMAT_R32G32_UINT; |
8593 | case SG_PIXELFORMAT_RG32SI: return DXGI_FORMAT_R32G32_SINT; |
8594 | case SG_PIXELFORMAT_RG32F: return DXGI_FORMAT_R32G32_FLOAT; |
8595 | case SG_PIXELFORMAT_RGBA16: return DXGI_FORMAT_R16G16B16A16_UNORM; |
8596 | case SG_PIXELFORMAT_RGBA16SN: return DXGI_FORMAT_R16G16B16A16_SNORM; |
8597 | case SG_PIXELFORMAT_RGBA16UI: return DXGI_FORMAT_R16G16B16A16_UINT; |
8598 | case SG_PIXELFORMAT_RGBA16SI: return DXGI_FORMAT_R16G16B16A16_SINT; |
8599 | case SG_PIXELFORMAT_RGBA16F: return DXGI_FORMAT_R16G16B16A16_FLOAT; |
8600 | case SG_PIXELFORMAT_RGBA32UI: return DXGI_FORMAT_R32G32B32A32_UINT; |
8601 | case SG_PIXELFORMAT_RGBA32SI: return DXGI_FORMAT_R32G32B32A32_SINT; |
8602 | case SG_PIXELFORMAT_RGBA32F: return DXGI_FORMAT_R32G32B32A32_FLOAT; |
8603 | case SG_PIXELFORMAT_DEPTH: return DXGI_FORMAT_D32_FLOAT; |
8604 | case SG_PIXELFORMAT_DEPTH_STENCIL: return DXGI_FORMAT_D24_UNORM_S8_UINT; |
8605 | case SG_PIXELFORMAT_BC1_RGBA: return DXGI_FORMAT_BC1_UNORM; |
8606 | case SG_PIXELFORMAT_BC2_RGBA: return DXGI_FORMAT_BC2_UNORM; |
8607 | case SG_PIXELFORMAT_BC3_RGBA: return DXGI_FORMAT_BC3_UNORM; |
8608 | case SG_PIXELFORMAT_BC4_R: return DXGI_FORMAT_BC4_UNORM; |
8609 | case SG_PIXELFORMAT_BC4_RSN: return DXGI_FORMAT_BC4_SNORM; |
8610 | case SG_PIXELFORMAT_BC5_RG: return DXGI_FORMAT_BC5_UNORM; |
8611 | case SG_PIXELFORMAT_BC5_RGSN: return DXGI_FORMAT_BC5_SNORM; |
8612 | case SG_PIXELFORMAT_BC6H_RGBF: return DXGI_FORMAT_BC6H_SF16; |
8613 | case SG_PIXELFORMAT_BC6H_RGBUF: return DXGI_FORMAT_BC6H_UF16; |
8614 | case SG_PIXELFORMAT_BC7_RGBA: return DXGI_FORMAT_BC7_UNORM; |
8615 | default: return DXGI_FORMAT_UNKNOWN; |
8616 | }; |
8617 | } |
8618 | |
8619 | _SOKOL_PRIVATE D3D11_PRIMITIVE_TOPOLOGY _sg_d3d11_primitive_topology(sg_primitive_type prim_type) { |
8620 | switch (prim_type) { |
8621 | case SG_PRIMITIVETYPE_POINTS: return D3D11_PRIMITIVE_TOPOLOGY_POINTLIST; |
8622 | case SG_PRIMITIVETYPE_LINES: return D3D11_PRIMITIVE_TOPOLOGY_LINELIST; |
8623 | case SG_PRIMITIVETYPE_LINE_STRIP: return D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP; |
8624 | case SG_PRIMITIVETYPE_TRIANGLES: return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST; |
8625 | case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; |
8626 | default: SOKOL_UNREACHABLE; return (D3D11_PRIMITIVE_TOPOLOGY) 0; |
8627 | } |
8628 | } |
8629 | |
8630 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_index_format(sg_index_type index_type) { |
8631 | switch (index_type) { |
8632 | case SG_INDEXTYPE_NONE: return DXGI_FORMAT_UNKNOWN; |
8633 | case SG_INDEXTYPE_UINT16: return DXGI_FORMAT_R16_UINT; |
8634 | case SG_INDEXTYPE_UINT32: return DXGI_FORMAT_R32_UINT; |
8635 | default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0; |
8636 | } |
8637 | } |
8638 | |
/* translate a sokol min/mag filter pair (plus max anisotropy) into a single
   D3D11_FILTER value; D3D11 encodes min-, mag- and mip-filter in one enum,
   while sokol-gfx carries the mip mode inside the min-filter value;
   mag_f must be SG_FILTER_NEAREST or SG_FILTER_LINEAR, anything else hits
   SOKOL_UNREACHABLE
*/
_SOKOL_PRIVATE D3D11_FILTER _sg_d3d11_filter(sg_filter min_f, sg_filter mag_f, uint32_t max_anisotropy) {
    if (max_anisotropy > 1) {
        /* any anisotropy > 1 overrides the min/mag filter selection entirely */
        return D3D11_FILTER_ANISOTROPIC;
    }
    else if (mag_f == SG_FILTER_NEAREST) {
        switch (min_f) {
            case SG_FILTER_NEAREST:
            case SG_FILTER_NEAREST_MIPMAP_NEAREST:
                return D3D11_FILTER_MIN_MAG_MIP_POINT;
            case SG_FILTER_LINEAR:
            case SG_FILTER_LINEAR_MIPMAP_NEAREST:
                return D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT;
            case SG_FILTER_NEAREST_MIPMAP_LINEAR:
                return D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR;
            case SG_FILTER_LINEAR_MIPMAP_LINEAR:
                return D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR;
            default:
                SOKOL_UNREACHABLE; break;
        }
    }
    else if (mag_f == SG_FILTER_LINEAR) {
        switch (min_f) {
            case SG_FILTER_NEAREST:
            case SG_FILTER_NEAREST_MIPMAP_NEAREST:
                return D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT;
            case SG_FILTER_LINEAR:
            case SG_FILTER_LINEAR_MIPMAP_NEAREST:
                return D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT;
            case SG_FILTER_NEAREST_MIPMAP_LINEAR:
                return D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR;
            case SG_FILTER_LINEAR_MIPMAP_LINEAR:
                return D3D11_FILTER_MIN_MAG_MIP_LINEAR;
            default:
                SOKOL_UNREACHABLE; break;
        }
    }
    /* invalid value for mag filter */
    SOKOL_UNREACHABLE;
    return D3D11_FILTER_MIN_MAG_MIP_POINT;
}
8679 | |
8680 | _SOKOL_PRIVATE D3D11_TEXTURE_ADDRESS_MODE _sg_d3d11_address_mode(sg_wrap m) { |
8681 | switch (m) { |
8682 | case SG_WRAP_REPEAT: return D3D11_TEXTURE_ADDRESS_WRAP; |
8683 | case SG_WRAP_CLAMP_TO_EDGE: return D3D11_TEXTURE_ADDRESS_CLAMP; |
8684 | case SG_WRAP_CLAMP_TO_BORDER: return D3D11_TEXTURE_ADDRESS_BORDER; |
8685 | case SG_WRAP_MIRRORED_REPEAT: return D3D11_TEXTURE_ADDRESS_MIRROR; |
8686 | default: SOKOL_UNREACHABLE; return (D3D11_TEXTURE_ADDRESS_MODE) 0; |
8687 | } |
8688 | } |
8689 | |
8690 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_vertex_format(sg_vertex_format fmt) { |
8691 | switch (fmt) { |
8692 | case SG_VERTEXFORMAT_FLOAT: return DXGI_FORMAT_R32_FLOAT; |
8693 | case SG_VERTEXFORMAT_FLOAT2: return DXGI_FORMAT_R32G32_FLOAT; |
8694 | case SG_VERTEXFORMAT_FLOAT3: return DXGI_FORMAT_R32G32B32_FLOAT; |
8695 | case SG_VERTEXFORMAT_FLOAT4: return DXGI_FORMAT_R32G32B32A32_FLOAT; |
8696 | case SG_VERTEXFORMAT_BYTE4: return DXGI_FORMAT_R8G8B8A8_SINT; |
8697 | case SG_VERTEXFORMAT_BYTE4N: return DXGI_FORMAT_R8G8B8A8_SNORM; |
8698 | case SG_VERTEXFORMAT_UBYTE4: return DXGI_FORMAT_R8G8B8A8_UINT; |
8699 | case SG_VERTEXFORMAT_UBYTE4N: return DXGI_FORMAT_R8G8B8A8_UNORM; |
8700 | case SG_VERTEXFORMAT_SHORT2: return DXGI_FORMAT_R16G16_SINT; |
8701 | case SG_VERTEXFORMAT_SHORT2N: return DXGI_FORMAT_R16G16_SNORM; |
8702 | case SG_VERTEXFORMAT_USHORT2N: return DXGI_FORMAT_R16G16_UNORM; |
8703 | case SG_VERTEXFORMAT_SHORT4: return DXGI_FORMAT_R16G16B16A16_SINT; |
8704 | case SG_VERTEXFORMAT_SHORT4N: return DXGI_FORMAT_R16G16B16A16_SNORM; |
8705 | case SG_VERTEXFORMAT_USHORT4N: return DXGI_FORMAT_R16G16B16A16_UNORM; |
8706 | case SG_VERTEXFORMAT_UINT10_N2: return DXGI_FORMAT_R10G10B10A2_UNORM; |
8707 | default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0; |
8708 | } |
8709 | } |
8710 | |
8711 | _SOKOL_PRIVATE D3D11_INPUT_CLASSIFICATION _sg_d3d11_input_classification(sg_vertex_step step) { |
8712 | switch (step) { |
8713 | case SG_VERTEXSTEP_PER_VERTEX: return D3D11_INPUT_PER_VERTEX_DATA; |
8714 | case SG_VERTEXSTEP_PER_INSTANCE: return D3D11_INPUT_PER_INSTANCE_DATA; |
8715 | default: SOKOL_UNREACHABLE; return (D3D11_INPUT_CLASSIFICATION) 0; |
8716 | } |
8717 | } |
8718 | |
8719 | _SOKOL_PRIVATE D3D11_CULL_MODE _sg_d3d11_cull_mode(sg_cull_mode m) { |
8720 | switch (m) { |
8721 | case SG_CULLMODE_NONE: return D3D11_CULL_NONE; |
8722 | case SG_CULLMODE_FRONT: return D3D11_CULL_FRONT; |
8723 | case SG_CULLMODE_BACK: return D3D11_CULL_BACK; |
8724 | default: SOKOL_UNREACHABLE; return (D3D11_CULL_MODE) 0; |
8725 | } |
8726 | } |
8727 | |
8728 | _SOKOL_PRIVATE D3D11_COMPARISON_FUNC _sg_d3d11_compare_func(sg_compare_func f) { |
8729 | switch (f) { |
8730 | case SG_COMPAREFUNC_NEVER: return D3D11_COMPARISON_NEVER; |
8731 | case SG_COMPAREFUNC_LESS: return D3D11_COMPARISON_LESS; |
8732 | case SG_COMPAREFUNC_EQUAL: return D3D11_COMPARISON_EQUAL; |
8733 | case SG_COMPAREFUNC_LESS_EQUAL: return D3D11_COMPARISON_LESS_EQUAL; |
8734 | case SG_COMPAREFUNC_GREATER: return D3D11_COMPARISON_GREATER; |
8735 | case SG_COMPAREFUNC_NOT_EQUAL: return D3D11_COMPARISON_NOT_EQUAL; |
8736 | case SG_COMPAREFUNC_GREATER_EQUAL: return D3D11_COMPARISON_GREATER_EQUAL; |
8737 | case SG_COMPAREFUNC_ALWAYS: return D3D11_COMPARISON_ALWAYS; |
8738 | default: SOKOL_UNREACHABLE; return (D3D11_COMPARISON_FUNC) 0; |
8739 | } |
8740 | } |
8741 | |
8742 | _SOKOL_PRIVATE D3D11_STENCIL_OP _sg_d3d11_stencil_op(sg_stencil_op op) { |
8743 | switch (op) { |
8744 | case SG_STENCILOP_KEEP: return D3D11_STENCIL_OP_KEEP; |
8745 | case SG_STENCILOP_ZERO: return D3D11_STENCIL_OP_ZERO; |
8746 | case SG_STENCILOP_REPLACE: return D3D11_STENCIL_OP_REPLACE; |
8747 | case SG_STENCILOP_INCR_CLAMP: return D3D11_STENCIL_OP_INCR_SAT; |
8748 | case SG_STENCILOP_DECR_CLAMP: return D3D11_STENCIL_OP_DECR_SAT; |
8749 | case SG_STENCILOP_INVERT: return D3D11_STENCIL_OP_INVERT; |
8750 | case SG_STENCILOP_INCR_WRAP: return D3D11_STENCIL_OP_INCR; |
8751 | case SG_STENCILOP_DECR_WRAP: return D3D11_STENCIL_OP_DECR; |
8752 | default: SOKOL_UNREACHABLE; return (D3D11_STENCIL_OP) 0; |
8753 | } |
8754 | } |
8755 | |
8756 | _SOKOL_PRIVATE D3D11_BLEND _sg_d3d11_blend_factor(sg_blend_factor f) { |
8757 | switch (f) { |
8758 | case SG_BLENDFACTOR_ZERO: return D3D11_BLEND_ZERO; |
8759 | case SG_BLENDFACTOR_ONE: return D3D11_BLEND_ONE; |
8760 | case SG_BLENDFACTOR_SRC_COLOR: return D3D11_BLEND_SRC_COLOR; |
8761 | case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return D3D11_BLEND_INV_SRC_COLOR; |
8762 | case SG_BLENDFACTOR_SRC_ALPHA: return D3D11_BLEND_SRC_ALPHA; |
8763 | case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return D3D11_BLEND_INV_SRC_ALPHA; |
8764 | case SG_BLENDFACTOR_DST_COLOR: return D3D11_BLEND_DEST_COLOR; |
8765 | case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return D3D11_BLEND_INV_DEST_COLOR; |
8766 | case SG_BLENDFACTOR_DST_ALPHA: return D3D11_BLEND_DEST_ALPHA; |
8767 | case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return D3D11_BLEND_INV_DEST_ALPHA; |
8768 | case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return D3D11_BLEND_SRC_ALPHA_SAT; |
8769 | case SG_BLENDFACTOR_BLEND_COLOR: return D3D11_BLEND_BLEND_FACTOR; |
8770 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return D3D11_BLEND_INV_BLEND_FACTOR; |
8771 | case SG_BLENDFACTOR_BLEND_ALPHA: return D3D11_BLEND_BLEND_FACTOR; |
8772 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return D3D11_BLEND_INV_BLEND_FACTOR; |
8773 | default: SOKOL_UNREACHABLE; return (D3D11_BLEND) 0; |
8774 | } |
8775 | } |
8776 | |
8777 | _SOKOL_PRIVATE D3D11_BLEND_OP _sg_d3d11_blend_op(sg_blend_op op) { |
8778 | switch (op) { |
8779 | case SG_BLENDOP_ADD: return D3D11_BLEND_OP_ADD; |
8780 | case SG_BLENDOP_SUBTRACT: return D3D11_BLEND_OP_SUBTRACT; |
8781 | case SG_BLENDOP_REVERSE_SUBTRACT: return D3D11_BLEND_OP_REV_SUBTRACT; |
8782 | default: SOKOL_UNREACHABLE; return (D3D11_BLEND_OP) 0; |
8783 | } |
8784 | } |
8785 | |
8786 | _SOKOL_PRIVATE UINT8 _sg_d3d11_color_write_mask(sg_color_mask m) { |
8787 | UINT8 res = 0; |
8788 | if (m & SG_COLORMASK_R) { |
8789 | res |= D3D11_COLOR_WRITE_ENABLE_RED; |
8790 | } |
8791 | if (m & SG_COLORMASK_G) { |
8792 | res |= D3D11_COLOR_WRITE_ENABLE_GREEN; |
8793 | } |
8794 | if (m & SG_COLORMASK_B) { |
8795 | res |= D3D11_COLOR_WRITE_ENABLE_BLUE; |
8796 | } |
8797 | if (m & SG_COLORMASK_A) { |
8798 | res |= D3D11_COLOR_WRITE_ENABLE_ALPHA; |
8799 | } |
8800 | return res; |
8801 | } |
8802 | |
8803 | /* see: https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-resources-limits#resource-limits-for-feature-level-11-hardware */ |
8804 | _SOKOL_PRIVATE void _sg_d3d11_init_caps(void) { |
8805 | _sg.backend = SG_BACKEND_D3D11; |
8806 | |
8807 | _sg.features.instancing = true; |
8808 | _sg.features.origin_top_left = true; |
8809 | _sg.features.multiple_render_targets = true; |
8810 | _sg.features.msaa_render_targets = true; |
8811 | _sg.features.imagetype_3d = true; |
8812 | _sg.features.imagetype_array = true; |
8813 | _sg.features.image_clamp_to_border = true; |
8814 | _sg.features.mrt_independent_blend_state = true; |
8815 | _sg.features.mrt_independent_write_mask = true; |
8816 | |
8817 | _sg.limits.max_image_size_2d = 16 * 1024; |
8818 | _sg.limits.max_image_size_cube = 16 * 1024; |
8819 | _sg.limits.max_image_size_3d = 2 * 1024; |
8820 | _sg.limits.max_image_size_array = 16 * 1024; |
8821 | _sg.limits.max_image_array_layers = 2 * 1024; |
8822 | _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES; |
8823 | |
8824 | /* see: https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_format_support */ |
8825 | for (int fmt = (SG_PIXELFORMAT_NONE+1); fmt < _SG_PIXELFORMAT_NUM; fmt++) { |
8826 | UINT dxgi_fmt_caps = 0; |
8827 | const DXGI_FORMAT dxgi_fmt = _sg_d3d11_pixel_format((sg_pixel_format)fmt); |
8828 | if (dxgi_fmt != DXGI_FORMAT_UNKNOWN) { |
8829 | HRESULT hr = _sg_d3d11_CheckFormatSupport(_sg.d3d11.dev, dxgi_fmt, &dxgi_fmt_caps); |
8830 | SOKOL_ASSERT(SUCCEEDED(hr) || (E_FAIL == hr)); |
8831 | if (!SUCCEEDED(hr)) { |
8832 | dxgi_fmt_caps = 0; |
8833 | } |
8834 | } |
8835 | sg_pixelformat_info* info = &_sg.formats[fmt]; |
8836 | info->sample = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_TEXTURE2D); |
8837 | info->filter = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_SHADER_SAMPLE); |
8838 | info->render = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_RENDER_TARGET); |
8839 | info->blend = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_BLENDABLE); |
8840 | info->msaa = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET); |
8841 | info->depth = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_DEPTH_STENCIL); |
8842 | if (info->depth) { |
8843 | info->render = true; |
8844 | } |
8845 | } |
8846 | } |
8847 | |
/* setup the D3D11 backend: take over the application-provided device,
   device-context and default-framebuffer callbacks from sg_desc.context.d3d11,
   then query device capabilities; no D3D11 objects are created or AddRef'ed here
*/
_SOKOL_PRIVATE void _sg_d3d11_setup_backend(const sg_desc* desc) {
    /* assume _sg.d3d11 already is zero-initialized */
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT(desc->context.d3d11.device);
    SOKOL_ASSERT(desc->context.d3d11.device_context);
    /* at least one flavor (plain or userdata) of each view callback must be provided */
    SOKOL_ASSERT(desc->context.d3d11.render_target_view_cb || desc->context.d3d11.render_target_view_userdata_cb);
    SOKOL_ASSERT(desc->context.d3d11.depth_stencil_view_cb || desc->context.d3d11.depth_stencil_view_userdata_cb);
    _sg.d3d11.valid = true;
    _sg.d3d11.dev = (ID3D11Device*) desc->context.d3d11.device;
    _sg.d3d11.ctx = (ID3D11DeviceContext*) desc->context.d3d11.device_context;
    _sg.d3d11.rtv_cb = desc->context.d3d11.render_target_view_cb;
    _sg.d3d11.rtv_userdata_cb = desc->context.d3d11.render_target_view_userdata_cb;
    _sg.d3d11.dsv_cb = desc->context.d3d11.depth_stencil_view_cb;
    _sg.d3d11.dsv_userdata_cb = desc->context.d3d11.depth_stencil_view_userdata_cb;
    _sg.d3d11.user_data = desc->context.d3d11.user_data;
    _sg_d3d11_init_caps();
}
8865 | |
/* shut down the D3D11 backend; only clears the valid flag, the device and
   device-context are owned by the application and are not released here
*/
_SOKOL_PRIVATE void _sg_d3d11_discard_backend(void) {
    SOKOL_ASSERT(_sg.d3d11.valid);
    _sg.d3d11.valid = false;
}
8870 | |
/* reset the D3D11 device-context to its default state */
_SOKOL_PRIVATE void _sg_d3d11_clear_state(void) {
    /* clear all the device context state, so that resource refs don't keep stuck in the d3d device context */
    _sg_d3d11_ClearState(_sg.d3d11.ctx);
}
8875 | |
/* invalidate any cached render state (the D3D11 backend keeps no state cache
   of its own, so this is the same as clearing the device-context state)
*/
_SOKOL_PRIVATE void _sg_d3d11_reset_state_cache(void) {
    /* just clear the d3d11 device context state */
    _sg_d3d11_clear_state();
}
8880 | |
/* make a sokol-gfx context current; the ctx arg is unused since the D3D11
   backend keeps no per-context state, only the device-context state is cleared
*/
_SOKOL_PRIVATE void _sg_d3d11_activate_context(_sg_context_t* ctx) {
    _SOKOL_UNUSED(ctx);
    _sg_d3d11_clear_state();
}
8885 | |
/* create a sokol-gfx context object; a no-op on D3D11 (no per-context
   backend resources), always succeeds
*/
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
8891 | |
/* destroy a sokol-gfx context object; a no-op on D3D11 (nothing was created) */
_SOKOL_PRIVATE void _sg_d3d11_discard_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    /* empty */
}
8897 | |
/* create a buffer resource, either by wrapping an injected native D3D11
   buffer (desc->d3d11_buffer) or by creating a new ID3D11Buffer;
   returns SG_RESOURCESTATE_FAILED if D3D11 buffer creation fails
*/
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    SOKOL_ASSERT(!buf->d3d11.buf);
    _sg_buffer_common_init(&buf->cmn, desc);
    const bool injected = (0 != desc->d3d11_buffer);
    if (injected) {
        /* wrap the externally created buffer; AddRef so that the matching
           Release in _sg_d3d11_discard_buffer doesn't destroy the caller's object */
        buf->d3d11.buf = (ID3D11Buffer*) desc->d3d11_buffer;
        _sg_d3d11_AddRef(buf->d3d11.buf);
    }
    else {
        D3D11_BUFFER_DESC d3d11_desc;
        _sg_clear(&d3d11_desc, sizeof(d3d11_desc));
        d3d11_desc.ByteWidth = (UINT)buf->cmn.size;
        d3d11_desc.Usage = _sg_d3d11_usage(buf->cmn.usage);
        d3d11_desc.BindFlags = buf->cmn.type == SG_BUFFERTYPE_VERTEXBUFFER ? D3D11_BIND_VERTEX_BUFFER : D3D11_BIND_INDEX_BUFFER;
        d3d11_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(buf->cmn.usage);
        /* immutable buffers must be created with their initial content,
           dynamic/stream buffers are created empty (init_data_ptr stays 0) */
        D3D11_SUBRESOURCE_DATA* init_data_ptr = 0;
        D3D11_SUBRESOURCE_DATA init_data;
        _sg_clear(&init_data, sizeof(init_data));
        if (buf->cmn.usage == SG_USAGE_IMMUTABLE) {
            SOKOL_ASSERT(desc->data.ptr);
            init_data.pSysMem = desc->data.ptr;
            init_data_ptr = &init_data;
        }
        HRESULT hr = _sg_d3d11_CreateBuffer(_sg.d3d11.dev, &d3d11_desc, init_data_ptr, &buf->d3d11.buf);
        if (!(SUCCEEDED(hr) && buf->d3d11.buf)) {
            SG_LOG("failed to create D3D11 buffer\n");
            return SG_RESOURCESTATE_FAILED;
        }
    }
    return SG_RESOURCESTATE_VALID;
}
8930 | |
/* release the D3D11 buffer object (if one exists); for injected buffers this
   only drops the reference taken at creation, it doesn't destroy the caller's buffer
*/
_SOKOL_PRIVATE void _sg_d3d11_discard_buffer(_sg_buffer_t* buf) {
    SOKOL_ASSERT(buf);
    if (buf->d3d11.buf) {
        _sg_d3d11_Release(buf->d3d11.buf);
    }
}
8937 | |
/* fill the scratch array _sg.d3d11.subres_data with per-subresource initial-data
   pointers and pitches for texture creation; subresources are ordered with mips
   innermost per face/slice, matching how the creation code indexes them;
   for array textures the per-mip subimage data is split evenly across slices
*/
_SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image_t* img, const sg_image_data* data) {
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices:1;
    int subres_index = 0;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int slice_index = 0; slice_index < num_slices; slice_index++) {
            for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, subres_index++) {
                SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS));
                D3D11_SUBRESOURCE_DATA* subres_data = &_sg.d3d11.subres_data[subres_index];
                /* mip dimensions halve per level, clamped to a minimum of 1 */
                const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1;
                const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1;
                const sg_range* subimg_data = &(data->subimage[face_index][mip_index]);
                /* array slices share one subimage blob, offset into it per slice */
                const size_t slice_size = subimg_data->size / (size_t)num_slices;
                const size_t slice_offset = slice_size * (size_t)slice_index;
                const uint8_t* ptr = (const uint8_t*) subimg_data->ptr;
                subres_data->pSysMem = ptr + slice_offset;
                subres_data->SysMemPitch = (UINT)_sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
                if (img->cmn.type == SG_IMAGETYPE_3D) {
                    /* FIXME? const int mip_depth = ((img->depth>>mip_index)>0) ? img->depth>>mip_index : 1; */
                    subres_data->SysMemSlicePitch = (UINT)_sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1);
                }
                else {
                    /* slice pitch only meaningful for 3D textures */
                    subres_data->SysMemSlicePitch = 0;
                }
            }
        }
    }
}
8966 | |
8967 | _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const sg_image_desc* desc) { |
8968 | SOKOL_ASSERT(img && desc); |
8969 | SOKOL_ASSERT(!img->d3d11.tex2d && !img->d3d11.tex3d && !img->d3d11.texds && !img->d3d11.texmsaa); |
8970 | SOKOL_ASSERT(!img->d3d11.srv && !img->d3d11.smp); |
8971 | HRESULT hr; |
8972 | |
8973 | _sg_image_common_init(&img->cmn, desc); |
8974 | const bool injected = (0 != desc->d3d11_texture) || (0 != desc->d3d11_shader_resource_view); |
8975 | const bool msaa = (img->cmn.sample_count > 1); |
8976 | img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format); |
8977 | |
8978 | /* special case depth-stencil buffer? */ |
8979 | if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) { |
8980 | /* create only a depth-texture */ |
8981 | SOKOL_ASSERT(!injected); |
8982 | if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) { |
8983 | SG_LOG("trying to create a D3D11 depth-texture with unsupported pixel format\n"); |
8984 | return SG_RESOURCESTATE_FAILED; |
8985 | } |
8986 | D3D11_TEXTURE2D_DESC d3d11_desc; |
8987 | _sg_clear(&d3d11_desc, sizeof(d3d11_desc)); |
8988 | d3d11_desc.Width = (UINT)img->cmn.width; |
8989 | d3d11_desc.Height = (UINT)img->cmn.height; |
8990 | d3d11_desc.MipLevels = 1; |
8991 | d3d11_desc.ArraySize = 1; |
8992 | d3d11_desc.Format = img->d3d11.format; |
8993 | d3d11_desc.Usage = D3D11_USAGE_DEFAULT; |
8994 | d3d11_desc.BindFlags = D3D11_BIND_DEPTH_STENCIL; |
8995 | d3d11_desc.SampleDesc.Count = (UINT)img->cmn.sample_count; |
8996 | d3d11_desc.SampleDesc.Quality = (UINT) (msaa ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0); |
8997 | hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_desc, NULL, &img->d3d11.texds); |
8998 | if (!(SUCCEEDED(hr) && img->d3d11.texds)) { |
8999 | SG_LOG("failed to create D3D11 texture 2D\n"); |
9000 | return SG_RESOURCESTATE_FAILED; |
9001 | } |
9002 | } |
9003 | else { |
9004 | /* create (or inject) color texture and shader-resource-view */ |
9005 | |
9006 | /* prepare initial content pointers */ |
9007 | D3D11_SUBRESOURCE_DATA* init_data = 0; |
9008 | if (!injected && (img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) { |
9009 | _sg_d3d11_fill_subres_data(img, &desc->data); |
9010 | init_data = _sg.d3d11.subres_data; |
9011 | } |
9012 | if (img->cmn.type != SG_IMAGETYPE_3D) { |
9013 | /* 2D-, cube- or array-texture */ |
9014 | /* if this is an MSAA render target, the following texture will be the 'resolve-texture' */ |
9015 | |
9016 | /* first check for injected texture and/or resource view */ |
9017 | if (injected) { |
9018 | img->d3d11.tex2d = (ID3D11Texture2D*) desc->d3d11_texture; |
9019 | img->d3d11.srv = (ID3D11ShaderResourceView*) desc->d3d11_shader_resource_view; |
9020 | if (img->d3d11.tex2d) { |
9021 | _sg_d3d11_AddRef(img->d3d11.tex2d); |
9022 | } |
9023 | else { |
9024 | /* if only a shader-resource-view was provided, but no texture, lookup |
9025 | the texture from the shader-resource-view, this also bumps the refcount |
9026 | */ |
9027 | SOKOL_ASSERT(img->d3d11.srv); |
9028 | _sg_d3d11_GetResource((ID3D11View*)img->d3d11.srv, (ID3D11Resource**)&img->d3d11.tex2d); |
9029 | SOKOL_ASSERT(img->d3d11.tex2d); |
9030 | } |
9031 | if (img->d3d11.srv) { |
9032 | _sg_d3d11_AddRef(img->d3d11.srv); |
9033 | } |
9034 | } |
9035 | |
9036 | /* if not injected, create texture */ |
9037 | if (0 == img->d3d11.tex2d) { |
9038 | D3D11_TEXTURE2D_DESC d3d11_tex_desc; |
9039 | _sg_clear(&d3d11_tex_desc, sizeof(d3d11_tex_desc)); |
9040 | d3d11_tex_desc.Width = (UINT)img->cmn.width; |
9041 | d3d11_tex_desc.Height = (UINT)img->cmn.height; |
9042 | d3d11_tex_desc.MipLevels = (UINT)img->cmn.num_mipmaps; |
9043 | switch (img->cmn.type) { |
9044 | case SG_IMAGETYPE_ARRAY: d3d11_tex_desc.ArraySize = (UINT)img->cmn.num_slices; break; |
9045 | case SG_IMAGETYPE_CUBE: d3d11_tex_desc.ArraySize = 6; break; |
9046 | default: d3d11_tex_desc.ArraySize = 1; break; |
9047 | } |
9048 | d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; |
9049 | d3d11_tex_desc.Format = img->d3d11.format; |
9050 | if (img->cmn.render_target) { |
9051 | d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; |
9052 | if (!msaa) { |
9053 | d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET; |
9054 | } |
9055 | d3d11_tex_desc.CPUAccessFlags = 0; |
9056 | } |
9057 | else { |
9058 | d3d11_tex_desc.Usage = _sg_d3d11_usage(img->cmn.usage); |
9059 | d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->cmn.usage); |
9060 | } |
9061 | if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) { |
9062 | /* trying to create a texture format that's not supported by D3D */ |
9063 | SG_LOG("trying to create a D3D11 texture with unsupported pixel format\n"); |
9064 | return SG_RESOURCESTATE_FAILED; |
9065 | } |
9066 | d3d11_tex_desc.SampleDesc.Count = 1; |
9067 | d3d11_tex_desc.SampleDesc.Quality = 0; |
9068 | d3d11_tex_desc.MiscFlags = (img->cmn.type == SG_IMAGETYPE_CUBE) ? D3D11_RESOURCE_MISC_TEXTURECUBE : 0; |
9069 | |
9070 | hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11.tex2d); |
9071 | if (!(SUCCEEDED(hr) && img->d3d11.tex2d)) { |
9072 | SG_LOG("failed to create D3D11 texture 2D\n"); |
9073 | return SG_RESOURCESTATE_FAILED; |
9074 | } |
9075 | } |
9076 | |
9077 | /* ...and similar, if not injected, create shader-resource-view */ |
9078 | if (0 == img->d3d11.srv) { |
9079 | D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc; |
9080 | _sg_clear(&d3d11_srv_desc, sizeof(d3d11_srv_desc)); |
9081 | d3d11_srv_desc.Format = img->d3d11.format; |
9082 | switch (img->cmn.type) { |
9083 | case SG_IMAGETYPE_2D: |
9084 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D; |
9085 | d3d11_srv_desc.Texture2D.MipLevels = (UINT)img->cmn.num_mipmaps; |
9086 | break; |
9087 | case SG_IMAGETYPE_CUBE: |
9088 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE; |
9089 | d3d11_srv_desc.TextureCube.MipLevels = (UINT)img->cmn.num_mipmaps; |
9090 | break; |
9091 | case SG_IMAGETYPE_ARRAY: |
9092 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY; |
9093 | d3d11_srv_desc.Texture2DArray.MipLevels = (UINT)img->cmn.num_mipmaps; |
9094 | d3d11_srv_desc.Texture2DArray.ArraySize = (UINT)img->cmn.num_slices; |
9095 | break; |
9096 | default: |
9097 | SOKOL_UNREACHABLE; break; |
9098 | } |
9099 | hr = _sg_d3d11_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex2d, &d3d11_srv_desc, &img->d3d11.srv); |
9100 | if (!(SUCCEEDED(hr) && img->d3d11.srv)) { |
9101 | SG_LOG("failed to create D3D11 resource view\n"); |
9102 | return SG_RESOURCESTATE_FAILED; |
9103 | } |
9104 | } |
9105 | } |
9106 | else { |
9107 | /* 3D texture - same procedure, first check if injected, than create non-injected */ |
9108 | if (injected) { |
9109 | img->d3d11.tex3d = (ID3D11Texture3D*) desc->d3d11_texture; |
9110 | img->d3d11.srv = (ID3D11ShaderResourceView*) desc->d3d11_shader_resource_view; |
9111 | if (img->d3d11.tex3d) { |
9112 | _sg_d3d11_AddRef(img->d3d11.tex3d); |
9113 | } |
9114 | else { |
9115 | SOKOL_ASSERT(img->d3d11.srv); |
9116 | _sg_d3d11_GetResource((ID3D11View*)img->d3d11.srv, (ID3D11Resource**)&img->d3d11.tex3d); |
9117 | SOKOL_ASSERT(img->d3d11.tex3d); |
9118 | } |
9119 | if (img->d3d11.srv) { |
9120 | _sg_d3d11_AddRef(img->d3d11.srv); |
9121 | } |
9122 | } |
9123 | |
9124 | if (0 == img->d3d11.tex3d) { |
9125 | D3D11_TEXTURE3D_DESC d3d11_tex_desc; |
9126 | _sg_clear(&d3d11_tex_desc, sizeof(d3d11_tex_desc)); |
9127 | d3d11_tex_desc.Width = (UINT)img->cmn.width; |
9128 | d3d11_tex_desc.Height = (UINT)img->cmn.height; |
9129 | d3d11_tex_desc.Depth = (UINT)img->cmn.num_slices; |
9130 | d3d11_tex_desc.MipLevels = (UINT)img->cmn.num_mipmaps; |
9131 | d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; |
9132 | d3d11_tex_desc.Format = img->d3d11.format; |
9133 | if (img->cmn.render_target) { |
9134 | d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; |
9135 | if (!msaa) { |
9136 | d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET; |
9137 | } |
9138 | d3d11_tex_desc.CPUAccessFlags = 0; |
9139 | } |
9140 | else { |
9141 | d3d11_tex_desc.Usage = _sg_d3d11_usage(img->cmn.usage); |
9142 | d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->cmn.usage); |
9143 | } |
9144 | if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) { |
9145 | /* trying to create a texture format that's not supported by D3D */ |
9146 | SG_LOG("trying to create a D3D11 texture with unsupported pixel format\n"); |
9147 | return SG_RESOURCESTATE_FAILED; |
9148 | } |
9149 | hr = _sg_d3d11_CreateTexture3D(_sg.d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11.tex3d); |
9150 | if (!(SUCCEEDED(hr) && img->d3d11.tex3d)) { |
9151 | SG_LOG("failed to create D3D11 texture 3D\n"); |
9152 | return SG_RESOURCESTATE_FAILED; |
9153 | } |
9154 | } |
9155 | |
9156 | if (0 == img->d3d11.srv) { |
9157 | D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc; |
9158 | _sg_clear(&d3d11_srv_desc, sizeof(d3d11_srv_desc)); |
9159 | d3d11_srv_desc.Format = img->d3d11.format; |
9160 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D; |
9161 | d3d11_srv_desc.Texture3D.MipLevels = (UINT)img->cmn.num_mipmaps; |
9162 | hr = _sg_d3d11_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex3d, &d3d11_srv_desc, &img->d3d11.srv); |
9163 | if (!(SUCCEEDED(hr) && img->d3d11.srv)) { |
9164 | SG_LOG("failed to create D3D11 resource view\n"); |
9165 | return SG_RESOURCESTATE_FAILED; |
9166 | } |
9167 | } |
9168 | } |
9169 | |
9170 | /* also need to create a separate MSAA render target texture? */ |
9171 | if (msaa) { |
9172 | D3D11_TEXTURE2D_DESC d3d11_tex_desc; |
9173 | _sg_clear(&d3d11_tex_desc, sizeof(d3d11_tex_desc)); |
9174 | d3d11_tex_desc.Width = (UINT)img->cmn.width; |
9175 | d3d11_tex_desc.Height = (UINT)img->cmn.height; |
9176 | d3d11_tex_desc.MipLevels = 1; |
9177 | d3d11_tex_desc.ArraySize = 1; |
9178 | d3d11_tex_desc.Format = img->d3d11.format; |
9179 | d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; |
9180 | d3d11_tex_desc.BindFlags = D3D11_BIND_RENDER_TARGET; |
9181 | d3d11_tex_desc.CPUAccessFlags = 0; |
9182 | d3d11_tex_desc.SampleDesc.Count = (UINT)img->cmn.sample_count; |
9183 | d3d11_tex_desc.SampleDesc.Quality = (UINT)D3D11_STANDARD_MULTISAMPLE_PATTERN; |
9184 | hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, NULL, &img->d3d11.texmsaa); |
9185 | if (!(SUCCEEDED(hr) && img->d3d11.texmsaa)) { |
9186 | SG_LOG("failed to create D3D11 texture 2D\n"); |
9187 | return SG_RESOURCESTATE_FAILED; |
9188 | } |
9189 | } |
9190 | |
9191 | /* sampler state object, note D3D11 implements an internal shared-pool for sampler objects */ |
9192 | D3D11_SAMPLER_DESC d3d11_smp_desc; |
9193 | _sg_clear(&d3d11_smp_desc, sizeof(d3d11_smp_desc)); |
9194 | d3d11_smp_desc.Filter = _sg_d3d11_filter(img->cmn.min_filter, img->cmn.mag_filter, img->cmn.max_anisotropy); |
9195 | d3d11_smp_desc.AddressU = _sg_d3d11_address_mode(img->cmn.wrap_u); |
9196 | d3d11_smp_desc.AddressV = _sg_d3d11_address_mode(img->cmn.wrap_v); |
9197 | d3d11_smp_desc.AddressW = _sg_d3d11_address_mode(img->cmn.wrap_w); |
9198 | switch (img->cmn.border_color) { |
9199 | case SG_BORDERCOLOR_TRANSPARENT_BLACK: |
9200 | /* all 0.0f */ |
9201 | break; |
9202 | case SG_BORDERCOLOR_OPAQUE_WHITE: |
9203 | for (int i = 0; i < 4; i++) { |
9204 | d3d11_smp_desc.BorderColor[i] = 1.0f; |
9205 | } |
9206 | break; |
9207 | default: |
9208 | /* opaque black */ |
9209 | d3d11_smp_desc.BorderColor[3] = 1.0f; |
9210 | break; |
9211 | } |
9212 | d3d11_smp_desc.MaxAnisotropy = img->cmn.max_anisotropy; |
9213 | d3d11_smp_desc.ComparisonFunc = D3D11_COMPARISON_NEVER; |
9214 | d3d11_smp_desc.MinLOD = desc->min_lod; |
9215 | d3d11_smp_desc.MaxLOD = desc->max_lod; |
9216 | hr = _sg_d3d11_CreateSamplerState(_sg.d3d11.dev, &d3d11_smp_desc, &img->d3d11.smp); |
9217 | if (!(SUCCEEDED(hr) && img->d3d11.smp)) { |
9218 | SG_LOG("failed to create D3D11 sampler state\n"); |
9219 | return SG_RESOURCESTATE_FAILED; |
9220 | } |
9221 | } |
9222 | return SG_RESOURCESTATE_VALID; |
9223 | } |
9224 | |
9225 | _SOKOL_PRIVATE void _sg_d3d11_discard_image(_sg_image_t* img) { |
9226 | SOKOL_ASSERT(img); |
9227 | if (img->d3d11.tex2d) { |
9228 | _sg_d3d11_Release(img->d3d11.tex2d); |
9229 | } |
9230 | if (img->d3d11.tex3d) { |
9231 | _sg_d3d11_Release(img->d3d11.tex3d); |
9232 | } |
9233 | if (img->d3d11.texds) { |
9234 | _sg_d3d11_Release(img->d3d11.texds); |
9235 | } |
9236 | if (img->d3d11.texmsaa) { |
9237 | _sg_d3d11_Release(img->d3d11.texmsaa); |
9238 | } |
9239 | if (img->d3d11.srv) { |
9240 | _sg_d3d11_Release(img->d3d11.srv); |
9241 | } |
9242 | if (img->d3d11.smp) { |
9243 | _sg_d3d11_Release(img->d3d11.smp); |
9244 | } |
9245 | } |
9246 | |
9247 | _SOKOL_PRIVATE bool _sg_d3d11_load_d3dcompiler_dll(void) { |
9248 | /* on UWP, don't do anything (not tested) */ |
9249 | #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) |
9250 | return true; |
9251 | #else |
9252 | /* load DLL on demand */ |
9253 | if ((0 == _sg.d3d11.d3dcompiler_dll) && !_sg.d3d11.d3dcompiler_dll_load_failed) { |
9254 | _sg.d3d11.d3dcompiler_dll = LoadLibraryA("d3dcompiler_47.dll"); |
9255 | if (0 == _sg.d3d11.d3dcompiler_dll) { |
9256 | /* don't attempt to load missing DLL in the future */ |
9257 | SG_LOG("failed to load d3dcompiler_47.dll!\n"); |
9258 | _sg.d3d11.d3dcompiler_dll_load_failed = true; |
9259 | return false; |
9260 | } |
9261 | /* look up function pointers */ |
9262 | _sg.d3d11.D3DCompile_func = (pD3DCompile)(void*) GetProcAddress(_sg.d3d11.d3dcompiler_dll, "D3DCompile"); |
9263 | SOKOL_ASSERT(_sg.d3d11.D3DCompile_func); |
9264 | } |
9265 | return 0 != _sg.d3d11.d3dcompiler_dll; |
9266 | #endif |
9267 | } |
9268 | |
9269 | #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) |
9270 | #define _sg_d3d11_D3DCompile D3DCompile |
9271 | #else |
9272 | #define _sg_d3d11_D3DCompile _sg.d3d11.D3DCompile_func |
9273 | #endif |
9274 | |
9275 | _SOKOL_PRIVATE ID3DBlob* _sg_d3d11_compile_shader(const sg_shader_stage_desc* stage_desc) { |
9276 | if (!_sg_d3d11_load_d3dcompiler_dll()) { |
9277 | return NULL; |
9278 | } |
9279 | SOKOL_ASSERT(stage_desc->d3d11_target); |
9280 | ID3DBlob* output = NULL; |
9281 | ID3DBlob* errors_or_warnings = NULL; |
9282 | HRESULT hr = _sg_d3d11_D3DCompile( |
9283 | stage_desc->source, /* pSrcData */ |
9284 | strlen(stage_desc->source), /* SrcDataSize */ |
9285 | NULL, /* pSourceName */ |
9286 | NULL, /* pDefines */ |
9287 | NULL, /* pInclude */ |
9288 | stage_desc->entry ? stage_desc->entry : "main", /* pEntryPoint */ |
9289 | stage_desc->d3d11_target, /* pTarget (vs_5_0 or ps_5_0) */ |
9290 | D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR | D3DCOMPILE_OPTIMIZATION_LEVEL3, /* Flags1 */ |
9291 | 0, /* Flags2 */ |
9292 | &output, /* ppCode */ |
9293 | &errors_or_warnings); /* ppErrorMsgs */ |
9294 | if (errors_or_warnings) { |
9295 | SG_LOG((LPCSTR)_sg_d3d11_GetBufferPointer(errors_or_warnings)); |
9296 | _sg_d3d11_Release(errors_or_warnings); errors_or_warnings = NULL; |
9297 | } |
9298 | if (FAILED(hr)) { |
9299 | /* just in case, usually output is NULL here */ |
9300 | if (output) { |
9301 | _sg_d3d11_Release(output); |
9302 | output = NULL; |
9303 | } |
9304 | } |
9305 | return output; |
9306 | } |
9307 | |
/* create the D3D11 vertex- and pixel-shader pair, either from precompiled
   bytecode or by compiling HLSL source on demand; also creates one D3D11
   constant buffer per uniform block, and keeps a heap copy of the vertex
   shader bytecode which is required later for input-layout creation in
   _sg_d3d11_create_pipeline()
*/
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);
    SOKOL_ASSERT(!shd->d3d11.vs && !shd->d3d11.fs && !shd->d3d11.vs_blob);
    HRESULT hr;

    _sg_shader_common_init(&shd->cmn, desc);

    /* copy vertex attribute semantic names and indices */
    for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) {
        _sg_strcpy(&shd->d3d11.attrs[i].sem_name, desc->attrs[i].sem_name);
        shd->d3d11.attrs[i].sem_index = desc->attrs[i].sem_index;
    }

    /* shader stage uniform blocks and image slots */
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index];
        _sg_d3d11_shader_stage_t* d3d11_stage = &shd->d3d11.stage[stage_index];
        for (int ub_index = 0; ub_index < cmn_stage->num_uniform_blocks; ub_index++) {
            const _sg_uniform_block_t* ub = &cmn_stage->uniform_blocks[ub_index];

            /* create a D3D constant buffer for each uniform block */
            SOKOL_ASSERT(0 == d3d11_stage->cbufs[ub_index]);
            D3D11_BUFFER_DESC cb_desc;
            _sg_clear(&cb_desc, sizeof(cb_desc));
            /* round up to a multiple of 16 bytes (D3D11 constant buffer size requirement) */
            cb_desc.ByteWidth = (UINT)_sg_roundup((int)ub->size, 16);
            cb_desc.Usage = D3D11_USAGE_DEFAULT;
            cb_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
            hr = _sg_d3d11_CreateBuffer(_sg.d3d11.dev, &cb_desc, NULL, &d3d11_stage->cbufs[ub_index]);
            if (!(SUCCEEDED(hr) && d3d11_stage->cbufs[ub_index])) {
                SG_LOG("failed to create D3D11 buffer\n");
                return SG_RESOURCESTATE_FAILED;
            }
        }
    }

    const void* vs_ptr = 0, *fs_ptr = 0;
    SIZE_T vs_length = 0, fs_length = 0;
    ID3DBlob* vs_blob = 0, *fs_blob = 0;
    if (desc->vs.bytecode.ptr && desc->fs.bytecode.ptr) {
        /* create from shader byte code */
        vs_ptr = desc->vs.bytecode.ptr;
        fs_ptr = desc->fs.bytecode.ptr;
        vs_length = desc->vs.bytecode.size;
        fs_length = desc->fs.bytecode.size;
    }
    else {
        /* compile from shader source code */
        vs_blob = _sg_d3d11_compile_shader(&desc->vs);
        fs_blob = _sg_d3d11_compile_shader(&desc->fs);
        if (vs_blob && fs_blob) {
            vs_ptr = _sg_d3d11_GetBufferPointer(vs_blob);
            vs_length = _sg_d3d11_GetBufferSize(vs_blob);
            fs_ptr = _sg_d3d11_GetBufferPointer(fs_blob);
            fs_length = _sg_d3d11_GetBufferSize(fs_blob);
        }
    }
    sg_resource_state result = SG_RESOURCESTATE_FAILED;
    if (vs_ptr && fs_ptr && (vs_length > 0) && (fs_length > 0)) {
        /* create the D3D vertex- and pixel-shader objects */
        hr = _sg_d3d11_CreateVertexShader(_sg.d3d11.dev, vs_ptr, vs_length, NULL, &shd->d3d11.vs);
        bool vs_succeeded = SUCCEEDED(hr) && shd->d3d11.vs;
        hr = _sg_d3d11_CreatePixelShader(_sg.d3d11.dev, fs_ptr, fs_length, NULL, &shd->d3d11.fs);
        bool fs_succeeded = SUCCEEDED(hr) && shd->d3d11.fs;

        /* need to store the vertex shader byte code, this is needed later in sg_create_pipeline */
        /* NOTE(review): on partial failure (one shader created, the other not)
           nothing is released here — presumably _sg_d3d11_discard_shader() is
           invoked on the failed resource and releases whatever was set; confirm
           against the resource-pool cleanup path
        */
        if (vs_succeeded && fs_succeeded) {
            shd->d3d11.vs_blob_length = vs_length;
            shd->d3d11.vs_blob = _sg_malloc((size_t)vs_length);
            SOKOL_ASSERT(shd->d3d11.vs_blob);
            memcpy(shd->d3d11.vs_blob, vs_ptr, vs_length);
            result = SG_RESOURCESTATE_VALID;
        }
    }
    /* the compiler output blobs are no longer needed (bytecode was copied above) */
    if (vs_blob) {
        _sg_d3d11_Release(vs_blob); vs_blob = 0;
    }
    if (fs_blob) {
        _sg_d3d11_Release(fs_blob); fs_blob = 0;
    }
    return result;
}
9389 | |
9390 | _SOKOL_PRIVATE void _sg_d3d11_discard_shader(_sg_shader_t* shd) { |
9391 | SOKOL_ASSERT(shd); |
9392 | if (shd->d3d11.vs) { |
9393 | _sg_d3d11_Release(shd->d3d11.vs); |
9394 | } |
9395 | if (shd->d3d11.fs) { |
9396 | _sg_d3d11_Release(shd->d3d11.fs); |
9397 | } |
9398 | if (shd->d3d11.vs_blob) { |
9399 | _sg_free(shd->d3d11.vs_blob); |
9400 | } |
9401 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
9402 | _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index]; |
9403 | _sg_d3d11_shader_stage_t* d3d11_stage = &shd->d3d11.stage[stage_index]; |
9404 | for (int ub_index = 0; ub_index < cmn_stage->num_uniform_blocks; ub_index++) { |
9405 | if (d3d11_stage->cbufs[ub_index]) { |
9406 | _sg_d3d11_Release(d3d11_stage->cbufs[ub_index]); |
9407 | } |
9408 | } |
9409 | } |
9410 | } |
9411 | |
/* create all D3D11 state objects for a pipeline: the input layout (built
   against the vertex shader bytecode cached in _sg_d3d11_create_shader()),
   rasterizer state, depth-stencil state and blend state; returns
   SG_RESOURCESTATE_FAILED on the first object that could not be created
   (already created objects are not released here)
*/
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(pip && shd && desc);
    SOKOL_ASSERT(desc->shader.id == shd->slot.id);
    SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_VALID);
    SOKOL_ASSERT(shd->d3d11.vs_blob && shd->d3d11.vs_blob_length > 0);
    SOKOL_ASSERT(!pip->d3d11.il && !pip->d3d11.rs && !pip->d3d11.dss && !pip->d3d11.bs);

    pip->shader = shd;
    _sg_pipeline_common_init(&pip->cmn, desc);
    pip->d3d11.index_format = _sg_d3d11_index_format(pip->cmn.index_type);
    pip->d3d11.topology = _sg_d3d11_primitive_topology(desc->primitive_type);
    pip->d3d11.stencil_ref = desc->stencil.ref;

    /* create input layout object */
    HRESULT hr;
    D3D11_INPUT_ELEMENT_DESC d3d11_comps[SG_MAX_VERTEX_ATTRIBUTES];
    _sg_clear(d3d11_comps, sizeof(d3d11_comps));
    int attr_index = 0;
    /* the attribute list ends at the first SG_VERTEXFORMAT_INVALID entry;
       attr_index afterwards holds the number of active vertex attributes */
    for (; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS);
        const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index];
        const sg_vertex_step step_func = l_desc->step_func;
        const int step_rate = l_desc->step_rate;
        D3D11_INPUT_ELEMENT_DESC* d3d11_comp = &d3d11_comps[attr_index];
        /* semantic name/index were copied from the shader desc in _sg_d3d11_create_shader() */
        d3d11_comp->SemanticName = _sg_strptr(&shd->d3d11.attrs[attr_index].sem_name);
        d3d11_comp->SemanticIndex = (UINT)shd->d3d11.attrs[attr_index].sem_index;
        d3d11_comp->Format = _sg_d3d11_vertex_format(a_desc->format);
        d3d11_comp->InputSlot = (UINT)a_desc->buffer_index;
        d3d11_comp->AlignedByteOffset = (UINT)a_desc->offset;
        d3d11_comp->InputSlotClass = _sg_d3d11_input_classification(step_func);
        if (SG_VERTEXSTEP_PER_INSTANCE == step_func) {
            d3d11_comp->InstanceDataStepRate = (UINT)step_rate;
            /* any per-instance attribute switches the pipeline to instanced drawing */
            pip->cmn.use_instanced_draw = true;
        }
        pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
    }
    /* record vertex buffer strides for the bind slots actually referenced by attributes */
    for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) {
        if (pip->cmn.vertex_layout_valid[layout_index]) {
            const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index];
            SOKOL_ASSERT(l_desc->stride > 0);
            pip->d3d11.vb_strides[layout_index] = (UINT)l_desc->stride;
        }
        else {
            pip->d3d11.vb_strides[layout_index] = 0;
        }
    }
    hr = _sg_d3d11_CreateInputLayout(_sg.d3d11.dev,
        d3d11_comps,                /* pInputElementDesc */
        (UINT)attr_index,           /* NumElements */
        shd->d3d11.vs_blob,         /* pShaderByteCodeWithInputSignature */
        shd->d3d11.vs_blob_length,  /* BytecodeLength */
        &pip->d3d11.il);
    if (!(SUCCEEDED(hr) && pip->d3d11.il)) {
        SG_LOG("failed to create D3D11 input layout\n");
        return SG_RESOURCESTATE_FAILED;
    }

    /* create rasterizer state */
    D3D11_RASTERIZER_DESC rs_desc;
    _sg_clear(&rs_desc, sizeof(rs_desc));
    rs_desc.FillMode = D3D11_FILL_SOLID;
    rs_desc.CullMode = _sg_d3d11_cull_mode(desc->cull_mode);
    rs_desc.FrontCounterClockwise = desc->face_winding == SG_FACEWINDING_CCW;
    rs_desc.DepthBias = (INT) pip->cmn.depth_bias;
    rs_desc.DepthBiasClamp = pip->cmn.depth_bias_clamp;
    rs_desc.SlopeScaledDepthBias = pip->cmn.depth_bias_slope_scale;
    rs_desc.DepthClipEnable = TRUE;
    /* scissor test is always enabled, _sg_d3d11_begin_pass() resets the
       scissor rect to cover the whole render target */
    rs_desc.ScissorEnable = TRUE;
    rs_desc.MultisampleEnable = desc->sample_count > 1;
    rs_desc.AntialiasedLineEnable = FALSE;
    hr = _sg_d3d11_CreateRasterizerState(_sg.d3d11.dev, &rs_desc, &pip->d3d11.rs);
    if (!(SUCCEEDED(hr) && pip->d3d11.rs)) {
        SG_LOG("failed to create D3D11 rasterizer state\n");
        return SG_RESOURCESTATE_FAILED;
    }

    /* create depth-stencil state */
    D3D11_DEPTH_STENCIL_DESC dss_desc;
    _sg_clear(&dss_desc, sizeof(dss_desc));
    dss_desc.DepthEnable = TRUE;
    dss_desc.DepthWriteMask = desc->depth.write_enabled ? D3D11_DEPTH_WRITE_MASK_ALL : D3D11_DEPTH_WRITE_MASK_ZERO;
    dss_desc.DepthFunc = _sg_d3d11_compare_func(desc->depth.compare);
    dss_desc.StencilEnable = desc->stencil.enabled;
    dss_desc.StencilReadMask = desc->stencil.read_mask;
    dss_desc.StencilWriteMask = desc->stencil.write_mask;
    const sg_stencil_face_state* sf = &desc->stencil.front;
    dss_desc.FrontFace.StencilFailOp = _sg_d3d11_stencil_op(sf->fail_op);
    dss_desc.FrontFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sf->depth_fail_op);
    dss_desc.FrontFace.StencilPassOp = _sg_d3d11_stencil_op(sf->pass_op);
    dss_desc.FrontFace.StencilFunc = _sg_d3d11_compare_func(sf->compare);
    const sg_stencil_face_state* sb = &desc->stencil.back;
    dss_desc.BackFace.StencilFailOp = _sg_d3d11_stencil_op(sb->fail_op);
    dss_desc.BackFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sb->depth_fail_op);
    dss_desc.BackFace.StencilPassOp = _sg_d3d11_stencil_op(sb->pass_op);
    dss_desc.BackFace.StencilFunc = _sg_d3d11_compare_func(sb->compare);
    hr = _sg_d3d11_CreateDepthStencilState(_sg.d3d11.dev, &dss_desc, &pip->d3d11.dss);
    if (!(SUCCEEDED(hr) && pip->d3d11.dss)) {
        SG_LOG("failed to create D3D11 depth stencil state\n");
        return SG_RESOURCESTATE_FAILED;
    }

    /* create blend state */
    D3D11_BLEND_DESC bs_desc;
    _sg_clear(&bs_desc, sizeof(bs_desc));
    bs_desc.AlphaToCoverageEnable = desc->alpha_to_coverage_enabled;
    bs_desc.IndependentBlendEnable = TRUE;
    {
        int i = 0;
        /* one blend-state entry per color attachment... */
        for (i = 0; i < desc->color_count; i++) {
            const sg_blend_state* src = &desc->colors[i].blend;
            D3D11_RENDER_TARGET_BLEND_DESC* dst = &bs_desc.RenderTarget[i];
            dst->BlendEnable = src->enabled;
            dst->SrcBlend = _sg_d3d11_blend_factor(src->src_factor_rgb);
            dst->DestBlend = _sg_d3d11_blend_factor(src->dst_factor_rgb);
            dst->BlendOp = _sg_d3d11_blend_op(src->op_rgb);
            dst->SrcBlendAlpha = _sg_d3d11_blend_factor(src->src_factor_alpha);
            dst->DestBlendAlpha = _sg_d3d11_blend_factor(src->dst_factor_alpha);
            dst->BlendOpAlpha = _sg_d3d11_blend_op(src->op_alpha);
            dst->RenderTargetWriteMask = _sg_d3d11_color_write_mask(desc->colors[i].write_mask);
        }
        /* ...remaining entries (D3D11 supports 8 render targets) get blending-disabled defaults */
        for (; i < 8; i++) {
            D3D11_RENDER_TARGET_BLEND_DESC* dst = &bs_desc.RenderTarget[i];
            dst->BlendEnable = FALSE;
            dst->SrcBlend = dst->SrcBlendAlpha = D3D11_BLEND_ONE;
            dst->DestBlend = dst->DestBlendAlpha = D3D11_BLEND_ZERO;
            dst->BlendOp = dst->BlendOpAlpha = D3D11_BLEND_OP_ADD;
            dst->RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
        }
    }
    hr = _sg_d3d11_CreateBlendState(_sg.d3d11.dev, &bs_desc, &pip->d3d11.bs);
    if (!(SUCCEEDED(hr) && pip->d3d11.bs)) {
        SG_LOG("failed to create D3D11 blend state\n");
        return SG_RESOURCESTATE_FAILED;
    }

    return SG_RESOURCESTATE_VALID;
}
9553 | |
9554 | _SOKOL_PRIVATE void _sg_d3d11_discard_pipeline(_sg_pipeline_t* pip) { |
9555 | SOKOL_ASSERT(pip); |
9556 | if (pip == _sg.d3d11.cur_pipeline) { |
9557 | _sg.d3d11.cur_pipeline = 0; |
9558 | _sg.d3d11.cur_pipeline_id.id = SG_INVALID_ID; |
9559 | } |
9560 | if (pip->d3d11.il) { |
9561 | _sg_d3d11_Release(pip->d3d11.il); |
9562 | } |
9563 | if (pip->d3d11.rs) { |
9564 | _sg_d3d11_Release(pip->d3d11.rs); |
9565 | } |
9566 | if (pip->d3d11.dss) { |
9567 | _sg_d3d11_Release(pip->d3d11.dss); |
9568 | } |
9569 | if (pip->d3d11.bs) { |
9570 | _sg_d3d11_Release(pip->d3d11.bs); |
9571 | } |
9572 | } |
9573 | |
/* create a render pass object: one D3D11 render-target-view per color
   attachment and an optional depth-stencil-view; the view dimension is
   selected from the attachment image's type and MSAA state
*/
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && desc);
    SOKOL_ASSERT(att_images && att_images[0]);
    SOKOL_ASSERT(_sg.d3d11.dev);

    _sg_pass_common_init(&pass->cmn, desc);

    for (int i = 0; i < pass->cmn.num_color_atts; i++) {
        const sg_pass_attachment_desc* att_desc = &desc->color_attachments[i];
        /* att_desc is only read by the asserts below, avoid unused warning in release builds */
        _SOKOL_UNUSED(att_desc);
        SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
        _sg_image_t* att_img = att_images[i];
        SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_img->cmn.pixel_format));
        SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].image);
        pass->d3d11.color_atts[i].image = att_img;

        /* create D3D11 render-target-view */
        const _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i];
        SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].rtv);
        ID3D11Resource* d3d11_res = 0;
        const bool is_msaa = att_img->cmn.sample_count > 1;
        D3D11_RENDER_TARGET_VIEW_DESC d3d11_rtv_desc;
        _sg_clear(&d3d11_rtv_desc, sizeof(d3d11_rtv_desc));
        d3d11_rtv_desc.Format = att_img->d3d11.format;
        if ((att_img->cmn.type == SG_IMAGETYPE_2D) || is_msaa) {
            if (is_msaa) {
                /* MSAA rendering goes into the separate multisample texture,
                   which is resolved into tex2d in _sg_d3d11_end_pass() */
                d3d11_res = (ID3D11Resource*) att_img->d3d11.texmsaa;
                d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMS;
            }
            else {
                d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d;
                d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
                d3d11_rtv_desc.Texture2D.MipSlice = (UINT)cmn_att->mip_level;
            }
        }
        else if ((att_img->cmn.type == SG_IMAGETYPE_CUBE) || (att_img->cmn.type == SG_IMAGETYPE_ARRAY)) {
            /* cube and array images are both backed by a 2D-array texture,
               render into a single slice of it */
            d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d;
            d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY;
            d3d11_rtv_desc.Texture2DArray.MipSlice = (UINT)cmn_att->mip_level;
            d3d11_rtv_desc.Texture2DArray.FirstArraySlice = (UINT)cmn_att->slice;
            d3d11_rtv_desc.Texture2DArray.ArraySize = 1;
        }
        else {
            SOKOL_ASSERT(att_img->cmn.type == SG_IMAGETYPE_3D);
            /* render into a single depth slice of a 3D texture */
            d3d11_res = (ID3D11Resource*) att_img->d3d11.tex3d;
            d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE3D;
            d3d11_rtv_desc.Texture3D.MipSlice = (UINT)cmn_att->mip_level;
            d3d11_rtv_desc.Texture3D.FirstWSlice = (UINT)cmn_att->slice;
            d3d11_rtv_desc.Texture3D.WSize = 1;
        }
        SOKOL_ASSERT(d3d11_res);
        HRESULT hr = _sg_d3d11_CreateRenderTargetView(_sg.d3d11.dev, d3d11_res, &d3d11_rtv_desc, &pass->d3d11.color_atts[i].rtv);
        if (!(SUCCEEDED(hr) && pass->d3d11.color_atts[i].rtv)) {
            SG_LOG("failed to create D3D11 render target view\n");
            return SG_RESOURCESTATE_FAILED;
        }
    }

    /* optional depth-stencil image */
    SOKOL_ASSERT(0 == pass->d3d11.ds_att.image);
    SOKOL_ASSERT(0 == pass->d3d11.ds_att.dsv);
    if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) {
        /* the depth-stencil image is passed after the color attachments in att_images */
        const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
        const sg_pass_attachment_desc* att_desc = &desc->depth_stencil_attachment;
        _SOKOL_UNUSED(att_desc);
        _sg_image_t* att_img = att_images[ds_img_index];
        SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_img->cmn.pixel_format));
        SOKOL_ASSERT(0 == pass->d3d11.ds_att.image);
        pass->d3d11.ds_att.image = att_img;

        /* create D3D11 depth-stencil-view */
        D3D11_DEPTH_STENCIL_VIEW_DESC d3d11_dsv_desc;
        _sg_clear(&d3d11_dsv_desc, sizeof(d3d11_dsv_desc));
        d3d11_dsv_desc.Format = att_img->d3d11.format;
        const bool is_msaa = att_img->cmn.sample_count > 1;
        if (is_msaa) {
            d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS;
        }
        else {
            d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
        }
        ID3D11Resource* d3d11_res = (ID3D11Resource*) att_img->d3d11.texds;
        SOKOL_ASSERT(d3d11_res);
        HRESULT hr = _sg_d3d11_CreateDepthStencilView(_sg.d3d11.dev, d3d11_res, &d3d11_dsv_desc, &pass->d3d11.ds_att.dsv);
        if (!(SUCCEEDED(hr) && pass->d3d11.ds_att.dsv)) {
            SG_LOG("failed to create D3D11 depth stencil view\n");
            return SG_RESOURCESTATE_FAILED;
        }
    }
    return SG_RESOURCESTATE_VALID;
}
9667 | |
9668 | _SOKOL_PRIVATE void _sg_d3d11_discard_pass(_sg_pass_t* pass) { |
9669 | SOKOL_ASSERT(pass); |
9670 | SOKOL_ASSERT(pass != _sg.d3d11.cur_pass); |
9671 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
9672 | if (pass->d3d11.color_atts[i].rtv) { |
9673 | _sg_d3d11_Release(pass->d3d11.color_atts[i].rtv); |
9674 | } |
9675 | } |
9676 | if (pass->d3d11.ds_att.dsv) { |
9677 | _sg_d3d11_Release(pass->d3d11.ds_att.dsv); |
9678 | } |
9679 | } |
9680 | |
9681 | _SOKOL_PRIVATE _sg_image_t* _sg_d3d11_pass_color_image(const _sg_pass_t* pass, int index) { |
9682 | SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); |
9683 | /* NOTE: may return null */ |
9684 | return pass->d3d11.color_atts[index].image; |
9685 | } |
9686 | |
9687 | _SOKOL_PRIVATE _sg_image_t* _sg_d3d11_pass_ds_image(const _sg_pass_t* pass) { |
9688 | /* NOTE: may return null */ |
9689 | SOKOL_ASSERT(pass); |
9690 | return pass->d3d11.ds_att.image; |
9691 | } |
9692 | |
/* begin a render pass: bind the pass's render-target- and depth-stencil-
   views (or the default framebuffer views obtained via the user-provided
   callbacks when pass is null), reset viewport and scissor to the full
   render area, and execute the clear actions
*/
_SOKOL_PRIVATE void _sg_d3d11_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    SOKOL_ASSERT(action);
    SOKOL_ASSERT(!_sg.d3d11.in_pass);
    SOKOL_ASSERT(_sg.d3d11.rtv_cb || _sg.d3d11.rtv_userdata_cb);
    SOKOL_ASSERT(_sg.d3d11.dsv_cb || _sg.d3d11.dsv_userdata_cb);
    _sg.d3d11.in_pass = true;
    _sg.d3d11.cur_width = w;
    _sg.d3d11.cur_height = h;
    if (pass) {
        /* offscreen pass: use the views created in _sg_d3d11_create_pass() */
        _sg.d3d11.cur_pass = pass;
        _sg.d3d11.cur_pass_id.id = pass->slot.id;
        _sg.d3d11.num_rtvs = 0;
        for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            _sg.d3d11.cur_rtvs[i] = pass->d3d11.color_atts[i].rtv;
            if (_sg.d3d11.cur_rtvs[i]) {
                _sg.d3d11.num_rtvs++;
            }
        }
        _sg.d3d11.cur_dsv = pass->d3d11.ds_att.dsv;
    }
    else {
        /* render to default frame buffer */
        _sg.d3d11.cur_pass = 0;
        _sg.d3d11.cur_pass_id.id = SG_INVALID_ID;
        _sg.d3d11.num_rtvs = 1;
        /* the default-framebuffer views are queried each pass through the
           callbacks provided at sg_setup() time (with or without userdata) */
        if (_sg.d3d11.rtv_cb) {
            _sg.d3d11.cur_rtvs[0] = (ID3D11RenderTargetView*) _sg.d3d11.rtv_cb();
        }
        else {
            _sg.d3d11.cur_rtvs[0] = (ID3D11RenderTargetView*) _sg.d3d11.rtv_userdata_cb(_sg.d3d11.user_data);
        }
        for (int i = 1; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            _sg.d3d11.cur_rtvs[i] = 0;
        }
        if (_sg.d3d11.dsv_cb) {
            _sg.d3d11.cur_dsv = (ID3D11DepthStencilView*) _sg.d3d11.dsv_cb();
        }
        else {
            _sg.d3d11.cur_dsv = (ID3D11DepthStencilView*) _sg.d3d11.dsv_userdata_cb(_sg.d3d11.user_data);
        }
        SOKOL_ASSERT(_sg.d3d11.cur_rtvs[0] && _sg.d3d11.cur_dsv);
    }
    /* apply the render-target- and depth-stencil-views */
    _sg_d3d11_OMSetRenderTargets(_sg.d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg.d3d11.cur_rtvs, _sg.d3d11.cur_dsv);

    /* set viewport and scissor rect to cover whole screen */
    D3D11_VIEWPORT vp;
    _sg_clear(&vp, sizeof(vp));
    vp.Width = (FLOAT) w;
    vp.Height = (FLOAT) h;
    vp.MaxDepth = 1.0f;
    _sg_d3d11_RSSetViewports(_sg.d3d11.ctx, 1, &vp);
    D3D11_RECT rect;
    rect.left = 0;
    rect.top = 0;
    rect.right = w;
    rect.bottom = h;
    _sg_d3d11_RSSetScissorRects(_sg.d3d11.ctx, 1, &rect);

    /* perform clear action */
    for (int i = 0; i < _sg.d3d11.num_rtvs; i++) {
        if (action->colors[i].action == SG_ACTION_CLEAR) {
            /* &value.r passes the RGBA clear color as a float[4] */
            _sg_d3d11_ClearRenderTargetView(_sg.d3d11.ctx, _sg.d3d11.cur_rtvs[i], &action->colors[i].value.r);
        }
    }
    /* depth and stencil are cleared with a single call, combined via flags */
    UINT ds_flags = 0;
    if (action->depth.action == SG_ACTION_CLEAR) {
        ds_flags |= D3D11_CLEAR_DEPTH;
    }
    if (action->stencil.action == SG_ACTION_CLEAR) {
        ds_flags |= D3D11_CLEAR_STENCIL;
    }
    if ((0 != ds_flags) && _sg.d3d11.cur_dsv) {
        _sg_d3d11_ClearDepthStencilView(_sg.d3d11.ctx, _sg.d3d11.cur_dsv, ds_flags, action->depth.value, action->stencil.value);
    }
}
9769 | |
9770 | /* D3D11CalcSubresource only exists for C++ */ |
9771 | _SOKOL_PRIVATE UINT _sg_d3d11_calcsubresource(UINT mip_slice, UINT array_slice, UINT mip_levels) { |
9772 | return mip_slice + array_slice * mip_levels; |
9773 | } |
9774 | |
/* finish the current render pass: resolve any MSAA color attachments into
   their non-MSAA textures, then reset the cached pass/pipeline state and
   clear the D3D11 device-context state
*/
_SOKOL_PRIVATE void _sg_d3d11_end_pass(void) {
    SOKOL_ASSERT(_sg.d3d11.in_pass && _sg.d3d11.ctx);
    _sg.d3d11.in_pass = false;

    /* need to resolve MSAA render target into texture? */
    if (_sg.d3d11.cur_pass) {
        SOKOL_ASSERT(_sg.d3d11.cur_pass->slot.id == _sg.d3d11.cur_pass_id.id);
        for (int i = 0; i < _sg.d3d11.num_rtvs; i++) {
            _sg_pass_attachment_t* cmn_att = &_sg.d3d11.cur_pass->cmn.color_atts[i];
            _sg_image_t* att_img = _sg.d3d11.cur_pass->d3d11.color_atts[i].image;
            SOKOL_ASSERT(att_img && (att_img->slot.id == cmn_att->image_id.id));
            if (att_img->cmn.sample_count > 1) {
                /* FIXME: support MSAA resolve into 3D texture */
                SOKOL_ASSERT(att_img->d3d11.tex2d && att_img->d3d11.texmsaa && !att_img->d3d11.tex3d);
                SOKOL_ASSERT(DXGI_FORMAT_UNKNOWN != att_img->d3d11.format);
                /* resolve into the attachment's mip/slice subresource of the non-MSAA texture */
                UINT dst_subres = _sg_d3d11_calcsubresource((UINT)cmn_att->mip_level, (UINT)cmn_att->slice, (UINT)att_img->cmn.num_mipmaps);
                _sg_d3d11_ResolveSubresource(_sg.d3d11.ctx,
                    (ID3D11Resource*) att_img->d3d11.tex2d,     /* pDstResource */
                    dst_subres,                                 /* DstSubresource */
                    (ID3D11Resource*) att_img->d3d11.texmsaa,   /* pSrcResource */
                    0,                                          /* SrcSubresource */
                    att_img->d3d11.format);
            }
        }
    }
    /* invalidate all cached per-pass state */
    _sg.d3d11.cur_pass = 0;
    _sg.d3d11.cur_pass_id.id = SG_INVALID_ID;
    _sg.d3d11.cur_pipeline = 0;
    _sg.d3d11.cur_pipeline_id.id = SG_INVALID_ID;
    for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
        _sg.d3d11.cur_rtvs[i] = 0;
    }
    _sg.d3d11.cur_dsv = 0;
    _sg_d3d11_clear_state();
}
9810 | |
9811 | _SOKOL_PRIVATE void _sg_d3d11_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { |
9812 | SOKOL_ASSERT(_sg.d3d11.ctx); |
9813 | SOKOL_ASSERT(_sg.d3d11.in_pass); |
9814 | D3D11_VIEWPORT vp; |
9815 | vp.TopLeftX = (FLOAT) x; |
9816 | vp.TopLeftY = (FLOAT) (origin_top_left ? y : (_sg.d3d11.cur_height - (y + h))); |
9817 | vp.Width = (FLOAT) w; |
9818 | vp.Height = (FLOAT) h; |
9819 | vp.MinDepth = 0.0f; |
9820 | vp.MaxDepth = 1.0f; |
9821 | _sg_d3d11_RSSetViewports(_sg.d3d11.ctx, 1, &vp); |
9822 | } |
9823 | |
9824 | _SOKOL_PRIVATE void _sg_d3d11_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { |
9825 | SOKOL_ASSERT(_sg.d3d11.ctx); |
9826 | SOKOL_ASSERT(_sg.d3d11.in_pass); |
9827 | D3D11_RECT rect; |
9828 | rect.left = x; |
9829 | rect.top = (origin_top_left ? y : (_sg.d3d11.cur_height - (y + h))); |
9830 | rect.right = x + w; |
9831 | rect.bottom = origin_top_left ? (y + h) : (_sg.d3d11.cur_height - y); |
9832 | _sg_d3d11_RSSetScissorRects(_sg.d3d11.ctx, 1, &rect); |
9833 | } |
9834 | |
/* apply a pipeline object: cache the pipeline and its draw parameters
   for the following apply_uniforms/draw calls, and set all D3D11
   render state objects and shaders associated with the pipeline
*/
_SOKOL_PRIVATE void _sg_d3d11_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(_sg.d3d11.in_pass);
    SOKOL_ASSERT(pip->d3d11.rs && pip->d3d11.bs && pip->d3d11.dss && pip->d3d11.il);

    /* cache pipeline state needed later by _sg_d3d11_draw() */
    _sg.d3d11.cur_pipeline = pip;
    _sg.d3d11.cur_pipeline_id.id = pip->slot.id;
    /* an index format of DXGI_FORMAT_UNKNOWN means 'no index buffer' */
    _sg.d3d11.use_indexed_draw = (pip->d3d11.index_format != DXGI_FORMAT_UNKNOWN);
    _sg.d3d11.use_instanced_draw = pip->cmn.use_instanced_draw;

    /* rasterizer-, depth-stencil- and blend-state, input assembly state */
    _sg_d3d11_RSSetState(_sg.d3d11.ctx, pip->d3d11.rs);
    _sg_d3d11_OMSetDepthStencilState(_sg.d3d11.ctx, pip->d3d11.dss, pip->d3d11.stencil_ref);
    _sg_d3d11_OMSetBlendState(_sg.d3d11.ctx, pip->d3d11.bs, &pip->cmn.blend_color.r, 0xFFFFFFFF);
    _sg_d3d11_IASetPrimitiveTopology(_sg.d3d11.ctx, pip->d3d11.topology);
    _sg_d3d11_IASetInputLayout(_sg.d3d11.ctx, pip->d3d11.il);
    /* vertex- and pixel-shader with their uniform-block constant buffers */
    _sg_d3d11_VSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.vs, NULL, 0);
    _sg_d3d11_VSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->d3d11.stage[SG_SHADERSTAGE_VS].cbufs);
    _sg_d3d11_PSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.fs, NULL, 0);
    _sg_d3d11_PSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->d3d11.stage[SG_SHADERSTAGE_FS].cbufs);
}
9857 | |
9858 | _SOKOL_PRIVATE void _sg_d3d11_apply_bindings( |
9859 | _sg_pipeline_t* pip, |
9860 | _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, |
9861 | _sg_buffer_t* ib, int ib_offset, |
9862 | _sg_image_t** vs_imgs, int num_vs_imgs, |
9863 | _sg_image_t** fs_imgs, int num_fs_imgs) |
9864 | { |
9865 | SOKOL_ASSERT(pip); |
9866 | SOKOL_ASSERT(_sg.d3d11.ctx); |
9867 | SOKOL_ASSERT(_sg.d3d11.in_pass); |
9868 | |
9869 | /* gather all the D3D11 resources into arrays */ |
9870 | ID3D11Buffer* d3d11_ib = ib ? ib->d3d11.buf : 0; |
9871 | ID3D11Buffer* d3d11_vbs[SG_MAX_SHADERSTAGE_BUFFERS]; |
9872 | UINT d3d11_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; |
9873 | ID3D11ShaderResourceView* d3d11_vs_srvs[SG_MAX_SHADERSTAGE_IMAGES]; |
9874 | ID3D11SamplerState* d3d11_vs_smps[SG_MAX_SHADERSTAGE_IMAGES]; |
9875 | ID3D11ShaderResourceView* d3d11_fs_srvs[SG_MAX_SHADERSTAGE_IMAGES]; |
9876 | ID3D11SamplerState* d3d11_fs_smps[SG_MAX_SHADERSTAGE_IMAGES]; |
9877 | int i; |
9878 | for (i = 0; i < num_vbs; i++) { |
9879 | SOKOL_ASSERT(vbs[i]->d3d11.buf); |
9880 | d3d11_vbs[i] = vbs[i]->d3d11.buf; |
9881 | d3d11_vb_offsets[i] = (UINT)vb_offsets[i]; |
9882 | } |
9883 | for (; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { |
9884 | d3d11_vbs[i] = 0; |
9885 | d3d11_vb_offsets[i] = 0; |
9886 | } |
9887 | for (i = 0; i < num_vs_imgs; i++) { |
9888 | SOKOL_ASSERT(vs_imgs[i]->d3d11.srv); |
9889 | SOKOL_ASSERT(vs_imgs[i]->d3d11.smp); |
9890 | d3d11_vs_srvs[i] = vs_imgs[i]->d3d11.srv; |
9891 | d3d11_vs_smps[i] = vs_imgs[i]->d3d11.smp; |
9892 | } |
9893 | for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
9894 | d3d11_vs_srvs[i] = 0; |
9895 | d3d11_vs_smps[i] = 0; |
9896 | } |
9897 | for (i = 0; i < num_fs_imgs; i++) { |
9898 | SOKOL_ASSERT(fs_imgs[i]->d3d11.srv); |
9899 | SOKOL_ASSERT(fs_imgs[i]->d3d11.smp); |
9900 | d3d11_fs_srvs[i] = fs_imgs[i]->d3d11.srv; |
9901 | d3d11_fs_smps[i] = fs_imgs[i]->d3d11.smp; |
9902 | } |
9903 | for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
9904 | d3d11_fs_srvs[i] = 0; |
9905 | d3d11_fs_smps[i] = 0; |
9906 | } |
9907 | |
9908 | _sg_d3d11_IASetVertexBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, d3d11_vbs, pip->d3d11.vb_strides, d3d11_vb_offsets); |
9909 | _sg_d3d11_IASetIndexBuffer(_sg.d3d11.ctx, d3d11_ib, pip->d3d11.index_format, (UINT)ib_offset); |
9910 | _sg_d3d11_VSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_srvs); |
9911 | _sg_d3d11_VSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_smps); |
9912 | _sg_d3d11_PSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_srvs); |
9913 | _sg_d3d11_PSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_smps); |
9914 | } |
9915 | |
/* copy uniform data into the constant buffer associated with the
   (stage, uniform-block) slot of the currently applied pipeline's shader;
   the data size must exactly match the declared uniform block size
*/
_SOKOL_PRIVATE void _sg_d3d11_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) {
    SOKOL_ASSERT(_sg.d3d11.ctx && _sg.d3d11.in_pass);
    SOKOL_ASSERT(_sg.d3d11.cur_pipeline && _sg.d3d11.cur_pipeline->slot.id == _sg.d3d11.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.d3d11.cur_pipeline->shader && _sg.d3d11.cur_pipeline->shader->slot.id == _sg.d3d11.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
    SOKOL_ASSERT(data->size == _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);
    ID3D11Buffer* cb = _sg.d3d11.cur_pipeline->shader->d3d11.stage[stage_index].cbufs[ub_index];
    SOKOL_ASSERT(cb);
    /* constant buffers are already bound by _sg_d3d11_apply_pipeline(),
       only the buffer content needs to be updated here */
    _sg_d3d11_UpdateSubresource(_sg.d3d11.ctx, (ID3D11Resource*)cb, 0, NULL, data->ptr, 0, 0);
}
9926 | |
9927 | _SOKOL_PRIVATE void _sg_d3d11_draw(int base_element, int num_elements, int num_instances) { |
9928 | SOKOL_ASSERT(_sg.d3d11.in_pass); |
9929 | if (_sg.d3d11.use_indexed_draw) { |
9930 | if (_sg.d3d11.use_instanced_draw) { |
9931 | _sg_d3d11_DrawIndexedInstanced(_sg.d3d11.ctx, (UINT)num_elements, (UINT)num_instances, (UINT)base_element, 0, 0); |
9932 | } |
9933 | else { |
9934 | _sg_d3d11_DrawIndexed(_sg.d3d11.ctx, (UINT)num_elements, (UINT)base_element, 0); |
9935 | } |
9936 | } |
9937 | else { |
9938 | if (_sg.d3d11.use_instanced_draw) { |
9939 | _sg_d3d11_DrawInstanced(_sg.d3d11.ctx, (UINT)num_elements, (UINT)num_instances, (UINT)base_element, 0); |
9940 | } |
9941 | else { |
9942 | _sg_d3d11_Draw(_sg.d3d11.ctx, (UINT)num_elements, (UINT)base_element); |
9943 | } |
9944 | } |
9945 | } |
9946 | |
/* end-of-frame hook; the D3D11 backend has no per-frame work to do here,
   only sanity-check that begin/end pass calls were balanced
*/
_SOKOL_PRIVATE void _sg_d3d11_commit(void) {
    SOKOL_ASSERT(!_sg.d3d11.in_pass);
}
9950 | |
9951 | _SOKOL_PRIVATE void _sg_d3d11_update_buffer(_sg_buffer_t* buf, const sg_range* data) { |
9952 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
9953 | SOKOL_ASSERT(_sg.d3d11.ctx); |
9954 | SOKOL_ASSERT(buf->d3d11.buf); |
9955 | D3D11_MAPPED_SUBRESOURCE d3d11_msr; |
9956 | HRESULT hr = _sg_d3d11_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr); |
9957 | if (SUCCEEDED(hr)) { |
9958 | memcpy(d3d11_msr.pData, data->ptr, data->size); |
9959 | _sg_d3d11_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0); |
9960 | } else { |
9961 | SG_LOG("failed to map buffer while updating!\n"); |
9962 | } |
9963 | } |
9964 | |
9965 | _SOKOL_PRIVATE int _sg_d3d11_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { |
9966 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
9967 | SOKOL_ASSERT(_sg.d3d11.ctx); |
9968 | SOKOL_ASSERT(buf->d3d11.buf); |
9969 | D3D11_MAP map_type = new_frame ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE_NO_OVERWRITE; |
9970 | D3D11_MAPPED_SUBRESOURCE d3d11_msr; |
9971 | HRESULT hr = _sg_d3d11_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, map_type, 0, &d3d11_msr); |
9972 | if (SUCCEEDED(hr)) { |
9973 | uint8_t* dst_ptr = (uint8_t*)d3d11_msr.pData + buf->cmn.append_pos; |
9974 | memcpy(dst_ptr, data->ptr, data->size); |
9975 | _sg_d3d11_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0); |
9976 | } else { |
9977 | SG_LOG("failed to map buffer while appending!\n"); |
9978 | } |
9979 | /* NOTE: this alignment is a requirement from WebGPU, but we want identical behaviour across all backend */ |
9980 | return _sg_roundup((int)data->size, 4); |
9981 | } |
9982 | |
/* copy new content into a dynamic/stream image; iterates over all
   cube faces, array slices and mip levels, mapping each D3D11
   subresource and copying row by row when the source row pitch
   differs from the mapped destination pitch
*/
_SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_data* data) {
    SOKOL_ASSERT(img && data);
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(img->d3d11.tex2d || img->d3d11.tex3d);
    /* pick the native texture object (2D-, cube- and array-textures
       all live in tex2d, volume textures in tex3d) */
    ID3D11Resource* d3d11_res = 0;
    if (img->d3d11.tex3d) {
        d3d11_res = (ID3D11Resource*) img->d3d11.tex3d;
    }
    else {
        d3d11_res = (ID3D11Resource*) img->d3d11.tex2d;
    }
    SOKOL_ASSERT(d3d11_res);
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices:1;
    UINT subres_index = 0;
    HRESULT hr;
    D3D11_MAPPED_SUBRESOURCE d3d11_msr;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int slice_index = 0; slice_index < num_slices; slice_index++) {
            for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, subres_index++) {
                SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS));
                /* mip dimensions are halved per level but never drop below 1 */
                const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1;
                const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1;
                const int src_pitch = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
                /* for array textures, the provided subimage data contains all
                   slices back to back, compute this slice's sub-range */
                const sg_range* subimg_data = &(data->subimage[face_index][mip_index]);
                const size_t slice_size = subimg_data->size / (size_t)num_slices;
                const size_t slice_offset = slice_size * (size_t)slice_index;
                const uint8_t* slice_ptr = ((const uint8_t*)subimg_data->ptr) + slice_offset;
                hr = _sg_d3d11_Map(_sg.d3d11.ctx, d3d11_res, subres_index, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr);
                if (SUCCEEDED(hr)) {
                    /* FIXME: need to handle difference in depth-pitch for 3D textures as well! */
                    if (src_pitch == (int)d3d11_msr.RowPitch) {
                        /* fast path: pitches match, copy the whole slice at once */
                        memcpy(d3d11_msr.pData, slice_ptr, slice_size);
                    }
                    else {
                        /* slow path: destination rows are padded, copy row by row */
                        SOKOL_ASSERT(src_pitch < (int)d3d11_msr.RowPitch);
                        const uint8_t* src_ptr = slice_ptr;
                        uint8_t* dst_ptr = (uint8_t*) d3d11_msr.pData;
                        for (int row_index = 0; row_index < mip_height; row_index++) {
                            memcpy(dst_ptr, src_ptr, (size_t)src_pitch);
                            src_ptr += src_pitch;
                            dst_ptr += d3d11_msr.RowPitch;
                        }
                    }
                    _sg_d3d11_Unmap(_sg.d3d11.ctx, d3d11_res, subres_index);
                } else {
                    SG_LOG("failed to map texture!\n");
                }
            }
        }
    }
}
10035 | |
10036 | /*== METAL BACKEND IMPLEMENTATION ============================================*/ |
10037 | #elif defined(SOKOL_METAL) |
10038 | |
/* Objective-C reference counting helpers: under ARC, retain is implicit
   and 'release' only needs to clear the reference; under manual
   reference counting, explicit retain/release calls are required
*/
#if __has_feature(objc_arc)
#define _SG_OBJC_RETAIN(obj) { }
#define _SG_OBJC_RELEASE(obj) { obj = nil; }
#else
#define _SG_OBJC_RETAIN(obj) { [obj retain]; }
#define _SG_OBJC_RELEASE(obj) { [obj release]; obj = nil; }
#endif
10046 | |
10047 | /*-- enum translation functions ----------------------------------------------*/ |
/* translate sg_action into MTLLoadAction */
_SOKOL_PRIVATE MTLLoadAction _sg_mtl_load_action(sg_action a) {
    if (a == SG_ACTION_CLEAR) {
        return MTLLoadActionClear;
    }
    if (a == SG_ACTION_LOAD) {
        return MTLLoadActionLoad;
    }
    if (a == SG_ACTION_DONTCARE) {
        return MTLLoadActionDontCare;
    }
    SOKOL_UNREACHABLE;
    return (MTLLoadAction)0;
}
10056 | |
/* translate sg_usage into MTLResourceOptions for buffer creation;
   managed storage mode is only available on macOS, iOS uses shared
   storage, and CPU-updated buffers use write-combined cache mode
*/
_SOKOL_PRIVATE MTLResourceOptions _sg_mtl_buffer_resource_options(sg_usage usg) {
    switch (usg) {
        case SG_USAGE_IMMUTABLE:
            #if defined(_SG_TARGET_MACOS)
            return MTLResourceStorageModeManaged;
            #else
            return MTLResourceStorageModeShared;
            #endif
        case SG_USAGE_DYNAMIC:
        case SG_USAGE_STREAM:
            #if defined(_SG_TARGET_MACOS)
            return MTLResourceCPUCacheModeWriteCombined|MTLResourceStorageModeManaged;
            #else
            return MTLResourceCPUCacheModeWriteCombined|MTLResourceStorageModeShared;
            #endif
        default:
            SOKOL_UNREACHABLE;
            return 0;
    }
}
10077 | |
/* translate sg_vertex_step into MTLVertexStepFunction */
_SOKOL_PRIVATE MTLVertexStepFunction _sg_mtl_step_function(sg_vertex_step step) {
    if (step == SG_VERTEXSTEP_PER_VERTEX) {
        return MTLVertexStepFunctionPerVertex;
    }
    if (step == SG_VERTEXSTEP_PER_INSTANCE) {
        return MTLVertexStepFunctionPerInstance;
    }
    SOKOL_UNREACHABLE;
    return (MTLVertexStepFunction)0;
}
10085 | |
/* translate sg_vertex_format into MTLVertexFormat (1:1 mapping) */
_SOKOL_PRIVATE MTLVertexFormat _sg_mtl_vertex_format(sg_vertex_format fmt) {
    switch (fmt) {
        case SG_VERTEXFORMAT_FLOAT:     return MTLVertexFormatFloat;
        case SG_VERTEXFORMAT_FLOAT2:    return MTLVertexFormatFloat2;
        case SG_VERTEXFORMAT_FLOAT3:    return MTLVertexFormatFloat3;
        case SG_VERTEXFORMAT_FLOAT4:    return MTLVertexFormatFloat4;
        case SG_VERTEXFORMAT_BYTE4:     return MTLVertexFormatChar4;
        case SG_VERTEXFORMAT_BYTE4N:    return MTLVertexFormatChar4Normalized;
        case SG_VERTEXFORMAT_UBYTE4:    return MTLVertexFormatUChar4;
        case SG_VERTEXFORMAT_UBYTE4N:   return MTLVertexFormatUChar4Normalized;
        case SG_VERTEXFORMAT_SHORT2:    return MTLVertexFormatShort2;
        case SG_VERTEXFORMAT_SHORT2N:   return MTLVertexFormatShort2Normalized;
        case SG_VERTEXFORMAT_USHORT2N:  return MTLVertexFormatUShort2Normalized;
        case SG_VERTEXFORMAT_SHORT4:    return MTLVertexFormatShort4;
        case SG_VERTEXFORMAT_SHORT4N:   return MTLVertexFormatShort4Normalized;
        case SG_VERTEXFORMAT_USHORT4N:  return MTLVertexFormatUShort4Normalized;
        case SG_VERTEXFORMAT_UINT10_N2: return MTLVertexFormatUInt1010102Normalized;
        default: SOKOL_UNREACHABLE; return (MTLVertexFormat)0;
    }
}
10106 | |
/* translate sg_primitive_type into MTLPrimitiveType */
_SOKOL_PRIVATE MTLPrimitiveType _sg_mtl_primitive_type(sg_primitive_type t) {
    if (t == SG_PRIMITIVETYPE_POINTS) {
        return MTLPrimitiveTypePoint;
    }
    if (t == SG_PRIMITIVETYPE_LINES) {
        return MTLPrimitiveTypeLine;
    }
    if (t == SG_PRIMITIVETYPE_LINE_STRIP) {
        return MTLPrimitiveTypeLineStrip;
    }
    if (t == SG_PRIMITIVETYPE_TRIANGLES) {
        return MTLPrimitiveTypeTriangle;
    }
    if (t == SG_PRIMITIVETYPE_TRIANGLE_STRIP) {
        return MTLPrimitiveTypeTriangleStrip;
    }
    SOKOL_UNREACHABLE;
    return (MTLPrimitiveType)0;
}
10117 | |
/* translate sg_pixel_format into MTLPixelFormat; compressed formats
   differ by platform (BC formats on macOS, PVRTC/ETC2 on iOS), and
   unsupported formats map to MTLPixelFormatInvalid
*/
_SOKOL_PRIVATE MTLPixelFormat _sg_mtl_pixel_format(sg_pixel_format fmt) {
    switch (fmt) {
        case SG_PIXELFORMAT_R8:                 return MTLPixelFormatR8Unorm;
        case SG_PIXELFORMAT_R8SN:               return MTLPixelFormatR8Snorm;
        case SG_PIXELFORMAT_R8UI:               return MTLPixelFormatR8Uint;
        case SG_PIXELFORMAT_R8SI:               return MTLPixelFormatR8Sint;
        case SG_PIXELFORMAT_R16:                return MTLPixelFormatR16Unorm;
        case SG_PIXELFORMAT_R16SN:              return MTLPixelFormatR16Snorm;
        case SG_PIXELFORMAT_R16UI:              return MTLPixelFormatR16Uint;
        case SG_PIXELFORMAT_R16SI:              return MTLPixelFormatR16Sint;
        case SG_PIXELFORMAT_R16F:               return MTLPixelFormatR16Float;
        case SG_PIXELFORMAT_RG8:                return MTLPixelFormatRG8Unorm;
        case SG_PIXELFORMAT_RG8SN:              return MTLPixelFormatRG8Snorm;
        case SG_PIXELFORMAT_RG8UI:              return MTLPixelFormatRG8Uint;
        case SG_PIXELFORMAT_RG8SI:              return MTLPixelFormatRG8Sint;
        case SG_PIXELFORMAT_R32UI:              return MTLPixelFormatR32Uint;
        case SG_PIXELFORMAT_R32SI:              return MTLPixelFormatR32Sint;
        case SG_PIXELFORMAT_R32F:               return MTLPixelFormatR32Float;
        case SG_PIXELFORMAT_RG16:               return MTLPixelFormatRG16Unorm;
        case SG_PIXELFORMAT_RG16SN:             return MTLPixelFormatRG16Snorm;
        case SG_PIXELFORMAT_RG16UI:             return MTLPixelFormatRG16Uint;
        case SG_PIXELFORMAT_RG16SI:             return MTLPixelFormatRG16Sint;
        case SG_PIXELFORMAT_RG16F:              return MTLPixelFormatRG16Float;
        case SG_PIXELFORMAT_RGBA8:              return MTLPixelFormatRGBA8Unorm;
        case SG_PIXELFORMAT_RGBA8SN:            return MTLPixelFormatRGBA8Snorm;
        case SG_PIXELFORMAT_RGBA8UI:            return MTLPixelFormatRGBA8Uint;
        case SG_PIXELFORMAT_RGBA8SI:            return MTLPixelFormatRGBA8Sint;
        case SG_PIXELFORMAT_BGRA8:              return MTLPixelFormatBGRA8Unorm;
        case SG_PIXELFORMAT_RGB10A2:            return MTLPixelFormatRGB10A2Unorm;
        case SG_PIXELFORMAT_RG11B10F:           return MTLPixelFormatRG11B10Float;
        case SG_PIXELFORMAT_RGB9E5:             return MTLPixelFormatRGB9E5Float;
        case SG_PIXELFORMAT_RG32UI:             return MTLPixelFormatRG32Uint;
        case SG_PIXELFORMAT_RG32SI:             return MTLPixelFormatRG32Sint;
        case SG_PIXELFORMAT_RG32F:              return MTLPixelFormatRG32Float;
        case SG_PIXELFORMAT_RGBA16:             return MTLPixelFormatRGBA16Unorm;
        case SG_PIXELFORMAT_RGBA16SN:           return MTLPixelFormatRGBA16Snorm;
        case SG_PIXELFORMAT_RGBA16UI:           return MTLPixelFormatRGBA16Uint;
        case SG_PIXELFORMAT_RGBA16SI:           return MTLPixelFormatRGBA16Sint;
        case SG_PIXELFORMAT_RGBA16F:            return MTLPixelFormatRGBA16Float;
        case SG_PIXELFORMAT_RGBA32UI:           return MTLPixelFormatRGBA32Uint;
        case SG_PIXELFORMAT_RGBA32SI:           return MTLPixelFormatRGBA32Sint;
        case SG_PIXELFORMAT_RGBA32F:            return MTLPixelFormatRGBA32Float;
        case SG_PIXELFORMAT_DEPTH:              return MTLPixelFormatDepth32Float;
        case SG_PIXELFORMAT_DEPTH_STENCIL:      return MTLPixelFormatDepth32Float_Stencil8;
        #if defined(_SG_TARGET_MACOS)
        case SG_PIXELFORMAT_BC1_RGBA:           return MTLPixelFormatBC1_RGBA;
        case SG_PIXELFORMAT_BC2_RGBA:           return MTLPixelFormatBC2_RGBA;
        case SG_PIXELFORMAT_BC3_RGBA:           return MTLPixelFormatBC3_RGBA;
        case SG_PIXELFORMAT_BC4_R:              return MTLPixelFormatBC4_RUnorm;
        case SG_PIXELFORMAT_BC4_RSN:            return MTLPixelFormatBC4_RSnorm;
        case SG_PIXELFORMAT_BC5_RG:             return MTLPixelFormatBC5_RGUnorm;
        case SG_PIXELFORMAT_BC5_RGSN:           return MTLPixelFormatBC5_RGSnorm;
        case SG_PIXELFORMAT_BC6H_RGBF:          return MTLPixelFormatBC6H_RGBFloat;
        case SG_PIXELFORMAT_BC6H_RGBUF:         return MTLPixelFormatBC6H_RGBUfloat;
        case SG_PIXELFORMAT_BC7_RGBA:           return MTLPixelFormatBC7_RGBAUnorm;
        #else
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:     return MTLPixelFormatPVRTC_RGB_2BPP;
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:     return MTLPixelFormatPVRTC_RGB_4BPP;
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:    return MTLPixelFormatPVRTC_RGBA_2BPP;
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:    return MTLPixelFormatPVRTC_RGBA_4BPP;
        case SG_PIXELFORMAT_ETC2_RGB8:          return MTLPixelFormatETC2_RGB8;
        case SG_PIXELFORMAT_ETC2_RGB8A1:        return MTLPixelFormatETC2_RGB8A1;
        case SG_PIXELFORMAT_ETC2_RGBA8:         return MTLPixelFormatEAC_RGBA8;
        case SG_PIXELFORMAT_ETC2_RG11:          return MTLPixelFormatEAC_RG11Unorm;
        case SG_PIXELFORMAT_ETC2_RG11SN:        return MTLPixelFormatEAC_RG11Snorm;
        #endif
        default: return MTLPixelFormatInvalid;
    }
}
10187 | |
/* translate sg_color_mask bit flags into an MTLColorWriteMask */
_SOKOL_PRIVATE MTLColorWriteMask _sg_mtl_color_write_mask(sg_color_mask m) {
    MTLColorWriteMask res = MTLColorWriteMaskNone;
    res |= (m & SG_COLORMASK_R) ? MTLColorWriteMaskRed   : MTLColorWriteMaskNone;
    res |= (m & SG_COLORMASK_G) ? MTLColorWriteMaskGreen : MTLColorWriteMaskNone;
    res |= (m & SG_COLORMASK_B) ? MTLColorWriteMaskBlue  : MTLColorWriteMaskNone;
    res |= (m & SG_COLORMASK_A) ? MTLColorWriteMaskAlpha : MTLColorWriteMaskNone;
    return res;
}
10204 | |
/* translate sg_blend_op into MTLBlendOperation */
_SOKOL_PRIVATE MTLBlendOperation _sg_mtl_blend_op(sg_blend_op op) {
    if (op == SG_BLENDOP_ADD) {
        return MTLBlendOperationAdd;
    }
    if (op == SG_BLENDOP_SUBTRACT) {
        return MTLBlendOperationSubtract;
    }
    if (op == SG_BLENDOP_REVERSE_SUBTRACT) {
        return MTLBlendOperationReverseSubtract;
    }
    SOKOL_UNREACHABLE;
    return (MTLBlendOperation)0;
}
10213 | |
/* translate sg_blend_factor into MTLBlendFactor (1:1 mapping) */
_SOKOL_PRIVATE MTLBlendFactor _sg_mtl_blend_factor(sg_blend_factor f) {
    switch (f) {
        case SG_BLENDFACTOR_ZERO:                   return MTLBlendFactorZero;
        case SG_BLENDFACTOR_ONE:                    return MTLBlendFactorOne;
        case SG_BLENDFACTOR_SRC_COLOR:              return MTLBlendFactorSourceColor;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR:    return MTLBlendFactorOneMinusSourceColor;
        case SG_BLENDFACTOR_SRC_ALPHA:              return MTLBlendFactorSourceAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA:    return MTLBlendFactorOneMinusSourceAlpha;
        case SG_BLENDFACTOR_DST_COLOR:              return MTLBlendFactorDestinationColor;
        case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR:    return MTLBlendFactorOneMinusDestinationColor;
        case SG_BLENDFACTOR_DST_ALPHA:              return MTLBlendFactorDestinationAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA:    return MTLBlendFactorOneMinusDestinationAlpha;
        case SG_BLENDFACTOR_SRC_ALPHA_SATURATED:    return MTLBlendFactorSourceAlphaSaturated;
        case SG_BLENDFACTOR_BLEND_COLOR:            return MTLBlendFactorBlendColor;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR:  return MTLBlendFactorOneMinusBlendColor;
        case SG_BLENDFACTOR_BLEND_ALPHA:            return MTLBlendFactorBlendAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA:  return MTLBlendFactorOneMinusBlendAlpha;
        default: SOKOL_UNREACHABLE; return (MTLBlendFactor)0;
    }
}
10234 | |
/* translate sg_compare_func into MTLCompareFunction (1:1 mapping) */
_SOKOL_PRIVATE MTLCompareFunction _sg_mtl_compare_func(sg_compare_func f) {
    switch (f) {
        case SG_COMPAREFUNC_NEVER:          return MTLCompareFunctionNever;
        case SG_COMPAREFUNC_LESS:           return MTLCompareFunctionLess;
        case SG_COMPAREFUNC_EQUAL:          return MTLCompareFunctionEqual;
        case SG_COMPAREFUNC_LESS_EQUAL:     return MTLCompareFunctionLessEqual;
        case SG_COMPAREFUNC_GREATER:        return MTLCompareFunctionGreater;
        case SG_COMPAREFUNC_NOT_EQUAL:      return MTLCompareFunctionNotEqual;
        case SG_COMPAREFUNC_GREATER_EQUAL:  return MTLCompareFunctionGreaterEqual;
        case SG_COMPAREFUNC_ALWAYS:         return MTLCompareFunctionAlways;
        default: SOKOL_UNREACHABLE; return (MTLCompareFunction)0;
    }
}
10248 | |
/* translate sg_stencil_op into MTLStencilOperation (1:1 mapping) */
_SOKOL_PRIVATE MTLStencilOperation _sg_mtl_stencil_op(sg_stencil_op op) {
    switch (op) {
        case SG_STENCILOP_KEEP:         return MTLStencilOperationKeep;
        case SG_STENCILOP_ZERO:         return MTLStencilOperationZero;
        case SG_STENCILOP_REPLACE:      return MTLStencilOperationReplace;
        case SG_STENCILOP_INCR_CLAMP:   return MTLStencilOperationIncrementClamp;
        case SG_STENCILOP_DECR_CLAMP:   return MTLStencilOperationDecrementClamp;
        case SG_STENCILOP_INVERT:       return MTLStencilOperationInvert;
        case SG_STENCILOP_INCR_WRAP:    return MTLStencilOperationIncrementWrap;
        case SG_STENCILOP_DECR_WRAP:    return MTLStencilOperationDecrementWrap;
        default: SOKOL_UNREACHABLE; return (MTLStencilOperation)0;
    }
}
10262 | |
/* translate sg_cull_mode into MTLCullMode */
_SOKOL_PRIVATE MTLCullMode _sg_mtl_cull_mode(sg_cull_mode m) {
    if (m == SG_CULLMODE_NONE) {
        return MTLCullModeNone;
    }
    if (m == SG_CULLMODE_FRONT) {
        return MTLCullModeFront;
    }
    if (m == SG_CULLMODE_BACK) {
        return MTLCullModeBack;
    }
    SOKOL_UNREACHABLE;
    return (MTLCullMode)0;
}
10271 | |
/* translate sg_face_winding into MTLWinding */
_SOKOL_PRIVATE MTLWinding _sg_mtl_winding(sg_face_winding w) {
    if (w == SG_FACEWINDING_CW) {
        return MTLWindingClockwise;
    }
    if (w == SG_FACEWINDING_CCW) {
        return MTLWindingCounterClockwise;
    }
    SOKOL_UNREACHABLE;
    return (MTLWinding)0;
}
10279 | |
/* translate sg_index_type into MTLIndexType (must not be called
   with SG_INDEXTYPE_NONE)
*/
_SOKOL_PRIVATE MTLIndexType _sg_mtl_index_type(sg_index_type t) {
    if (t == SG_INDEXTYPE_UINT16) {
        return MTLIndexTypeUInt16;
    }
    if (t == SG_INDEXTYPE_UINT32) {
        return MTLIndexTypeUInt32;
    }
    SOKOL_UNREACHABLE;
    return (MTLIndexType)0;
}
10287 | |
/* return the size in bytes of a single index for an sg_index_type */
_SOKOL_PRIVATE int _sg_mtl_index_size(sg_index_type t) {
    if (t == SG_INDEXTYPE_NONE) {
        return 0;
    }
    if (t == SG_INDEXTYPE_UINT16) {
        return 2;
    }
    if (t == SG_INDEXTYPE_UINT32) {
        return 4;
    }
    SOKOL_UNREACHABLE;
    return 0;
}
10296 | |
/* translate sg_image_type into MTLTextureType */
_SOKOL_PRIVATE MTLTextureType _sg_mtl_texture_type(sg_image_type t) {
    if (t == SG_IMAGETYPE_2D) {
        return MTLTextureType2D;
    }
    if (t == SG_IMAGETYPE_CUBE) {
        return MTLTextureTypeCube;
    }
    if (t == SG_IMAGETYPE_3D) {
        return MTLTextureType3D;
    }
    if (t == SG_IMAGETYPE_ARRAY) {
        return MTLTextureType2DArray;
    }
    SOKOL_UNREACHABLE;
    return (MTLTextureType)0;
}
10306 | |
/* return true if the pixel format is one of the PVRTC compressed formats */
_SOKOL_PRIVATE bool _sg_mtl_is_pvrtc(sg_pixel_format fmt) {
    return (fmt == SG_PIXELFORMAT_PVRTC_RGB_2BPP)
        || (fmt == SG_PIXELFORMAT_PVRTC_RGB_4BPP)
        || (fmt == SG_PIXELFORMAT_PVRTC_RGBA_2BPP)
        || (fmt == SG_PIXELFORMAT_PVRTC_RGBA_4BPP);
}
10318 | |
/* translate sg_wrap into MTLSamplerAddressMode; clamp-to-border is
   only supported on macOS and falls back to clamp-to-edge elsewhere
*/
_SOKOL_PRIVATE MTLSamplerAddressMode _sg_mtl_address_mode(sg_wrap w) {
    switch (w) {
        case SG_WRAP_REPEAT:            return MTLSamplerAddressModeRepeat;
        case SG_WRAP_CLAMP_TO_EDGE:     return MTLSamplerAddressModeClampToEdge;
        #if defined(_SG_TARGET_MACOS)
        case SG_WRAP_CLAMP_TO_BORDER:   return MTLSamplerAddressModeClampToBorderColor;
        #else
        /* clamp-to-border not supported on iOS, fall back to clamp-to-edge */
        case SG_WRAP_CLAMP_TO_BORDER:   return MTLSamplerAddressModeClampToEdge;
        #endif
        case SG_WRAP_MIRRORED_REPEAT:   return MTLSamplerAddressModeMirrorRepeat;
        default: SOKOL_UNREACHABLE; return (MTLSamplerAddressMode)0;
    }
}
10333 | |
10334 | #if defined(_SG_TARGET_MACOS) |
/* translate sg_border_color into MTLSamplerBorderColor (macOS only) */
_SOKOL_PRIVATE MTLSamplerBorderColor _sg_mtl_border_color(sg_border_color c) {
    if (c == SG_BORDERCOLOR_TRANSPARENT_BLACK) {
        return MTLSamplerBorderColorTransparentBlack;
    }
    if (c == SG_BORDERCOLOR_OPAQUE_BLACK) {
        return MTLSamplerBorderColorOpaqueBlack;
    }
    if (c == SG_BORDERCOLOR_OPAQUE_WHITE) {
        return MTLSamplerBorderColorOpaqueWhite;
    }
    SOKOL_UNREACHABLE;
    return (MTLSamplerBorderColor)0;
}
10343 | #endif |
10344 | |
/* map an sg_filter to the min/mag part of a Metal sampler filter
   (the mipmap part is handled by _sg_mtl_mip_filter())
*/
_SOKOL_PRIVATE MTLSamplerMinMagFilter _sg_mtl_minmag_filter(sg_filter f) {
    if ((f == SG_FILTER_NEAREST) ||
        (f == SG_FILTER_NEAREST_MIPMAP_NEAREST) ||
        (f == SG_FILTER_NEAREST_MIPMAP_LINEAR))
    {
        return MTLSamplerMinMagFilterNearest;
    }
    if ((f == SG_FILTER_LINEAR) ||
        (f == SG_FILTER_LINEAR_MIPMAP_NEAREST) ||
        (f == SG_FILTER_LINEAR_MIPMAP_LINEAR))
    {
        return MTLSamplerMinMagFilterLinear;
    }
    SOKOL_UNREACHABLE;
    return (MTLSamplerMinMagFilter)0;
}
10359 | |
/* map an sg_filter to the mipmap part of a Metal sampler filter */
_SOKOL_PRIVATE MTLSamplerMipFilter _sg_mtl_mip_filter(sg_filter f) {
    if ((f == SG_FILTER_NEAREST) || (f == SG_FILTER_LINEAR)) {
        return MTLSamplerMipFilterNotMipmapped;
    }
    if ((f == SG_FILTER_NEAREST_MIPMAP_NEAREST) || (f == SG_FILTER_LINEAR_MIPMAP_NEAREST)) {
        return MTLSamplerMipFilterNearest;
    }
    if ((f == SG_FILTER_NEAREST_MIPMAP_LINEAR) || (f == SG_FILTER_LINEAR_MIPMAP_LINEAR)) {
        return MTLSamplerMipFilterLinear;
    }
    SOKOL_UNREACHABLE;
    return (MTLSamplerMipFilter)0;
}
10375 | |
10376 | /*-- a pool for all Metal resource objects, with deferred release queue -------*/ |
10377 | |
/* initialize the Metal object id-pool: an NSMutableArray which keeps
   all Metal objects alive, plus a free-slot queue and a deferred-release
   queue (objects must outlive the frames still in flight that use them)
*/
_SOKOL_PRIVATE void _sg_mtl_init_pool(const sg_desc* desc) {
    /* worst-case number of Metal objects; the per-resource multipliers
       presumably reflect the max number of Metal objects per sokol
       resource type, the outer 2x leaves room for deferred releases
       -- TODO confirm against resource creation code */
    _sg.mtl.idpool.num_slots = 2 *
        (
            2 * desc->buffer_pool_size +
            5 * desc->image_pool_size +
            4 * desc->shader_pool_size +
            2 * desc->pipeline_pool_size +
            desc->pass_pool_size
        );
    _sg.mtl.idpool.pool = [NSMutableArray arrayWithCapacity:(NSUInteger)_sg.mtl.idpool.num_slots];
    _SG_OBJC_RETAIN(_sg.mtl.idpool.pool);
    /* fill the pool with NSNull placeholders for 'empty slot' */
    NSNull* null = [NSNull null];
    for (int i = 0; i < _sg.mtl.idpool.num_slots; i++) {
        [_sg.mtl.idpool.pool addObject:null];
    }
    SOKOL_ASSERT([_sg.mtl.idpool.pool count] == (NSUInteger)_sg.mtl.idpool.num_slots);
    /* a queue of currently free slot indices */
    _sg.mtl.idpool.free_queue_top = 0;
    _sg.mtl.idpool.free_queue = (int*)_sg_malloc_clear((size_t)_sg.mtl.idpool.num_slots * sizeof(int));
    /* pool slot 0 is reserved! */
    for (int i = _sg.mtl.idpool.num_slots-1; i >= 1; i--) {
        _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = i;
    }
    /* a circular queue which holds release items (frame index
       when a resource is to be released, and the resource's
       pool index)
    */
    _sg.mtl.idpool.release_queue_front = 0;
    _sg.mtl.idpool.release_queue_back = 0;
    _sg.mtl.idpool.release_queue = (_sg_mtl_release_item_t*)_sg_malloc_clear((size_t)_sg.mtl.idpool.num_slots * sizeof(_sg_mtl_release_item_t));
    for (int i = 0; i < _sg.mtl.idpool.num_slots; i++) {
        _sg.mtl.idpool.release_queue[i].frame_index = 0;
        _sg.mtl.idpool.release_queue[i].slot_index = _SG_MTL_INVALID_SLOT_INDEX;
    }
}
10413 | |
/* tear down the Metal object id-pool, releasing the backing
   NSMutableArray and both index queues
*/
_SOKOL_PRIVATE void _sg_mtl_destroy_pool(void) {
    _SG_OBJC_RELEASE(_sg.mtl.idpool.pool);
    _sg_free(_sg.mtl.idpool.free_queue);
    _sg.mtl.idpool.free_queue = 0;
    _sg_free(_sg.mtl.idpool.release_queue);
    _sg.mtl.idpool.release_queue = 0;
}
10419 | |
10420 | /* get a new free resource pool slot */ |
/* pop a free resource pool slot index off the free-queue */
_SOKOL_PRIVATE int _sg_mtl_alloc_pool_slot(void) {
    SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top > 0);
    _sg.mtl.idpool.free_queue_top--;
    const int slot_index = _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top];
    SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
    return slot_index;
}
10427 | |
10428 | /* put a free resource pool slot back into the free-queue */ |
/* push a no-longer-used resource pool slot index back onto the free-queue */
_SOKOL_PRIVATE void _sg_mtl_free_pool_slot(int slot_index) {
    SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
    SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top < _sg.mtl.idpool.num_slots);
    const int top = _sg.mtl.idpool.free_queue_top++;
    _sg.mtl.idpool.free_queue[top] = slot_index;
}
10434 | |
10435 | /* add an MTLResource to the pool, return pool index or 0 if input was 'nil' */ |
/* add a Metal resource object to the id-pool and return its slot index,
   or _SG_MTL_INVALID_SLOT_INDEX if a nil object was passed in
*/
_SOKOL_PRIVATE int _sg_mtl_add_resource(id res) {
    if (res == nil) {
        return _SG_MTL_INVALID_SLOT_INDEX;
    }
    const int slot_index = _sg_mtl_alloc_pool_slot();
    /* NOTE: the NSMutableArray will take ownership of its items,
       the slot must currently hold the NSNull placeholder */
    SOKOL_ASSERT([NSNull null] == _sg.mtl.idpool.pool[(NSUInteger)slot_index]);
    _sg.mtl.idpool.pool[(NSUInteger)slot_index] = res;
    return slot_index;
}
10446 | |
/* mark an MTLResource for deferred release: the resource is placed into the
   release-queue and actually released N frames later by _sg_mtl_garbage_collect()
   (when the GPU can no longer be using it); the special slot index 0 is ignored
   (it means a nil value was passed to _sg_mtl_add_resource())
*/
_SOKOL_PRIVATE void _sg_mtl_release_resource(uint32_t frame_index, int slot_index) {
    if (_SG_MTL_INVALID_SLOT_INDEX == slot_index) {
        return;
    }
    SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
    SOKOL_ASSERT([NSNull null] != _sg.mtl.idpool.pool[(NSUInteger)slot_index]);
    const int release_index = _sg.mtl.idpool.release_queue_front++;
    if (_sg.mtl.idpool.release_queue_front >= _sg.mtl.idpool.num_slots) {
        /* wrap-around */
        _sg.mtl.idpool.release_queue_front = 0;
    }
    /* the release-queue must never become full */
    SOKOL_ASSERT(_sg.mtl.idpool.release_queue_front != _sg.mtl.idpool.release_queue_back);
    SOKOL_ASSERT(0 == _sg.mtl.idpool.release_queue[release_index].frame_index);
    /* the resource becomes safe to release once all in-flight frames have drained */
    _sg.mtl.idpool.release_queue[release_index].slot_index = slot_index;
    _sg.mtl.idpool.release_queue[release_index].frame_index = frame_index + SG_NUM_INFLIGHT_FRAMES + 1;
}
10470 | |
/* run a garbage-collection pass over the release-queue, releasing all
   resources whose safe-to-release frame index has been reached
*/
_SOKOL_PRIVATE void _sg_mtl_garbage_collect(uint32_t frame_index) {
    for (;;) {
        const int back = _sg.mtl.idpool.release_queue_back;
        if (back == _sg.mtl.idpool.release_queue_front) {
            /* queue is empty */
            break;
        }
        if (frame_index < _sg.mtl.idpool.release_queue[back].frame_index) {
            /* no need to check further, release-items past this one are too young */
            break;
        }
        /* safe to release this resource */
        const int slot_index = _sg.mtl.idpool.release_queue[back].slot_index;
        SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
        /* note: the NSMutableArray takes ownership of its items, overwriting
           an item with NSNull releases the object, no matter if using ARC or not
        */
        SOKOL_ASSERT(_sg.mtl.idpool.pool[(NSUInteger)slot_index] != [NSNull null]);
        _sg.mtl.idpool.pool[(NSUInteger)slot_index] = [NSNull null];
        /* recycle the now free pool slot */
        _sg_mtl_free_pool_slot(slot_index);
        /* reset the release-queue item and advance the back index (with wrap-around) */
        _sg.mtl.idpool.release_queue[back].frame_index = 0;
        _sg.mtl.idpool.release_queue[back].slot_index = _SG_MTL_INVALID_SLOT_INDEX;
        _sg.mtl.idpool.release_queue_back = ((back + 1) >= _sg.mtl.idpool.num_slots) ? 0 : (back + 1);
    }
}
10498 | |
/* resolve a resource pool slot index into the stored ObjC object */
_SOKOL_PRIVATE id _sg_mtl_id(int slot_index) {
    const NSUInteger pool_index = (NSUInteger)slot_index;
    return _sg.mtl.idpool.pool[pool_index];
}
10502 | |
/* initialize the sampler-state cache with the configured capacity */
_SOKOL_PRIVATE void _sg_mtl_init_sampler_cache(const sg_desc* desc) {
    SOKOL_ASSERT(desc->sampler_cache_size > 0);
    _sg_smpcache_init(&_sg.mtl.sampler_cache, desc->sampler_cache_size);
}
10507 | |
/* destroy the sampler cache, and release all sampler objects */
_SOKOL_PRIVATE void _sg_mtl_destroy_sampler_cache(uint32_t frame_index) {
    SOKOL_ASSERT(_sg.mtl.sampler_cache.items);
    SOKOL_ASSERT(_sg.mtl.sampler_cache.num_items <= _sg.mtl.sampler_cache.capacity);
    /* each cached item stores a resource-pool slot index (as uintptr_t),
       put them all into the deferred-release queue
    */
    for (int i = 0; i < _sg.mtl.sampler_cache.num_items; i++) {
        _sg_mtl_release_resource(frame_index, (int)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, i));
    }
    _sg_smpcache_discard(&_sg.mtl.sampler_cache);
}
10517 | |
10518 | /* |
10519 | create and add an MTLSamplerStateObject and return its resource pool index, |
10520 | reuse identical sampler state if one exists |
10521 | */ |
10522 | _SOKOL_PRIVATE int _sg_mtl_create_sampler(id<MTLDevice> mtl_device, const sg_image_desc* img_desc) { |
10523 | SOKOL_ASSERT(img_desc); |
10524 | int index = _sg_smpcache_find_item(&_sg.mtl.sampler_cache, img_desc); |
10525 | if (index >= 0) { |
10526 | /* reuse existing sampler */ |
10527 | return (int)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, index); |
10528 | } |
10529 | else { |
10530 | /* create a new Metal sampler state object and add to sampler cache */ |
10531 | MTLSamplerDescriptor* mtl_desc = [[MTLSamplerDescriptor alloc] init]; |
10532 | mtl_desc.sAddressMode = _sg_mtl_address_mode(img_desc->wrap_u); |
10533 | mtl_desc.tAddressMode = _sg_mtl_address_mode(img_desc->wrap_v); |
10534 | if (SG_IMAGETYPE_3D == img_desc->type) { |
10535 | mtl_desc.rAddressMode = _sg_mtl_address_mode(img_desc->wrap_w); |
10536 | } |
10537 | #if defined(_SG_TARGET_MACOS) |
10538 | mtl_desc.borderColor = _sg_mtl_border_color(img_desc->border_color); |
10539 | #endif |
10540 | mtl_desc.minFilter = _sg_mtl_minmag_filter(img_desc->min_filter); |
10541 | mtl_desc.magFilter = _sg_mtl_minmag_filter(img_desc->mag_filter); |
10542 | mtl_desc.mipFilter = _sg_mtl_mip_filter(img_desc->min_filter); |
10543 | mtl_desc.lodMinClamp = img_desc->min_lod; |
10544 | mtl_desc.lodMaxClamp = img_desc->max_lod; |
10545 | mtl_desc.maxAnisotropy = img_desc->max_anisotropy; |
10546 | mtl_desc.normalizedCoordinates = YES; |
10547 | id<MTLSamplerState> mtl_sampler = [mtl_device newSamplerStateWithDescriptor:mtl_desc]; |
10548 | _SG_OBJC_RELEASE(mtl_desc); |
10549 | int sampler_handle = _sg_mtl_add_resource(mtl_sampler); |
10550 | _SG_OBJC_RELEASE(mtl_sampler); |
10551 | _sg_smpcache_add_item(&_sg.mtl.sampler_cache, img_desc, (uintptr_t)sampler_handle); |
10552 | return sampler_handle; |
10553 | } |
10554 | } |
10555 | |
/* zero-initialize the Metal backend state cache */
_SOKOL_PRIVATE void _sg_mtl_clear_state_cache(void) {
    _sg_clear(&_sg.mtl.state_cache, sizeof(_sg.mtl.state_cache));
}
10559 | |
/* initialize the backend-id, feature flags, resource limits and per-pixelformat
   capabilities for the Metal backend, see:
   https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
*/
_SOKOL_PRIVATE void _sg_mtl_init_caps(void) {
    #if defined(_SG_TARGET_MACOS)
        _sg.backend = SG_BACKEND_METAL_MACOS;
    #elif defined(_SG_TARGET_IOS)
        #if defined(_SG_TARGET_IOS_SIMULATOR)
            _sg.backend = SG_BACKEND_METAL_SIMULATOR;
        #else
            _sg.backend = SG_BACKEND_METAL_IOS;
        #endif
    #endif
    _sg.features.instancing = true;
    _sg.features.origin_top_left = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    /* clamp-to-border is a macOS-only Metal feature */
    #if defined(_SG_TARGET_MACOS)
        _sg.features.image_clamp_to_border = true;
    #else
        _sg.features.image_clamp_to_border = false;
    #endif
    _sg.features.mrt_independent_blend_state = true;
    _sg.features.mrt_independent_write_mask = true;

    #if defined(_SG_TARGET_MACOS)
        _sg.limits.max_image_size_2d = 16 * 1024;
        _sg.limits.max_image_size_cube = 16 * 1024;
        _sg.limits.max_image_size_3d = 2 * 1024;
        _sg.limits.max_image_size_array = 16 * 1024;
        _sg.limits.max_image_array_layers = 2 * 1024;
    #else
        /* newer iOS devices support 16k textures */
        _sg.limits.max_image_size_2d = 8 * 1024;
        _sg.limits.max_image_size_cube = 8 * 1024;
        _sg.limits.max_image_size_3d = 2 * 1024;
        _sg.limits.max_image_size_array = 8 * 1024;
        _sg.limits.max_image_array_layers = 2 * 1024;
    #endif
    _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES;

    /* per-pixelformat capabilities (the _sg_pixelformat_* helpers set
       different combinations of sample/filter/render/blend/msaa/depth flags,
       they are defined elsewhere in this file)
    */
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16SN]);
    #else
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_R16]);
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_R16SN]);
    #endif
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R32F]);
    #else
        _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_R32F]);
    #endif
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16SN]);
    #else
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RG16]);
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RG16SN]);
    #endif
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG11B10F]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGB9E5]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
    #else
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB9E5]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
    #endif
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG32F]);
    #else
        _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_RG32F]);
    #endif
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]);
    #else
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RGBA16]);
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]);
    #endif
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
    #else
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
    #endif
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]);
    /* compressed formats: BC on macOS, PVRTC/ETC2 on iOS */
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]);
    #else
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_2BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_4BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_2BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_4BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8A1]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGBA8]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11SN]);
    #endif
}
10701 | |
/*-- main Metal backend state and functions ----------------------------------*/

/* one-time Metal backend setup: initializes the resource id-pool, sampler
   cache, per-frame synchronization semaphore, command queue and the
   rotating per-frame uniform buffers
*/
_SOKOL_PRIVATE void _sg_mtl_setup_backend(const sg_desc* desc) {
    /* assume already zero-initialized */
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT(desc->context.metal.device);
    SOKOL_ASSERT(desc->context.metal.renderpass_descriptor_cb || desc->context.metal.renderpass_descriptor_userdata_cb);
    SOKOL_ASSERT(desc->context.metal.drawable_cb || desc->context.metal.drawable_userdata_cb);
    SOKOL_ASSERT(desc->uniform_buffer_size > 0);
    _sg_mtl_init_pool(desc);
    _sg_mtl_init_sampler_cache(desc);
    _sg_mtl_clear_state_cache();
    _sg.mtl.valid = true;
    _sg.mtl.renderpass_descriptor_cb = desc->context.metal.renderpass_descriptor_cb;
    _sg.mtl.renderpass_descriptor_userdata_cb = desc->context.metal.renderpass_descriptor_userdata_cb;
    _sg.mtl.drawable_cb = desc->context.metal.drawable_cb;
    _sg.mtl.drawable_userdata_cb = desc->context.metal.drawable_userdata_cb;
    _sg.mtl.user_data = desc->context.metal.user_data;
    _sg.mtl.frame_index = 1;
    _sg.mtl.ub_size = desc->uniform_buffer_size;
    /* the semaphore limits the number of frames the CPU may run ahead of the GPU */
    _sg.mtl.sem = dispatch_semaphore_create(SG_NUM_INFLIGHT_FRAMES);
    _sg.mtl.device = (__bridge id<MTLDevice>) desc->context.metal.device;
    _sg.mtl.cmd_queue = [_sg.mtl.device newCommandQueue];
    /* one uniform buffer per in-flight frame, CPU-written and GPU-read */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        _sg.mtl.uniform_buffers[i] = [_sg.mtl.device
            newBufferWithLength:(NSUInteger)_sg.mtl.ub_size
            options:MTLResourceCPUCacheModeWriteCombined|MTLResourceStorageModeShared
        ];
    }
    _sg_mtl_init_caps();
}
10732 | |
/* tear down the Metal backend: drains all in-flight frames, flushes the
   deferred-release queue, and releases all backend-owned Metal objects
*/
_SOKOL_PRIVATE void _sg_mtl_discard_backend(void) {
    SOKOL_ASSERT(_sg.mtl.valid);
    /* wait for the last frame to finish */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        dispatch_semaphore_wait(_sg.mtl.sem, DISPATCH_TIME_FOREVER);
    }
    /* semaphore must be "relinquished" before destruction */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        dispatch_semaphore_signal(_sg.mtl.sem);
    }
    _sg_mtl_destroy_sampler_cache(_sg.mtl.frame_index);
    /* pass a frame index far enough in the future so that every queued
       release-item is guaranteed to be flushed
    */
    _sg_mtl_garbage_collect(_sg.mtl.frame_index + SG_NUM_INFLIGHT_FRAMES + 2);
    _sg_mtl_destroy_pool();
    _sg.mtl.valid = false;

    _SG_OBJC_RELEASE(_sg.mtl.sem);
    _SG_OBJC_RELEASE(_sg.mtl.device);
    _SG_OBJC_RELEASE(_sg.mtl.cmd_queue);
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        _SG_OBJC_RELEASE(_sg.mtl.uniform_buffers[i]);
    }
    /* NOTE: MTLCommandBuffer and MTLRenderCommandEncoder are auto-released */
    _sg.mtl.cmd_buffer = nil;
    _sg.mtl.present_cmd_buffer = nil;
    _sg.mtl.cmd_encoder = nil;
}
10759 | |
/* bind the current frame's uniform buffer to all vertex- and fragment-stage
   uniform-block slots of the active render command encoder (per-draw offsets
   are applied later when uniforms are updated)
*/
_SOKOL_PRIVATE void _sg_mtl_bind_uniform_buffers(void) {
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    for (int slot = 0; slot < SG_MAX_SHADERSTAGE_UBS; slot++) {
        [_sg.mtl.cmd_encoder
            setVertexBuffer:_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index]
            offset:0
            atIndex:(NSUInteger)slot];
        [_sg.mtl.cmd_encoder
            setFragmentBuffer:_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index]
            offset:0
            atIndex:(NSUInteger)slot];
    }
}
10773 | |
/* reset the state cache (e.g. after external Metal code has modified
   encoder state behind sokol-gfx's back)
*/
_SOKOL_PRIVATE void _sg_mtl_reset_state_cache(void) {
    _sg_mtl_clear_state_cache();

    /* need to restore the uniform buffer binding (normally happens in
       _sg_mtl_begin_pass())
    */
    if (nil != _sg.mtl.cmd_encoder) {
        _sg_mtl_bind_uniform_buffers();
    }
}
10784 | |
/* context objects are no-ops on Metal (only relevant for the GL backend),
   creation always succeeds
*/
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
10790 | |
/* no-op, Metal context objects own no resources */
_SOKOL_PRIVATE void _sg_mtl_discard_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    /* empty */
}
10796 | |
/* switching contexts invalidates the cached state */
_SOKOL_PRIVATE void _sg_mtl_activate_context(_sg_context_t* ctx) {
    _SOKOL_UNUSED(ctx);
    _sg_mtl_clear_state_cache();
}
10801 | |
/* create a buffer object with one MTLBuffer per in-flight frame slot
   (immutable buffers have a single slot); the caller may inject existing
   MTLBuffer objects via desc->mtl_buffers instead of creating new ones
*/
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    _sg_buffer_common_init(&buf->cmn, desc);
    const bool injected = (0 != desc->mtl_buffers[0]);
    MTLResourceOptions mtl_options = _sg_mtl_buffer_resource_options(buf->cmn.usage);
    for (int slot = 0; slot < buf->cmn.num_slots; slot++) {
        id<MTLBuffer> mtl_buf;
        if (injected) {
            SOKOL_ASSERT(desc->mtl_buffers[slot]);
            mtl_buf = (__bridge id<MTLBuffer>) desc->mtl_buffers[slot];
        }
        else {
            if (buf->cmn.usage == SG_USAGE_IMMUTABLE) {
                /* immutable buffers are created with their initial content */
                SOKOL_ASSERT(desc->data.ptr);
                mtl_buf = [_sg.mtl.device newBufferWithBytes:desc->data.ptr length:(NSUInteger)buf->cmn.size options:mtl_options];
            }
            else {
                mtl_buf = [_sg.mtl.device newBufferWithLength:(NSUInteger)buf->cmn.size options:mtl_options];
            }
        }
        /* transfer ownership into the resource id-pool */
        buf->mtl.buf[slot] = _sg_mtl_add_resource(mtl_buf);
        _SG_OBJC_RELEASE(mtl_buf);
    }
    return SG_RESOURCESTATE_VALID;
}
10827 | |
/* queue all of a buffer's MTLBuffer objects for deferred release */
_SOKOL_PRIVATE void _sg_mtl_discard_buffer(_sg_buffer_t* buf) {
    SOKOL_ASSERT(buf);
    for (int slot = 0; slot < buf->cmn.num_slots; slot++) {
        /* it's valid to call release resource with '0' */
        _sg_mtl_release_resource(_sg.mtl.frame_index, buf->mtl.buf[slot]);
    }
}
10835 | |
/* copy initial image content from an sg_image_data into a MTLTexture,
   iterating over all cube faces, mipmap levels and array slices
*/
_SOKOL_PRIVATE void _sg_mtl_copy_image_data(const _sg_image_t* img, __unsafe_unretained id<MTLTexture> mtl_tex, const sg_image_data* data) {
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices : 1;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) {
            SOKOL_ASSERT(data->subimage[face_index][mip_index].ptr);
            SOKOL_ASSERT(data->subimage[face_index][mip_index].size > 0);
            const uint8_t* data_ptr = (const uint8_t*)data->subimage[face_index][mip_index].ptr;
            /* mip dimensions are halved per level, but never below 1 */
            const int mip_width = _sg_max(img->cmn.width >> mip_index, 1);
            const int mip_height = _sg_max(img->cmn.height >> mip_index, 1);
            /* special case PVRTC formats: bytePerRow and bytesPerImage must be 0 */
            int bytes_per_row = 0;
            int bytes_per_slice = 0;
            if (!_sg_mtl_is_pvrtc(img->cmn.pixel_format)) {
                bytes_per_row = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
                bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1);
            }
            /* bytesPerImage special case: https://developer.apple.com/documentation/metal/mtltexture/1515679-replaceregion

                "Supply a nonzero value only when you copy data to a MTLTextureType3D type texture"
            */
            MTLRegion region;
            int bytes_per_image;
            if (img->cmn.type == SG_IMAGETYPE_3D) {
                const int mip_depth = _sg_max(img->cmn.num_slices >> mip_index, 1);
                region = MTLRegionMake3D(0, 0, 0, (NSUInteger)mip_width, (NSUInteger)mip_height, (NSUInteger)mip_depth);
                bytes_per_image = bytes_per_slice;
                /* FIXME: apparently the minimal bytes_per_image size for 3D texture
                    is 4 KByte... somehow need to handle this */
            }
            else {
                region = MTLRegionMake2D(0, 0, (NSUInteger)mip_width, (NSUInteger)mip_height);
                bytes_per_image = 0;
            }

            for (int slice_index = 0; slice_index < num_slices; slice_index++) {
                /* for cubemaps, the face index selects the texture slice */
                const int mtl_slice_index = (img->cmn.type == SG_IMAGETYPE_CUBE) ? face_index : slice_index;
                const int slice_offset = slice_index * bytes_per_slice;
                SOKOL_ASSERT((slice_offset + bytes_per_slice) <= (int)data->subimage[face_index][mip_index].size);
                [mtl_tex replaceRegion:region
                    mipmapLevel:(NSUInteger)mip_index
                    slice:(NSUInteger)mtl_slice_index
                    withBytes:data_ptr + slice_offset
                    bytesPerRow:(NSUInteger)bytes_per_row
                    bytesPerImage:(NSUInteger)bytes_per_image];
            }
        }
    }
}
10885 | |
10886 | /* |
10887 | FIXME: METAL RESOURCE STORAGE MODE FOR macOS AND iOS |
10888 | |
10889 | For immutable textures on macOS, the recommended procedure is to create |
10890 | a MTLStorageModeManaged texture with the immutable content first, |
10891 | and then use the GPU to blit the content into a MTLStorageModePrivate |
10892 | texture before the first use. |
10893 | |
10894 | On iOS use the same one-time-blit procedure, but from a |
10895 | MTLStorageModeShared to a MTLStorageModePrivate texture. |
10896 | |
    It probably makes sense to handle this in a separate 'resource manager'
    with a recyclable pool of blit-source-textures?
10899 | */ |
10900 | |
/* initialize MTLTextureDescriptor with common attributes,
   returns false if the image's pixel format is not supported
*/
_SOKOL_PRIVATE bool _sg_mtl_init_texdesc_common(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) {
    mtl_desc.textureType = _sg_mtl_texture_type(img->cmn.type);
    mtl_desc.pixelFormat = _sg_mtl_pixel_format(img->cmn.pixel_format);
    if (MTLPixelFormatInvalid == mtl_desc.pixelFormat) {
        SG_LOG("Unsupported texture pixel format!\n");
        return false;
    }
    mtl_desc.width = (NSUInteger)img->cmn.width;
    mtl_desc.height = (NSUInteger)img->cmn.height;
    /* num_slices means depth for 3D textures, array length for array textures */
    if (SG_IMAGETYPE_3D == img->cmn.type) {
        mtl_desc.depth = (NSUInteger)img->cmn.num_slices;
    }
    else {
        mtl_desc.depth = 1;
    }
    mtl_desc.mipmapLevelCount = (NSUInteger)img->cmn.num_mipmaps;
    if (SG_IMAGETYPE_ARRAY == img->cmn.type) {
        mtl_desc.arrayLength = (NSUInteger)img->cmn.num_slices;
    }
    else {
        mtl_desc.arrayLength = 1;
    }
    mtl_desc.usage = MTLTextureUsageShaderRead;
    if (img->cmn.render_target) {
        mtl_desc.usage |= MTLTextureUsageRenderTarget;
    }
    MTLResourceOptions res_options = 0;
    /* dynamic/stream images are written by the CPU every frame */
    if (img->cmn.usage != SG_USAGE_IMMUTABLE) {
        res_options |= MTLResourceCPUCacheModeWriteCombined;
    }
    #if defined(_SG_TARGET_MACOS)
        /* macOS: use managed textures */
        res_options |= MTLResourceStorageModeManaged;
    #else
        /* iOS: use CPU/GPU shared memory */
        res_options |= MTLResourceStorageModeShared;
    #endif
    mtl_desc.resourceOptions = res_options;
    return true;
}
10942 | |
/* initialize MTLTextureDescriptor with render-target attributes
   (overrides settings made by _sg_mtl_init_texdesc_common())
*/
_SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) {
    SOKOL_ASSERT(img->cmn.render_target);
    _SOKOL_UNUSED(img);
    /* render targets are only visible to the GPU */
    mtl_desc.resourceOptions = MTLResourceStorageModePrivate;
    /* non-MSAA render targets are shader-readable */
    mtl_desc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
}
10952 | |
/* initialize MTLTextureDescriptor with MSAA render-surface attributes
   (overrides settings made by _sg_mtl_init_texdesc_common())
*/
_SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt_msaa(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) {
    SOKOL_ASSERT(img->cmn.sample_count > 1);
    /* render targets are only visible to the GPU */
    mtl_desc.resourceOptions = MTLResourceStorageModePrivate;
    /* MSAA render targets are not shader-readable (instead they are resolved) */
    mtl_desc.usage = MTLTextureUsageRenderTarget;
    /* multisample textures must be non-mipmapped 2D textures */
    mtl_desc.textureType = MTLTextureType2DMultisample;
    mtl_desc.depth = 1;
    mtl_desc.arrayLength = 1;
    mtl_desc.mipmapLevelCount = 1;
    mtl_desc.sampleCount = (NSUInteger)img->cmn.sample_count;
}
10966 | |
/* create an image object: one or more MTLTexture objects (double-buffered for
   dynamic images), an optional depth-stencil or MSAA render surface, and a
   (possibly shared) MTLSamplerState; the caller may inject existing MTLTexture
   objects via desc->mtl_textures instead of creating new ones;
   returns SG_RESOURCESTATE_FAILED if the pixel format is unsupported

   FIX: the color-texture and MSAA-surface paths now assert a non-nil result
   from newTextureWithDescriptor:, consistent with the depth-stencil path
   (debug-build check only, no release-mode behavior change)
*/
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    _sg_image_common_init(&img->cmn, desc);
    const bool injected = (0 != desc->mtl_textures[0]);
    const bool msaa = (img->cmn.sample_count > 1);

    /* first initialize all Metal resource pool slots to 'empty' */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        img->mtl.tex[i] = _sg_mtl_add_resource(nil);
    }
    img->mtl.sampler_state = _sg_mtl_add_resource(nil);
    img->mtl.depth_tex = _sg_mtl_add_resource(nil);
    img->mtl.msaa_tex = _sg_mtl_add_resource(nil);

    /* initialize a Metal texture descriptor with common attributes */
    MTLTextureDescriptor* mtl_desc = [[MTLTextureDescriptor alloc] init];
    if (!_sg_mtl_init_texdesc_common(mtl_desc, img)) {
        _SG_OBJC_RELEASE(mtl_desc);
        return SG_RESOURCESTATE_FAILED;
    }

    /* special case depth-stencil-buffer? */
    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* depth-stencil buffer texture must always be a render target */
        SOKOL_ASSERT(img->cmn.render_target);
        SOKOL_ASSERT(img->cmn.type == SG_IMAGETYPE_2D);
        SOKOL_ASSERT(img->cmn.num_mipmaps == 1);
        SOKOL_ASSERT(!injected);
        if (msaa) {
            _sg_mtl_init_texdesc_rt_msaa(mtl_desc, img);
        }
        else {
            _sg_mtl_init_texdesc_rt(mtl_desc, img);
        }
        id<MTLTexture> tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc];
        SOKOL_ASSERT(nil != tex);
        img->mtl.depth_tex = _sg_mtl_add_resource(tex);
        _SG_OBJC_RELEASE(tex);
    }
    else {
        /* create the color texture
            In case this is a render target without MSAA, add the relevant
            render-target descriptor attributes.
            In case this is a render target *with* MSAA, the color texture
            will serve as MSAA-resolve target (not as render target), and rendering
            will go into a separate render target texture of type
            MTLTextureType2DMultisample.
        */
        if (img->cmn.render_target && !msaa) {
            _sg_mtl_init_texdesc_rt(mtl_desc, img);
        }
        for (int slot = 0; slot < img->cmn.num_slots; slot++) {
            id<MTLTexture> tex;
            if (injected) {
                SOKOL_ASSERT(desc->mtl_textures[slot]);
                tex = (__bridge id<MTLTexture>) desc->mtl_textures[slot];
            }
            else {
                tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc];
                SOKOL_ASSERT(nil != tex);
                /* immutable non-render-target images are initialized with content */
                if ((img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) {
                    _sg_mtl_copy_image_data(img, tex, &desc->data);
                }
            }
            img->mtl.tex[slot] = _sg_mtl_add_resource(tex);
            _SG_OBJC_RELEASE(tex);
        }

        /* if MSAA color render target, create an additional MSAA render-surface texture */
        if (img->cmn.render_target && msaa) {
            _sg_mtl_init_texdesc_rt_msaa(mtl_desc, img);
            id<MTLTexture> tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc];
            SOKOL_ASSERT(nil != tex);
            img->mtl.msaa_tex = _sg_mtl_add_resource(tex);
            _SG_OBJC_RELEASE(tex);
        }

        /* create (possibly shared) sampler state */
        img->mtl.sampler_state = _sg_mtl_create_sampler(_sg.mtl.device, desc);
    }
    _SG_OBJC_RELEASE(mtl_desc);
    return SG_RESOURCESTATE_VALID;
}
11048 | |
/* queue all of an image's textures for deferred release */
_SOKOL_PRIVATE void _sg_mtl_discard_image(_sg_image_t* img) {
    SOKOL_ASSERT(img);
    /* it's valid to call release resource with a 'null resource' */
    for (int slot = 0; slot < img->cmn.num_slots; slot++) {
        _sg_mtl_release_resource(_sg.mtl.frame_index, img->mtl.tex[slot]);
    }
    _sg_mtl_release_resource(_sg.mtl.frame_index, img->mtl.depth_tex);
    _sg_mtl_release_resource(_sg.mtl.frame_index, img->mtl.msaa_tex);
    /* NOTE: sampler state objects are shared and not released until shutdown */
}
11059 | |
/* compile a MTLLibrary from Metal shader source code, returns nil on failure */
_SOKOL_PRIVATE id<MTLLibrary> _sg_mtl_compile_library(const char* src) {
    NSError* err = NULL;
    id<MTLLibrary> lib = [_sg.mtl.device
        newLibraryWithSource:[NSString stringWithUTF8String:src]
        options:nil
        error:&err
    ];
    /* NOTE(review): failure is detected via the error out-parameter instead of
       the returned library; per Cocoa convention the error object is only
       guaranteed to be valid when the result is nil — confirm whether logging
       on a non-nil library (e.g. compile warnings) is intended
    */
    if (err) {
        SG_LOG([err.localizedDescription UTF8String]);
    }
    return lib;
}
11072 | |
/* create a MTLLibrary from precompiled Metal bytecode, returns nil on failure */
_SOKOL_PRIVATE id<MTLLibrary> _sg_mtl_library_from_bytecode(const void* ptr, size_t num_bytes) {
    NSError* err = NULL;
    /* DISPATCH_DATA_DESTRUCTOR_DEFAULT makes dispatch_data_create copy the bytes,
       so the caller's buffer does not need to outlive the call
    */
    dispatch_data_t lib_data = dispatch_data_create(ptr, num_bytes, NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
    id<MTLLibrary> lib = [_sg.mtl.device newLibraryWithData:lib_data error:&err];
    /* NOTE(review): see _sg_mtl_compile_library() — the error object is only
       guaranteed valid when 'lib' is nil
    */
    if (err) {
        SG_LOG([err.localizedDescription UTF8String]);
    }
    _SG_OBJC_RELEASE(lib_data);
    return lib;
}
11083 | |
/* create the per-stage Metal library and entry-function objects for a shader;
   either bytecode or source must be provided for *both* stages, otherwise
   creation fails; returns SG_RESOURCESTATE_FAILED on any error */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);

    _sg_shader_common_init(&shd->cmn, desc);

    /* create metal library objects and lookup entry functions */
    id<MTLLibrary> vs_lib = nil;
    id<MTLLibrary> fs_lib = nil;
    id<MTLFunction> vs_func = nil;
    id<MTLFunction> fs_func = nil;
    const char* vs_entry = desc->vs.entry;
    const char* fs_entry = desc->fs.entry;
    if (desc->vs.bytecode.ptr && desc->fs.bytecode.ptr) {
        /* separate byte code provided */
        vs_lib = _sg_mtl_library_from_bytecode(desc->vs.bytecode.ptr, desc->vs.bytecode.size);
        fs_lib = _sg_mtl_library_from_bytecode(desc->fs.bytecode.ptr, desc->fs.bytecode.size);
    }
    else if (desc->vs.source && desc->fs.source) {
        /* separate sources provided */
        vs_lib = _sg_mtl_compile_library(desc->vs.source);
        fs_lib = _sg_mtl_compile_library(desc->fs.source);
    }
    else {
        /* neither bytecode nor source provided for both stages */
        goto failed;
    }
    if ((nil == vs_lib) || (nil == fs_lib)) {
        goto failed;
    }
    /* the entry-function lookup is identical for the bytecode and source
       paths, so it only needs to happen once after the branch */
    vs_func = [vs_lib newFunctionWithName:[NSString stringWithUTF8String:vs_entry]];
    fs_func = [fs_lib newFunctionWithName:[NSString stringWithUTF8String:fs_entry]];
    if (nil == vs_func) {
        SG_LOG("vertex shader entry function not found\n");
        goto failed;
    }
    if (nil == fs_func) {
        SG_LOG("fragment shader entry function not found\n");
        goto failed;
    }
    /* it is legal to call _sg_mtl_add_resource with a nil value, this will return a special 0xFFFFFFFF index */
    shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_lib = _sg_mtl_add_resource(vs_lib);
    _SG_OBJC_RELEASE(vs_lib);
    shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_lib = _sg_mtl_add_resource(fs_lib);
    _SG_OBJC_RELEASE(fs_lib);
    shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func = _sg_mtl_add_resource(vs_func);
    _SG_OBJC_RELEASE(vs_func);
    shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func = _sg_mtl_add_resource(fs_func);
    _SG_OBJC_RELEASE(fs_func);
    return SG_RESOURCESTATE_VALID;
failed:
    /* release whatever was created before the failure */
    if (vs_lib != nil) {
        _SG_OBJC_RELEASE(vs_lib);
    }
    if (fs_lib != nil) {
        _SG_OBJC_RELEASE(fs_lib);
    }
    if (vs_func != nil) {
        _SG_OBJC_RELEASE(vs_func);
    }
    if (fs_func != nil) {
        _SG_OBJC_RELEASE(fs_func);
    }
    return SG_RESOURCESTATE_FAILED;
}
11152 | |
_SOKOL_PRIVATE void _sg_mtl_discard_shader(_sg_shader_t* shd) {
    SOKOL_ASSERT(shd);
    /* queue the per-stage function and library objects for deferred release;
       it is valid to call _sg_mtl_release_resource with a 'null resource' */
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[stage_index].mtl_func);
        _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[stage_index].mtl_lib);
    }
}
11161 | |
/* create the Metal render-pipeline-state and depth-stencil-state objects for
   a sokol pipeline; translates the platform-agnostic sg_pipeline_desc into a
   MTLRenderPipelineDescriptor / MTLDepthStencilDescriptor pair; returns
   SG_RESOURCESTATE_FAILED if Metal rejects the pipeline descriptor */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(pip && shd && desc);
    SOKOL_ASSERT(desc->shader.id == shd->slot.id);

    pip->shader = shd;
    _sg_pipeline_common_init(&pip->cmn, desc);

    /* translate the draw-call parameters that are not part of the
       MTLRenderPipelineState and must be applied per encoder instead */
    sg_primitive_type prim_type = desc->primitive_type;
    pip->mtl.prim_type = _sg_mtl_primitive_type(prim_type);
    pip->mtl.index_size = _sg_mtl_index_size(pip->cmn.index_type);
    if (SG_INDEXTYPE_NONE != pip->cmn.index_type) {
        pip->mtl.index_type = _sg_mtl_index_type(pip->cmn.index_type);
    }
    pip->mtl.cull_mode = _sg_mtl_cull_mode(desc->cull_mode);
    pip->mtl.winding = _sg_mtl_winding(desc->face_winding);
    pip->mtl.stencil_ref = desc->stencil.ref;

    /* create vertex-descriptor (attribute list terminates at the first
       SG_VERTEXFORMAT_INVALID entry) */
    MTLVertexDescriptor* vtx_desc = [MTLVertexDescriptor vertexDescriptor];
    for (NSUInteger attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS);
        vtx_desc.attributes[attr_index].format = _sg_mtl_vertex_format(a_desc->format);
        vtx_desc.attributes[attr_index].offset = (NSUInteger)a_desc->offset;
        /* vertex buffers are bound behind the uniform-buffer slots, hence the offset */
        vtx_desc.attributes[attr_index].bufferIndex = (NSUInteger)(a_desc->buffer_index + SG_MAX_SHADERSTAGE_UBS);
        pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
    }
    /* only configure buffer layouts that are actually referenced by an attribute */
    for (NSUInteger layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) {
        if (pip->cmn.vertex_layout_valid[layout_index]) {
            const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index];
            const NSUInteger mtl_vb_slot = layout_index + SG_MAX_SHADERSTAGE_UBS;
            SOKOL_ASSERT(l_desc->stride > 0);
            vtx_desc.layouts[mtl_vb_slot].stride = (NSUInteger)l_desc->stride;
            vtx_desc.layouts[mtl_vb_slot].stepFunction = _sg_mtl_step_function(l_desc->step_func);
            vtx_desc.layouts[mtl_vb_slot].stepRate = (NSUInteger)l_desc->step_rate;
            if (SG_VERTEXSTEP_PER_INSTANCE == l_desc->step_func) {
                // NOTE: not actually used in _sg_mtl_draw()
                pip->cmn.use_instanced_draw = true;
            }
        }
    }

    /* render-pipeline descriptor */
    MTLRenderPipelineDescriptor* rp_desc = [[MTLRenderPipelineDescriptor alloc] init];
    rp_desc.vertexDescriptor = vtx_desc;
    SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX);
    rp_desc.vertexFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func);
    SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX);
    rp_desc.fragmentFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func);
    rp_desc.rasterSampleCount = (NSUInteger)desc->sample_count;
    rp_desc.alphaToCoverageEnabled = desc->alpha_to_coverage_enabled;
    rp_desc.alphaToOneEnabled = NO;
    rp_desc.rasterizationEnabled = YES;
    rp_desc.depthAttachmentPixelFormat = _sg_mtl_pixel_format(desc->depth.pixel_format);
    /* a stencil attachment format is only set for combined depth-stencil formats */
    if (desc->depth.pixel_format == SG_PIXELFORMAT_DEPTH_STENCIL) {
        rp_desc.stencilAttachmentPixelFormat = _sg_mtl_pixel_format(desc->depth.pixel_format);
    }
    /* FIXME: this only works on macOS 10.13!
    for (int i = 0; i < (SG_MAX_SHADERSTAGE_UBS+SG_MAX_SHADERSTAGE_BUFFERS); i++) {
        rp_desc.vertexBuffers[i].mutability = MTLMutabilityImmutable;
    }
    for (int i = 0; i < SG_MAX_SHADERSTAGE_UBS; i++) {
        rp_desc.fragmentBuffers[i].mutability = MTLMutabilityImmutable;
    }
    */
    /* per-color-attachment pixel format, write mask and blend state */
    for (NSUInteger i = 0; i < (NSUInteger)desc->color_count; i++) {
        SOKOL_ASSERT(i < SG_MAX_COLOR_ATTACHMENTS);
        const sg_color_state* cs = &desc->colors[i];
        rp_desc.colorAttachments[i].pixelFormat = _sg_mtl_pixel_format(cs->pixel_format);
        rp_desc.colorAttachments[i].writeMask = _sg_mtl_color_write_mask(cs->write_mask);
        rp_desc.colorAttachments[i].blendingEnabled = cs->blend.enabled;
        rp_desc.colorAttachments[i].alphaBlendOperation = _sg_mtl_blend_op(cs->blend.op_alpha);
        rp_desc.colorAttachments[i].rgbBlendOperation = _sg_mtl_blend_op(cs->blend.op_rgb);
        rp_desc.colorAttachments[i].destinationAlphaBlendFactor = _sg_mtl_blend_factor(cs->blend.dst_factor_alpha);
        rp_desc.colorAttachments[i].destinationRGBBlendFactor = _sg_mtl_blend_factor(cs->blend.dst_factor_rgb);
        rp_desc.colorAttachments[i].sourceAlphaBlendFactor = _sg_mtl_blend_factor(cs->blend.src_factor_alpha);
        rp_desc.colorAttachments[i].sourceRGBBlendFactor = _sg_mtl_blend_factor(cs->blend.src_factor_rgb);
    }
    NSError* err = NULL;
    id<MTLRenderPipelineState> mtl_rps = [_sg.mtl.device newRenderPipelineStateWithDescriptor:rp_desc error:&err];
    _SG_OBJC_RELEASE(rp_desc);
    if (nil == mtl_rps) {
        SOKOL_ASSERT(err);
        SG_LOG([err.localizedDescription UTF8String]);
        return SG_RESOURCESTATE_FAILED;
    }

    /* depth-stencil-state */
    MTLDepthStencilDescriptor* ds_desc = [[MTLDepthStencilDescriptor alloc] init];
    ds_desc.depthCompareFunction = _sg_mtl_compare_func(desc->depth.compare);
    ds_desc.depthWriteEnabled = desc->depth.write_enabled;
    if (desc->stencil.enabled) {
        const sg_stencil_face_state* sb = &desc->stencil.back;
        ds_desc.backFaceStencil = [[MTLStencilDescriptor alloc] init];
        ds_desc.backFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sb->fail_op);
        ds_desc.backFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sb->depth_fail_op);
        ds_desc.backFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sb->pass_op);
        ds_desc.backFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sb->compare);
        ds_desc.backFaceStencil.readMask = desc->stencil.read_mask;
        ds_desc.backFaceStencil.writeMask = desc->stencil.write_mask;
        const sg_stencil_face_state* sf = &desc->stencil.front;
        ds_desc.frontFaceStencil = [[MTLStencilDescriptor alloc] init];
        ds_desc.frontFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sf->fail_op);
        ds_desc.frontFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sf->depth_fail_op);
        ds_desc.frontFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sf->pass_op);
        ds_desc.frontFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sf->compare);
        ds_desc.frontFaceStencil.readMask = desc->stencil.read_mask;
        ds_desc.frontFaceStencil.writeMask = desc->stencil.write_mask;
    }
    id<MTLDepthStencilState> mtl_dss = [_sg.mtl.device newDepthStencilStateWithDescriptor:ds_desc];
    _SG_OBJC_RELEASE(ds_desc);
    /* register the created state objects in the resource registry and drop
       the local references (the registry now owns them) */
    pip->mtl.rps = _sg_mtl_add_resource(mtl_rps);
    _SG_OBJC_RELEASE(mtl_rps);
    pip->mtl.dss = _sg_mtl_add_resource(mtl_dss);
    _SG_OBJC_RELEASE(mtl_dss);
    return SG_RESOURCESTATE_VALID;
}
11282 | |
_SOKOL_PRIVATE void _sg_mtl_discard_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    /* queue the render-pipeline-state and depth-stencil-state for deferred
       release; passing a 'null resource' is allowed */
    const uint32_t frame_index = _sg.mtl.frame_index;
    _sg_mtl_release_resource(frame_index, pip->mtl.rps);
    _sg_mtl_release_resource(frame_index, pip->mtl.dss);
}
11289 | |
/* create a render pass object; on Metal this only stores pointers to the
   attachment images (the actual MTLRenderPassDescriptor is built each frame
   in _sg_mtl_begin_pass), so this cannot fail */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && desc);
    SOKOL_ASSERT(att_images && att_images[0]);

    _sg_pass_common_init(&pass->cmn, desc);

    /* copy image pointers */
    const sg_pass_attachment_desc* att_desc;
    for (int i = 0; i < pass->cmn.num_color_atts; i++) {
        att_desc = &desc->color_attachments[i];
        if (att_desc->image.id != SG_INVALID_ID) {
            /* NOTE: the redundant 'att_desc->image.id != SG_INVALID_ID' assert
               (identical to the if-condition) has been removed */
            SOKOL_ASSERT(0 == pass->mtl.color_atts[i].image);
            SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id));
            SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format));
            pass->mtl.color_atts[i].image = att_images[i];
        }
    }
    /* the optional depth-stencil image is passed behind the color images */
    SOKOL_ASSERT(0 == pass->mtl.ds_att.image);
    att_desc = &desc->depth_stencil_attachment;
    if (att_desc->image.id != SG_INVALID_ID) {
        const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
        SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
        pass->mtl.ds_att.image = att_images[ds_img_index];
    }
    return SG_RESOURCESTATE_VALID;
}
11318 | |
/* discard a pass object; a pass only stores pointers to its attachment
   images (see _sg_mtl_create_pass), it owns no Metal objects itself, so
   there is nothing to release here */
_SOKOL_PRIVATE void _sg_mtl_discard_pass(_sg_pass_t* pass) {
    SOKOL_ASSERT(pass);
    _SOKOL_UNUSED(pass);
}
11323 | |
/* return the color attachment image at the given index (may return null) */
_SOKOL_PRIVATE _sg_image_t* _sg_mtl_pass_color_image(const _sg_pass_t* pass, int index) {
    SOKOL_ASSERT(pass);
    SOKOL_ASSERT((index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
    return pass->mtl.color_atts[index].image;
}
11329 | |
/* return the depth-stencil attachment image (may return null) */
_SOKOL_PRIVATE _sg_image_t* _sg_mtl_pass_ds_image(const _sg_pass_t* pass) {
    SOKOL_ASSERT(pass);
    return pass->mtl.ds_att.image;
}
11335 | |
/* begin a render pass: lazily create the frame's command buffers, build or
   obtain a MTLRenderPassDescriptor (from the pass object for offscreen
   rendering, or via the user-provided callback for the default framebuffer),
   and create the render command encoder; sets _sg.mtl.pass_valid = false and
   returns early if no usable pass descriptor or encoder could be obtained
   (e.g. when the window is minimized)
     pass   - pass object for offscreen rendering, or 0 for the default pass
     action - load/clear actions and clear values per attachment
     w, h   - current framebuffer dimensions in pixels */
_SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    SOKOL_ASSERT(action);
    SOKOL_ASSERT(!_sg.mtl.in_pass);
    SOKOL_ASSERT(_sg.mtl.cmd_queue);
    SOKOL_ASSERT(nil == _sg.mtl.cmd_encoder);
    SOKOL_ASSERT(_sg.mtl.renderpass_descriptor_cb || _sg.mtl.renderpass_descriptor_userdata_cb);
    _sg.mtl.in_pass = true;
    _sg.mtl.cur_width = w;
    _sg.mtl.cur_height = h;
    /* forget cached bindings from the previous pass, each pass starts clean */
    _sg_mtl_clear_state_cache();

    /*
        if this is the first pass in the frame, create command buffers

        NOTE: we're creating two command buffers here, one with unretained references
        for storing the regular commands, and one with retained references for
        storing the presentDrawable call (this needs to hold on the drawable until
        presentation has happened - and the easiest way to do this is to let the
        command buffer manage the lifetime of the drawable).

        Also see: https://github.com/floooh/sokol/issues/762
    */
    if (nil == _sg.mtl.cmd_buffer) {
        SOKOL_ASSERT(nil == _sg.mtl.present_cmd_buffer);
        /* block until the oldest frame in flight has finished */
        dispatch_semaphore_wait(_sg.mtl.sem, DISPATCH_TIME_FOREVER);
        _sg.mtl.cmd_buffer = [_sg.mtl.cmd_queue commandBufferWithUnretainedReferences];
        _sg.mtl.present_cmd_buffer = [_sg.mtl.cmd_queue commandBuffer];
        /* enqueue in this order so the present buffer executes after the render buffer */
        [_sg.mtl.cmd_buffer enqueue];
        [_sg.mtl.present_cmd_buffer enqueue];
        [_sg.mtl.present_cmd_buffer addCompletedHandler:^(id<MTLCommandBuffer> cmd_buf) {
            // NOTE: this code is called on a different thread!
            _SOKOL_UNUSED(cmd_buf);
            dispatch_semaphore_signal(_sg.mtl.sem);
        }];
    }

    /* if this is first pass in frame, get uniform buffer base pointer */
    if (0 == _sg.mtl.cur_ub_base_ptr) {
        _sg.mtl.cur_ub_base_ptr = (uint8_t*)[_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] contents];
    }

    /* initialize a render pass descriptor */
    MTLRenderPassDescriptor* pass_desc = nil;
    if (pass) {
        /* offscreen render pass */
        pass_desc = [MTLRenderPassDescriptor renderPassDescriptor];
    }
    else {
        /* default render pass, call user-provided callback to provide render pass descriptor */
        if (_sg.mtl.renderpass_descriptor_cb) {
            pass_desc = (__bridge MTLRenderPassDescriptor*) _sg.mtl.renderpass_descriptor_cb();
        }
        else {
            pass_desc = (__bridge MTLRenderPassDescriptor*) _sg.mtl.renderpass_descriptor_userdata_cb(_sg.mtl.user_data);
        }

    }
    if (pass_desc) {
        _sg.mtl.pass_valid = true;
    }
    else {
        /* default pass descriptor will not be valid if window is minimized,
           don't do any rendering in this case */
        _sg.mtl.pass_valid = false;
        return;
    }
    if (pass) {
        /* setup pass descriptor for offscreen rendering */
        SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID);
        for (NSUInteger i = 0; i < (NSUInteger)pass->cmn.num_color_atts; i++) {
            const _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i];
            const _sg_mtl_attachment_t* mtl_att = &pass->mtl.color_atts[i];
            const _sg_image_t* att_img = mtl_att->image;
            SOKOL_ASSERT(att_img->slot.state == SG_RESOURCESTATE_VALID);
            SOKOL_ASSERT(att_img->slot.id == cmn_att->image_id.id);
            const bool is_msaa = (att_img->cmn.sample_count > 1);
            pass_desc.colorAttachments[i].loadAction = _sg_mtl_load_action(action->colors[i].action);
            /* MSAA attachments render into the msaa texture and resolve into the
               actual image texture at the end of the pass */
            pass_desc.colorAttachments[i].storeAction = is_msaa ? MTLStoreActionMultisampleResolve : MTLStoreActionStore;
            sg_color c = action->colors[i].value;
            pass_desc.colorAttachments[i].clearColor = MTLClearColorMake(c.r, c.g, c.b, c.a);
            if (is_msaa) {
                SOKOL_ASSERT(att_img->mtl.msaa_tex != _SG_MTL_INVALID_SLOT_INDEX);
                SOKOL_ASSERT(att_img->mtl.tex[mtl_att->image->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
                pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.msaa_tex);
                pass_desc.colorAttachments[i].resolveTexture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]);
                pass_desc.colorAttachments[i].resolveLevel = (NSUInteger)cmn_att->mip_level;
                /* cube/array images select a slice, 3D images a depth plane */
                switch (att_img->cmn.type) {
                    case SG_IMAGETYPE_CUBE:
                    case SG_IMAGETYPE_ARRAY:
                        pass_desc.colorAttachments[i].resolveSlice = (NSUInteger)cmn_att->slice;
                        break;
                    case SG_IMAGETYPE_3D:
                        pass_desc.colorAttachments[i].resolveDepthPlane = (NSUInteger)cmn_att->slice;
                        break;
                    default: break;
                }
            }
            else {
                SOKOL_ASSERT(att_img->mtl.tex[att_img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
                pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]);
                pass_desc.colorAttachments[i].level = (NSUInteger)cmn_att->mip_level;
                /* cube/array images select a slice, 3D images a depth plane */
                switch (att_img->cmn.type) {
                    case SG_IMAGETYPE_CUBE:
                    case SG_IMAGETYPE_ARRAY:
                        pass_desc.colorAttachments[i].slice = (NSUInteger)cmn_att->slice;
                        break;
                    case SG_IMAGETYPE_3D:
                        pass_desc.colorAttachments[i].depthPlane = (NSUInteger)cmn_att->slice;
                        break;
                    default: break;
                }
            }
        }
        const _sg_image_t* ds_att_img = pass->mtl.ds_att.image;
        if (0 != ds_att_img) {
            SOKOL_ASSERT(ds_att_img->slot.state == SG_RESOURCESTATE_VALID);
            SOKOL_ASSERT(ds_att_img->slot.id == pass->cmn.ds_att.image_id.id);
            SOKOL_ASSERT(ds_att_img->mtl.depth_tex != _SG_MTL_INVALID_SLOT_INDEX);
            pass_desc.depthAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex);
            pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action);
            pass_desc.depthAttachment.clearDepth = action->depth.value;
            /* combined depth-stencil formats share the same texture for both attachments */
            if (_sg_is_depth_stencil_format(ds_att_img->cmn.pixel_format)) {
                pass_desc.stencilAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex);
                pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action);
                pass_desc.stencilAttachment.clearStencil = action->stencil.value;
            }
        }
    }
    else {
        /* setup pass descriptor for default rendering (textures were provided
           by the user callback, only load actions and clear values are set here) */
        pass_desc.colorAttachments[0].loadAction = _sg_mtl_load_action(action->colors[0].action);
        sg_color c = action->colors[0].value;
        pass_desc.colorAttachments[0].clearColor = MTLClearColorMake(c.r, c.g, c.b, c.a);
        pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action);
        pass_desc.depthAttachment.clearDepth = action->depth.value;
        pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action);
        pass_desc.stencilAttachment.clearStencil = action->stencil.value;
    }

    /* create a render command encoder, this might return nil if window is minimized */
    _sg.mtl.cmd_encoder = [_sg.mtl.cmd_buffer renderCommandEncoderWithDescriptor:pass_desc];
    if (nil == _sg.mtl.cmd_encoder) {
        _sg.mtl.pass_valid = false;
        return;
    }

    /* bind the global uniform buffer, this only happens once per pass */
    _sg_mtl_bind_uniform_buffers();
}
11486 | |
/* finish the current render pass and drop the command encoder */
_SOKOL_PRIVATE void _sg_mtl_end_pass(void) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    _sg.mtl.in_pass = false;
    _sg.mtl.pass_valid = false;
    /* the encoder may be nil if the pass descriptor was invalid (e.g. minimized window) */
    if (_sg.mtl.cmd_encoder != nil) {
        [_sg.mtl.cmd_encoder endEncoding];
        /* NOTE: MTLRenderCommandEncoder is autoreleased */
        _sg.mtl.cmd_encoder = nil;
    }
}
11497 | |
/* end-of-frame: obtain the current drawable via the user callback, schedule
   its presentation on the retained-references command buffer, commit both
   command buffers, garbage-collect deferred-released resources, and rotate
   the uniform buffer to the next in-flight frame slot */
_SOKOL_PRIVATE void _sg_mtl_commit(void) {
    SOKOL_ASSERT(!_sg.mtl.in_pass);
    SOKOL_ASSERT(!_sg.mtl.pass_valid);
    SOKOL_ASSERT(_sg.mtl.drawable_cb || _sg.mtl.drawable_userdata_cb);
    SOKOL_ASSERT(nil == _sg.mtl.cmd_encoder);
    SOKOL_ASSERT(nil != _sg.mtl.cmd_buffer);
    SOKOL_ASSERT(nil != _sg.mtl.present_cmd_buffer);

    /* present, commit and signal semaphore when done */
    id<MTLDrawable> cur_drawable = nil;
    if (_sg.mtl.drawable_cb) {
        cur_drawable = (__bridge id<MTLDrawable>) _sg.mtl.drawable_cb();
    }
    else {
        cur_drawable = (__bridge id<MTLDrawable>) _sg.mtl.drawable_userdata_cb(_sg.mtl.user_data);
    }
    /* the drawable may be nil (e.g. minimized window), skip presentation then */
    if (nil != cur_drawable) {
        [_sg.mtl.present_cmd_buffer presentDrawable:cur_drawable];
    }
    /* commit order matters: render commands first, then the present buffer
       (whose completion handler signals the frame semaphore, see begin_pass) */
    [_sg.mtl.cmd_buffer commit];
    [_sg.mtl.present_cmd_buffer commit];

    /* garbage-collect resources pending for release */
    _sg_mtl_garbage_collect(_sg.mtl.frame_index);

    /* rotate uniform buffer slot */
    if (++_sg.mtl.cur_frame_rotate_index >= SG_NUM_INFLIGHT_FRAMES) {
        _sg.mtl.cur_frame_rotate_index = 0;
    }
    _sg.mtl.frame_index++;
    /* reset per-frame uniform buffer cursor; the base pointer is re-fetched
       in the first begin_pass of the next frame */
    _sg.mtl.cur_ub_offset = 0;
    _sg.mtl.cur_ub_base_ptr = 0;
    /* NOTE: MTLCommandBuffer is autoreleased */
    _sg.mtl.cmd_buffer = nil;
    _sg.mtl.present_cmd_buffer = nil;
}
11534 | |
/* set the viewport on the current render command encoder; a no-op when the
   current pass is invalid (e.g. minimized window) */
_SOKOL_PRIVATE void _sg_mtl_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    /* flip y for a bottom-left origin (Metal's viewport origin is top-left) */
    const int top = origin_top_left ? y : (_sg.mtl.cur_height - (y + h));
    const MTLViewport vp = {
        .originX = (double) x,
        .originY = (double) top,
        .width   = (double) w,
        .height  = (double) h,
        .znear   = 0.0,
        .zfar    = 1.0
    };
    [_sg.mtl.cmd_encoder setViewport:vp];
}
11550 | |
/* set the scissor rect on the current render command encoder; the rect is
   clamped to the framebuffer (Metal requires the scissor rect to lie fully
   inside the render target); a no-op when the current pass is invalid */
_SOKOL_PRIVATE void _sg_mtl_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    /* clip against framebuffer rect */
    const int fb_width = _sg.mtl.cur_width;
    const int fb_height = _sg.mtl.cur_height;
    x = _sg_min(_sg_max(0, x), fb_width - 1);
    y = _sg_min(_sg_max(0, y), fb_height - 1);
    if ((x + w) > fb_width) {
        w = fb_width - x;
    }
    if ((y + h) > fb_height) {
        h = fb_height - y;
    }
    /* a zero-sized scissor rect is not allowed */
    w = _sg_max(w, 1);
    h = _sg_max(h, 1);

    const MTLScissorRect rect = {
        .x = (NSUInteger) x,
        /* flip y for a bottom-left origin */
        .y = (NSUInteger) (origin_top_left ? y : (fb_height - (y + h))),
        .width  = (NSUInteger) w,
        .height = (NSUInteger) h
    };
    [_sg.mtl.cmd_encoder setScissorRect:rect];
}
11576 | |
/* apply a pipeline object to the current render command encoder; redundant
   applies of the already-current pipeline are skipped via the state cache */
_SOKOL_PRIVATE void _sg_mtl_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);

    /* early out if this pipeline is already current */
    if ((_sg.mtl.state_cache.cur_pipeline == pip) && (_sg.mtl.state_cache.cur_pipeline_id.id == pip->slot.id)) {
        return;
    }
    _sg.mtl.state_cache.cur_pipeline = pip;
    _sg.mtl.state_cache.cur_pipeline_id.id = pip->slot.id;
    /* apply the render state that lives outside the MTLRenderPipelineState */
    const sg_color blend_color = pip->cmn.blend_color;
    [_sg.mtl.cmd_encoder setBlendColorRed:blend_color.r green:blend_color.g blue:blend_color.b alpha:blend_color.a];
    [_sg.mtl.cmd_encoder setCullMode:pip->mtl.cull_mode];
    [_sg.mtl.cmd_encoder setFrontFacingWinding:pip->mtl.winding];
    [_sg.mtl.cmd_encoder setStencilReferenceValue:pip->mtl.stencil_ref];
    [_sg.mtl.cmd_encoder setDepthBias:pip->cmn.depth_bias slopeScale:pip->cmn.depth_bias_slope_scale clamp:pip->cmn.depth_bias_clamp];
    SOKOL_ASSERT(pip->mtl.rps != _SG_MTL_INVALID_SLOT_INDEX);
    [_sg.mtl.cmd_encoder setRenderPipelineState:_sg_mtl_id(pip->mtl.rps)];
    SOKOL_ASSERT(pip->mtl.dss != _SG_MTL_INVALID_SLOT_INDEX);
    [_sg.mtl.cmd_encoder setDepthStencilState:_sg_mtl_id(pip->mtl.dss)];
}
11601 | |
/* bind vertex buffers, the index buffer and per-stage images/samplers for
   the next draw call; each binding is compared against the state cache and
   only re-applied to the encoder when it actually changed; a no-op when the
   current pass is invalid
     vbs/vb_offsets/num_vbs - vertex buffers with byte offsets
     ib/ib_offset           - optional index buffer (ib may be null)
     vs_imgs/fs_imgs        - images for the vertex/fragment stage */
_SOKOL_PRIVATE void _sg_mtl_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    _SOKOL_UNUSED(pip);
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);

    /* store index buffer binding, this will be needed later in sg_draw()
       (Metal supplies the index buffer per draw call, not as encoder state) */
    _sg.mtl.state_cache.cur_indexbuffer = ib;
    _sg.mtl.state_cache.cur_indexbuffer_offset = ib_offset;
    if (ib) {
        SOKOL_ASSERT(pip->cmn.index_type != SG_INDEXTYPE_NONE);
        _sg.mtl.state_cache.cur_indexbuffer_id.id = ib->slot.id;
    }
    else {
        SOKOL_ASSERT(pip->cmn.index_type == SG_INDEXTYPE_NONE);
        _sg.mtl.state_cache.cur_indexbuffer_id.id = SG_INVALID_ID;
    }

    /* apply vertex buffers */
    NSUInteger slot;
    for (slot = 0; slot < (NSUInteger)num_vbs; slot++) {
        const _sg_buffer_t* vb = vbs[slot];
        /* re-bind if the buffer object, its offset, or its id changed
           (the id check catches a destroyed+recreated buffer at the same address) */
        if ((_sg.mtl.state_cache.cur_vertexbuffers[slot] != vb) ||
            (_sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] != vb_offsets[slot]) ||
            (_sg.mtl.state_cache.cur_vertexbuffer_ids[slot].id != vb->slot.id))
        {
            _sg.mtl.state_cache.cur_vertexbuffers[slot] = vb;
            _sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] = vb_offsets[slot];
            _sg.mtl.state_cache.cur_vertexbuffer_ids[slot].id = vb->slot.id;
            /* vertex buffers live behind the uniform-buffer bind slots */
            const NSUInteger mtl_slot = SG_MAX_SHADERSTAGE_UBS + slot;
            SOKOL_ASSERT(vb->mtl.buf[vb->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setVertexBuffer:_sg_mtl_id(vb->mtl.buf[vb->cmn.active_slot])
                offset:(NSUInteger)vb_offsets[slot]
                atIndex:mtl_slot];
        }
    }

    /* apply vertex shader images */
    for (slot = 0; slot < (NSUInteger)num_vs_imgs; slot++) {
        const _sg_image_t* img = vs_imgs[slot];
        if ((_sg.mtl.state_cache.cur_vs_images[slot] != img) || (_sg.mtl.state_cache.cur_vs_image_ids[slot].id != img->slot.id)) {
            _sg.mtl.state_cache.cur_vs_images[slot] = img;
            _sg.mtl.state_cache.cur_vs_image_ids[slot].id = img->slot.id;
            SOKOL_ASSERT(img->mtl.tex[img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setVertexTexture:_sg_mtl_id(img->mtl.tex[img->cmn.active_slot]) atIndex:slot];
            SOKOL_ASSERT(img->mtl.sampler_state != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setVertexSamplerState:_sg_mtl_id(img->mtl.sampler_state) atIndex:slot];
        }
    }

    /* apply fragment shader images */
    for (slot = 0; slot < (NSUInteger)num_fs_imgs; slot++) {
        const _sg_image_t* img = fs_imgs[slot];
        if ((_sg.mtl.state_cache.cur_fs_images[slot] != img) || (_sg.mtl.state_cache.cur_fs_image_ids[slot].id != img->slot.id)) {
            _sg.mtl.state_cache.cur_fs_images[slot] = img;
            _sg.mtl.state_cache.cur_fs_image_ids[slot].id = img->slot.id;
            SOKOL_ASSERT(img->mtl.tex[img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setFragmentTexture:_sg_mtl_id(img->mtl.tex[img->cmn.active_slot]) atIndex:slot];
            SOKOL_ASSERT(img->mtl.sampler_state != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setFragmentSamplerState:_sg_mtl_id(img->mtl.sampler_state) atIndex:slot];
        }
    }
}
11673 | |
/* copy uniform data into the current frame's slice of the global uniform
   buffer and point the already-bound uniform buffer binding at the new
   offset; the write cursor advances by the data size rounded up to
   _SG_MTL_UB_ALIGN; a no-op when the current pass is invalid
     stage_index - SG_SHADERSTAGE_VS or SG_SHADERSTAGE_FS
     ub_index    - uniform block bind slot within the stage
     data        - uniform data (must fit the uniform block declared size) */
_SOKOL_PRIVATE void _sg_mtl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    /* the per-frame uniform buffer slice must not overflow */
    SOKOL_ASSERT(((size_t)_sg.mtl.cur_ub_offset + data->size) <= (size_t)_sg.mtl.ub_size);
    SOKOL_ASSERT((_sg.mtl.cur_ub_offset & (_SG_MTL_UB_ALIGN-1)) == 0);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline && _sg.mtl.state_cache.cur_pipeline->shader);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->shader->slot.id == _sg.mtl.state_cache.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
    SOKOL_ASSERT(data->size <= _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);

    /* copy to global uniform buffer, record offset into cmd encoder, and advance offset */
    uint8_t* dst = &_sg.mtl.cur_ub_base_ptr[_sg.mtl.cur_ub_offset];
    memcpy(dst, data->ptr, data->size);
    /* the uniform buffer itself was bound in begin_pass, only the offset changes here */
    if (stage_index == SG_SHADERSTAGE_VS) {
        [_sg.mtl.cmd_encoder setVertexBufferOffset:(NSUInteger)_sg.mtl.cur_ub_offset atIndex:(NSUInteger)ub_index];
    }
    else {
        [_sg.mtl.cmd_encoder setFragmentBufferOffset:(NSUInteger)_sg.mtl.cur_ub_offset atIndex:(NSUInteger)ub_index];
    }
    _sg.mtl.cur_ub_offset = _sg_roundup(_sg.mtl.cur_ub_offset + (int)data->size, _SG_MTL_UB_ALIGN);
}
11699 | |
/* record an (optionally indexed, optionally instanced) draw call on the
   current render command encoder; a no-op when the current pass is invalid */
_SOKOL_PRIVATE void _sg_mtl_draw(int base_element, int num_elements, int num_instances) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    const _sg_pipeline_t* pip = _sg.mtl.state_cache.cur_pipeline;
    SOKOL_ASSERT(pip && (pip->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id));
    if (SG_INDEXTYPE_NONE != pip->cmn.index_type) {
        /* indexed rendering, the index buffer was stored by apply_bindings */
        const _sg_buffer_t* ib = _sg.mtl.state_cache.cur_indexbuffer;
        SOKOL_ASSERT(ib && (ib->slot.id == _sg.mtl.state_cache.cur_indexbuffer_id.id));
        SOKOL_ASSERT(ib->mtl.buf[ib->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
        /* base_element is expressed as a byte offset into the index buffer */
        const NSUInteger ib_offset = (NSUInteger) (_sg.mtl.state_cache.cur_indexbuffer_offset + base_element * pip->mtl.index_size);
        [_sg.mtl.cmd_encoder drawIndexedPrimitives:pip->mtl.prim_type
            indexCount:(NSUInteger)num_elements
            indexType:pip->mtl.index_type
            indexBuffer:_sg_mtl_id(ib->mtl.buf[ib->cmn.active_slot])
            indexBufferOffset:ib_offset
            instanceCount:(NSUInteger)num_instances];
    }
    else {
        /* non-indexed rendering */
        [_sg.mtl.cmd_encoder drawPrimitives:pip->mtl.prim_type
            vertexStart:(NSUInteger)base_element
            vertexCount:(NSUInteger)num_elements
            instanceCount:(NSUInteger)num_instances];
    }
}
11728 | |
_SOKOL_PRIVATE void _sg_mtl_update_buffer(_sg_buffer_t* buf, const sg_range* data) {
    /* overwrite the buffer's content; rotates to the next internal slot first
       (double/triple buffering for dynamic buffers)
    */
    SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0));
    buf->cmn.active_slot = (buf->cmn.active_slot + 1) % buf->cmn.num_slots;
    __unsafe_unretained id<MTLBuffer> mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]);
    memcpy([mtl_buf contents], data->ptr, data->size);
    #if defined(_SG_TARGET_MACOS)
    /* managed-storage buffers on macOS must be told which range was modified */
    [mtl_buf didModifyRange:NSMakeRange(0, data->size)];
    #endif
}
11741 | |
_SOKOL_PRIVATE int _sg_mtl_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) {
    /* append data at the buffer's current append position and return the
       (4-byte rounded) number of bytes consumed; the caller advances append_pos
    */
    SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0));
    if (new_frame) {
        /* first append this frame: rotate to the next internal slot */
        buf->cmn.active_slot = (buf->cmn.active_slot + 1) % buf->cmn.num_slots;
    }
    __unsafe_unretained id<MTLBuffer> mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]);
    uint8_t* dst = ((uint8_t*) [mtl_buf contents]) + buf->cmn.append_pos;
    memcpy(dst, data->ptr, data->size);
    #if defined(_SG_TARGET_MACOS)
    /* managed-storage buffers on macOS must be told which range was modified */
    [mtl_buf didModifyRange:NSMakeRange((NSUInteger)buf->cmn.append_pos, (NSUInteger)data->size)];
    #endif
    /* NOTE: rounding to 4 bytes is a WebGPU requirement, but we want identical behaviour across all backends */
    return _sg_roundup((int)data->size, 4);
}
11759 | |
_SOKOL_PRIVATE void _sg_mtl_update_image(_sg_image_t* img, const sg_image_data* data) {
    /* overwrite the image's content; rotates to the next internal texture slot first */
    SOKOL_ASSERT(img && data);
    img->cmn.active_slot = (img->cmn.active_slot + 1) % img->cmn.num_slots;
    __unsafe_unretained id<MTLTexture> mtl_tex = _sg_mtl_id(img->mtl.tex[img->cmn.active_slot]);
    _sg_mtl_copy_image_data(img, mtl_tex, data);
}
11768 | |
11769 | /*== WEBGPU BACKEND IMPLEMENTATION ===========================================*/ |
11770 | #elif defined(SOKOL_WGPU) |
11771 | |
11772 | _SOKOL_PRIVATE WGPUBufferUsageFlags _sg_wgpu_buffer_usage(sg_buffer_type t, sg_usage u) { |
11773 | WGPUBufferUsageFlags res = 0; |
11774 | if (SG_BUFFERTYPE_VERTEXBUFFER == t) { |
11775 | res |= WGPUBufferUsage_Vertex; |
11776 | } |
11777 | else { |
11778 | res |= WGPUBufferUsage_Index; |
11779 | } |
11780 | if (SG_USAGE_IMMUTABLE != u) { |
11781 | res |= WGPUBufferUsage_CopyDst; |
11782 | } |
11783 | return res; |
11784 | } |
11785 | |
11786 | _SOKOL_PRIVATE WGPULoadOp _sg_wgpu_load_op(sg_action a) { |
11787 | switch (a) { |
11788 | case SG_ACTION_CLEAR: |
11789 | case SG_ACTION_DONTCARE: |
11790 | return WGPULoadOp_Clear; |
11791 | case SG_ACTION_LOAD: |
11792 | return WGPULoadOp_Load; |
11793 | default: |
11794 | SOKOL_UNREACHABLE; |
11795 | return (WGPULoadOp)0; |
11796 | } |
11797 | } |
11798 | |
11799 | _SOKOL_PRIVATE WGPUTextureViewDimension _sg_wgpu_tex_viewdim(sg_image_type t) { |
11800 | switch (t) { |
11801 | case SG_IMAGETYPE_2D: return WGPUTextureViewDimension_2D; |
11802 | case SG_IMAGETYPE_CUBE: return WGPUTextureViewDimension_Cube; |
11803 | case SG_IMAGETYPE_3D: return WGPUTextureViewDimension_3D; |
11804 | case SG_IMAGETYPE_ARRAY: return WGPUTextureViewDimension_2DArray; |
11805 | default: SOKOL_UNREACHABLE; return WGPUTextureViewDimension_Force32; |
11806 | } |
11807 | } |
11808 | |
11809 | _SOKOL_PRIVATE WGPUTextureComponentType _sg_wgpu_tex_comptype(sg_sampler_type t) { |
11810 | switch (t) { |
11811 | case SG_SAMPLERTYPE_FLOAT: return WGPUTextureComponentType_Float; |
11812 | case SG_SAMPLERTYPE_SINT: return WGPUTextureComponentType_Sint; |
11813 | case SG_SAMPLERTYPE_UINT: return WGPUTextureComponentType_Uint; |
11814 | default: SOKOL_UNREACHABLE; return WGPUTextureComponentType_Force32; |
11815 | } |
11816 | } |
11817 | |
11818 | _SOKOL_PRIVATE WGPUTextureDimension _sg_wgpu_tex_dim(sg_image_type t) { |
11819 | if (SG_IMAGETYPE_3D == t) { |
11820 | return WGPUTextureDimension_3D; |
11821 | } |
11822 | else { |
11823 | return WGPUTextureDimension_2D; |
11824 | } |
11825 | } |
11826 | |
11827 | _SOKOL_PRIVATE WGPUAddressMode _sg_wgpu_sampler_addrmode(sg_wrap m) { |
11828 | switch (m) { |
11829 | case SG_WRAP_REPEAT: |
11830 | return WGPUAddressMode_Repeat; |
11831 | case SG_WRAP_CLAMP_TO_EDGE: |
11832 | case SG_WRAP_CLAMP_TO_BORDER: |
11833 | return WGPUAddressMode_ClampToEdge; |
11834 | case SG_WRAP_MIRRORED_REPEAT: |
11835 | return WGPUAddressMode_MirrorRepeat; |
11836 | default: |
11837 | SOKOL_UNREACHABLE; |
11838 | return WGPUAddressMode_Force32; |
11839 | } |
11840 | } |
11841 | |
11842 | _SOKOL_PRIVATE WGPUFilterMode _sg_wgpu_sampler_minmagfilter(sg_filter f) { |
11843 | switch (f) { |
11844 | case SG_FILTER_NEAREST: |
11845 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: |
11846 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: |
11847 | return WGPUFilterMode_Nearest; |
11848 | case SG_FILTER_LINEAR: |
11849 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: |
11850 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: |
11851 | return WGPUFilterMode_Linear; |
11852 | default: |
11853 | SOKOL_UNREACHABLE; |
11854 | return WGPUFilterMode_Force32; |
11855 | } |
11856 | } |
11857 | |
11858 | _SOKOL_PRIVATE WGPUFilterMode _sg_wgpu_sampler_mipfilter(sg_filter f) { |
11859 | switch (f) { |
11860 | case SG_FILTER_NEAREST: |
11861 | case SG_FILTER_LINEAR: |
11862 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: |
11863 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: |
11864 | return WGPUFilterMode_Nearest; |
11865 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: |
11866 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: |
11867 | return WGPUFilterMode_Linear; |
11868 | default: |
11869 | SOKOL_UNREACHABLE; |
11870 | return WGPUFilterMode_Force32; |
11871 | } |
11872 | } |
11873 | |
11874 | _SOKOL_PRIVATE WGPUIndexFormat _sg_wgpu_indexformat(sg_index_type t) { |
11875 | /* NOTE: there's no WGPUIndexFormat_None */ |
11876 | return (t == SG_INDEXTYPE_UINT16) ? WGPUIndexFormat_Uint16 : WGPUIndexFormat_Uint32; |
11877 | } |
11878 | |
11879 | _SOKOL_PRIVATE WGPUInputStepMode _sg_wgpu_stepmode(sg_vertex_step s) { |
11880 | return (s == SG_VERTEXSTEP_PER_VERTEX) ? WGPUInputStepMode_Vertex : WGPUInputStepMode_Instance; |
11881 | } |
11882 | |
/* map a sokol vertex format to the matching WebGPU vertex format
   (UINT10_N2 has no WebGPU equivalent and is treated as unreachable, see FIXME below)
*/
_SOKOL_PRIVATE WGPUVertexFormat _sg_wgpu_vertexformat(sg_vertex_format f) {
    switch (f) {
        case SG_VERTEXFORMAT_FLOAT: return WGPUVertexFormat_Float;
        case SG_VERTEXFORMAT_FLOAT2: return WGPUVertexFormat_Float2;
        case SG_VERTEXFORMAT_FLOAT3: return WGPUVertexFormat_Float3;
        case SG_VERTEXFORMAT_FLOAT4: return WGPUVertexFormat_Float4;
        case SG_VERTEXFORMAT_BYTE4: return WGPUVertexFormat_Char4;
        case SG_VERTEXFORMAT_BYTE4N: return WGPUVertexFormat_Char4Norm;
        case SG_VERTEXFORMAT_UBYTE4: return WGPUVertexFormat_UChar4;
        case SG_VERTEXFORMAT_UBYTE4N: return WGPUVertexFormat_UChar4Norm;
        case SG_VERTEXFORMAT_SHORT2: return WGPUVertexFormat_Short2;
        case SG_VERTEXFORMAT_SHORT2N: return WGPUVertexFormat_Short2Norm;
        case SG_VERTEXFORMAT_USHORT2N: return WGPUVertexFormat_UShort2Norm;
        case SG_VERTEXFORMAT_SHORT4: return WGPUVertexFormat_Short4;
        case SG_VERTEXFORMAT_SHORT4N: return WGPUVertexFormat_Short4Norm;
        case SG_VERTEXFORMAT_USHORT4N: return WGPUVertexFormat_UShort4Norm;
        /* FIXME! UINT10_N2 */
        case SG_VERTEXFORMAT_UINT10_N2:
        default:
            SOKOL_UNREACHABLE;
            return WGPUVertexFormat_Force32;
    }
}
11906 | |
11907 | _SOKOL_PRIVATE WGPUPrimitiveTopology _sg_wgpu_topology(sg_primitive_type t) { |
11908 | switch (t) { |
11909 | case SG_PRIMITIVETYPE_POINTS: return WGPUPrimitiveTopology_PointList; |
11910 | case SG_PRIMITIVETYPE_LINES: return WGPUPrimitiveTopology_LineList; |
11911 | case SG_PRIMITIVETYPE_LINE_STRIP: return WGPUPrimitiveTopology_LineStrip; |
11912 | case SG_PRIMITIVETYPE_TRIANGLES: return WGPUPrimitiveTopology_TriangleList; |
11913 | case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return WGPUPrimitiveTopology_TriangleStrip; |
11914 | default: SOKOL_UNREACHABLE; return WGPUPrimitiveTopology_Force32; |
11915 | } |
11916 | } |
11917 | |
11918 | _SOKOL_PRIVATE WGPUFrontFace _sg_wgpu_frontface(sg_face_winding fw) { |
11919 | return (fw == SG_FACEWINDING_CCW) ? WGPUFrontFace_CCW : WGPUFrontFace_CW; |
11920 | } |
11921 | |
11922 | _SOKOL_PRIVATE WGPUCullMode _sg_wgpu_cullmode(sg_cull_mode cm) { |
11923 | switch (cm) { |
11924 | case SG_CULLMODE_NONE: return WGPUCullMode_None; |
11925 | case SG_CULLMODE_FRONT: return WGPUCullMode_Front; |
11926 | case SG_CULLMODE_BACK: return WGPUCullMode_Back; |
11927 | default: SOKOL_UNREACHABLE; return WGPUCullMode_Force32; |
11928 | } |
11929 | } |
11930 | |
/* map a sokol pixel format to the matching WebGPU texture format;
   formats without a WebGPU equivalent (16-bit normalized, RGB9E5, PVRTC, ETC2)
   are treated as unreachable — callers must check format support first
*/
_SOKOL_PRIVATE WGPUTextureFormat _sg_wgpu_textureformat(sg_pixel_format p) {
    switch (p) {
        case SG_PIXELFORMAT_NONE: return WGPUTextureFormat_Undefined;
        case SG_PIXELFORMAT_R8: return WGPUTextureFormat_R8Unorm;
        case SG_PIXELFORMAT_R8SN: return WGPUTextureFormat_R8Snorm;
        case SG_PIXELFORMAT_R8UI: return WGPUTextureFormat_R8Uint;
        case SG_PIXELFORMAT_R8SI: return WGPUTextureFormat_R8Sint;
        case SG_PIXELFORMAT_R16UI: return WGPUTextureFormat_R16Uint;
        case SG_PIXELFORMAT_R16SI: return WGPUTextureFormat_R16Sint;
        case SG_PIXELFORMAT_R16F: return WGPUTextureFormat_R16Float;
        case SG_PIXELFORMAT_RG8: return WGPUTextureFormat_RG8Unorm;
        case SG_PIXELFORMAT_RG8SN: return WGPUTextureFormat_RG8Snorm;
        case SG_PIXELFORMAT_RG8UI: return WGPUTextureFormat_RG8Uint;
        case SG_PIXELFORMAT_RG8SI: return WGPUTextureFormat_RG8Sint;
        case SG_PIXELFORMAT_R32UI: return WGPUTextureFormat_R32Uint;
        case SG_PIXELFORMAT_R32SI: return WGPUTextureFormat_R32Sint;
        case SG_PIXELFORMAT_R32F: return WGPUTextureFormat_R32Float;
        case SG_PIXELFORMAT_RG16UI: return WGPUTextureFormat_RG16Uint;
        case SG_PIXELFORMAT_RG16SI: return WGPUTextureFormat_RG16Sint;
        case SG_PIXELFORMAT_RG16F: return WGPUTextureFormat_RG16Float;
        case SG_PIXELFORMAT_RGBA8: return WGPUTextureFormat_RGBA8Unorm;
        case SG_PIXELFORMAT_RGBA8SN: return WGPUTextureFormat_RGBA8Snorm;
        case SG_PIXELFORMAT_RGBA8UI: return WGPUTextureFormat_RGBA8Uint;
        case SG_PIXELFORMAT_RGBA8SI: return WGPUTextureFormat_RGBA8Sint;
        case SG_PIXELFORMAT_BGRA8: return WGPUTextureFormat_BGRA8Unorm;
        case SG_PIXELFORMAT_RGB10A2: return WGPUTextureFormat_RGB10A2Unorm;
        case SG_PIXELFORMAT_RG11B10F: return WGPUTextureFormat_RG11B10Float;
        case SG_PIXELFORMAT_RG32UI: return WGPUTextureFormat_RG32Uint;
        case SG_PIXELFORMAT_RG32SI: return WGPUTextureFormat_RG32Sint;
        case SG_PIXELFORMAT_RG32F: return WGPUTextureFormat_RG32Float;
        case SG_PIXELFORMAT_RGBA16UI: return WGPUTextureFormat_RGBA16Uint;
        case SG_PIXELFORMAT_RGBA16SI: return WGPUTextureFormat_RGBA16Sint;
        case SG_PIXELFORMAT_RGBA16F: return WGPUTextureFormat_RGBA16Float;
        case SG_PIXELFORMAT_RGBA32UI: return WGPUTextureFormat_RGBA32Uint;
        case SG_PIXELFORMAT_RGBA32SI: return WGPUTextureFormat_RGBA32Sint;
        case SG_PIXELFORMAT_RGBA32F: return WGPUTextureFormat_RGBA32Float;
        /* NOTE: SG_PIXELFORMAT_DEPTH maps to Depth24Plus (not a pure float depth format) */
        case SG_PIXELFORMAT_DEPTH: return WGPUTextureFormat_Depth24Plus;
        case SG_PIXELFORMAT_DEPTH_STENCIL: return WGPUTextureFormat_Depth24PlusStencil8;
        case SG_PIXELFORMAT_BC1_RGBA: return WGPUTextureFormat_BC1RGBAUnorm;
        case SG_PIXELFORMAT_BC2_RGBA: return WGPUTextureFormat_BC2RGBAUnorm;
        case SG_PIXELFORMAT_BC3_RGBA: return WGPUTextureFormat_BC3RGBAUnorm;
        case SG_PIXELFORMAT_BC4_R: return WGPUTextureFormat_BC4RUnorm;
        case SG_PIXELFORMAT_BC4_RSN: return WGPUTextureFormat_BC4RSnorm;
        case SG_PIXELFORMAT_BC5_RG: return WGPUTextureFormat_BC5RGUnorm;
        case SG_PIXELFORMAT_BC5_RGSN: return WGPUTextureFormat_BC5RGSnorm;
        case SG_PIXELFORMAT_BC6H_RGBF: return WGPUTextureFormat_BC6HRGBSfloat;
        case SG_PIXELFORMAT_BC6H_RGBUF: return WGPUTextureFormat_BC6HRGBUfloat;
        case SG_PIXELFORMAT_BC7_RGBA: return WGPUTextureFormat_BC7RGBAUnorm;

        /* NOT SUPPORTED */
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_RG16:
        case SG_PIXELFORMAT_RG16SN:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_RGB9E5:
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
        case SG_PIXELFORMAT_ETC2_RGB8:
        case SG_PIXELFORMAT_ETC2_RGB8A1:
        case SG_PIXELFORMAT_ETC2_RGBA8:
        case SG_PIXELFORMAT_ETC2_RG11:
        case SG_PIXELFORMAT_ETC2_RG11SN:
        default:
            SOKOL_UNREACHABLE;
            return WGPUTextureFormat_Force32;
    }
}
12002 | |
12003 | /* |
12004 | FIXME ??? this isn't needed anywhere? |
12005 | _SOKOL_PRIVATE WGPUTextureAspect _sg_wgpu_texture_aspect(sg_pixel_format fmt) { |
12006 | if (_sg_is_valid_rendertarget_depth_format(fmt)) { |
12007 | if (!_sg_is_depth_stencil_format(fmt)) { |
12008 | return WGPUTextureAspect_DepthOnly; |
12009 | } |
12010 | } |
12011 | return WGPUTextureAspect_All; |
12012 | } |
12013 | */ |
12014 | |
/* map a sokol compare func (depth/stencil tests) to the WebGPU compare function */
_SOKOL_PRIVATE WGPUCompareFunction _sg_wgpu_comparefunc(sg_compare_func f) {
    switch (f) {
        case SG_COMPAREFUNC_NEVER: return WGPUCompareFunction_Never;
        case SG_COMPAREFUNC_LESS: return WGPUCompareFunction_Less;
        case SG_COMPAREFUNC_EQUAL: return WGPUCompareFunction_Equal;
        case SG_COMPAREFUNC_LESS_EQUAL: return WGPUCompareFunction_LessEqual;
        case SG_COMPAREFUNC_GREATER: return WGPUCompareFunction_Greater;
        case SG_COMPAREFUNC_NOT_EQUAL: return WGPUCompareFunction_NotEqual;
        case SG_COMPAREFUNC_GREATER_EQUAL: return WGPUCompareFunction_GreaterEqual;
        case SG_COMPAREFUNC_ALWAYS: return WGPUCompareFunction_Always;
        default: SOKOL_UNREACHABLE; return WGPUCompareFunction_Force32;
    }
}
12028 | |
/* map a sokol stencil op to the WebGPU stencil operation */
_SOKOL_PRIVATE WGPUStencilOperation _sg_wgpu_stencilop(sg_stencil_op op) {
    switch (op) {
        case SG_STENCILOP_KEEP: return WGPUStencilOperation_Keep;
        case SG_STENCILOP_ZERO: return WGPUStencilOperation_Zero;
        case SG_STENCILOP_REPLACE: return WGPUStencilOperation_Replace;
        case SG_STENCILOP_INCR_CLAMP: return WGPUStencilOperation_IncrementClamp;
        case SG_STENCILOP_DECR_CLAMP: return WGPUStencilOperation_DecrementClamp;
        case SG_STENCILOP_INVERT: return WGPUStencilOperation_Invert;
        case SG_STENCILOP_INCR_WRAP: return WGPUStencilOperation_IncrementWrap;
        case SG_STENCILOP_DECR_WRAP: return WGPUStencilOperation_DecrementWrap;
        default: SOKOL_UNREACHABLE; return WGPUStencilOperation_Force32;
    }
}
12042 | |
12043 | _SOKOL_PRIVATE WGPUBlendOperation _sg_wgpu_blendop(sg_blend_op op) { |
12044 | switch (op) { |
12045 | case SG_BLENDOP_ADD: return WGPUBlendOperation_Add; |
12046 | case SG_BLENDOP_SUBTRACT: return WGPUBlendOperation_Subtract; |
12047 | case SG_BLENDOP_REVERSE_SUBTRACT: return WGPUBlendOperation_ReverseSubtract; |
12048 | default: SOKOL_UNREACHABLE; return WGPUBlendOperation_Force32; |
12049 | } |
12050 | } |
12051 | |
/* map a sokol blend factor to the WebGPU blend factor;
   NOTE: the BLEND_ALPHA factors are folded into BlendColor because WebGPU
   has no separate blend-alpha constant (see FIXME below)
*/
_SOKOL_PRIVATE WGPUBlendFactor _sg_wgpu_blendfactor(sg_blend_factor f) {
    switch (f) {
        case SG_BLENDFACTOR_ZERO: return WGPUBlendFactor_Zero;
        case SG_BLENDFACTOR_ONE: return WGPUBlendFactor_One;
        case SG_BLENDFACTOR_SRC_COLOR: return WGPUBlendFactor_SrcColor;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return WGPUBlendFactor_OneMinusSrcColor;
        case SG_BLENDFACTOR_SRC_ALPHA: return WGPUBlendFactor_SrcAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return WGPUBlendFactor_OneMinusSrcAlpha;
        case SG_BLENDFACTOR_DST_COLOR: return WGPUBlendFactor_DstColor;
        case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return WGPUBlendFactor_OneMinusDstColor;
        case SG_BLENDFACTOR_DST_ALPHA: return WGPUBlendFactor_DstAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return WGPUBlendFactor_OneMinusDstAlpha;
        case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return WGPUBlendFactor_SrcAlphaSaturated;
        case SG_BLENDFACTOR_BLEND_COLOR: return WGPUBlendFactor_BlendColor;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return WGPUBlendFactor_OneMinusBlendColor;
        /* FIXME: separate blend alpha value not supported? */
        case SG_BLENDFACTOR_BLEND_ALPHA: return WGPUBlendFactor_BlendColor;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return WGPUBlendFactor_OneMinusBlendColor;
        default:
            SOKOL_UNREACHABLE; return WGPUBlendFactor_Force32;
    }
}
12074 | |
12075 | _SOKOL_PRIVATE WGPUColorWriteMaskFlags _sg_wgpu_colorwritemask(uint8_t m) { |
12076 | WGPUColorWriteMaskFlags res = 0; |
12077 | if (0 != (m & SG_COLORMASK_R)) { |
12078 | res |= WGPUColorWriteMask_Red; |
12079 | } |
12080 | if (0 != (m & SG_COLORMASK_G)) { |
12081 | res |= WGPUColorWriteMask_Green; |
12082 | } |
12083 | if (0 != (m & SG_COLORMASK_B)) { |
12084 | res |= WGPUColorWriteMask_Blue; |
12085 | } |
12086 | if (0 != (m & SG_COLORMASK_A)) { |
12087 | res |= WGPUColorWriteMask_Alpha; |
12088 | } |
12089 | return res; |
12090 | } |
12091 | |
/* initialize the backend-independent feature/limit/pixelformat capability
   tables for the WebGPU backend
*/
_SOKOL_PRIVATE void _sg_wgpu_init_caps(void) {
    _sg.backend = SG_BACKEND_WGPU;
    _sg.features.instancing = true;
    _sg.features.origin_top_left = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    /* WebGPU samplers have no clamp-to-border address mode */
    _sg.features.image_clamp_to_border = false;
    _sg.features.mrt_independent_blend_state = true;
    _sg.features.mrt_independent_write_mask = true;

    /* FIXME: max images size??? */
    /* NOTE(review): these limits are hardwired, not queried from the device —
       confirm against the actual WebGPU adapter limits
    */
    _sg.limits.max_image_size_2d = 8 * 1024;
    _sg.limits.max_image_size_cube = 8 * 1024;
    _sg.limits.max_image_size_3d = 2 * 1024;
    _sg.limits.max_image_size_array = 8 * 1024;
    _sg.limits.max_image_array_layers = 2 * 1024;
    _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES;

    /* per-pixelformat capability flags (sample/filter/render/blend/msaa/depth) */
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]);
    _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_R32F]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]);
    /* FIXME: missing SG_PIXELFORMAT_RG11B10F */
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
    _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_RG32F]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]);

    /* FIXME FIXME FIXME: need to check if BC texture compression is
       actually supported, currently the WebGPU C-API doesn't allow this
    */
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]);
}
12162 | |
12163 | /* |
12164 | WGPU uniform buffer pool implementation: |
12165 | |
12166 | At start of frame, a mapped buffer is grabbed from the pool, |
12167 | or a new buffer is created if there is no mapped buffer available. |
12168 | |
12169 | At end of frame, the current buffer is unmapped before queue submit, |
12170 | and async-mapped immediately again. |
12171 | |
12172 | UNIFORM BUFFER FIXME: |
12173 | |
12174 | - As per WebGPU spec, it should be possible to create a Uniform|MapWrite |
12175 | buffer, but this isn't currently allowed in Dawn. |
12176 | */ |
/* create the GPU-side uniform buffer plus a bind group layout and bind group
   with one dynamic-offset uniform binding per (shader stage x uniform block) slot
*/
_SOKOL_PRIVATE void _sg_wgpu_ubpool_init(const sg_desc* desc) {

    /* Add the max-uniform-update size (64 KB) to the requested buffer size,
       this is to prevent validation errors in the WebGPU implementation
       if the entire buffer size is used per frame. 64 KB is the allowed
       max uniform update size on NVIDIA
    */
    _sg.wgpu.ub.num_bytes = desc->uniform_buffer_size + _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE;

    /* the GPU-side uniform buffer (copy target for the staging buffers) */
    WGPUBufferDescriptor ub_desc;
    _sg_clear(&ub_desc, sizeof(ub_desc));
    ub_desc.size = _sg.wgpu.ub.num_bytes;
    ub_desc.usage = WGPUBufferUsage_Uniform|WGPUBufferUsage_CopyDst;
    _sg.wgpu.ub.buf = wgpuDeviceCreateBuffer(_sg.wgpu.dev, &ub_desc);
    SOKOL_ASSERT(_sg.wgpu.ub.buf);

    /* one dynamic-offset uniform binding per stage/uniform-block slot,
       binding index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index
    */
    WGPUBindGroupLayoutBinding ub_bglb_desc[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS];
    _sg_clear(ub_bglb_desc, sizeof(ub_bglb_desc));
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        WGPUShaderStage vis = (stage_index == SG_SHADERSTAGE_VS) ? WGPUShaderStage_Vertex : WGPUShaderStage_Fragment;
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            int bind_index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index;
            ub_bglb_desc[stage_index][ub_index].binding = bind_index;
            ub_bglb_desc[stage_index][ub_index].visibility = vis;
            ub_bglb_desc[stage_index][ub_index].type = WGPUBindingType_UniformBuffer;
            /* dynamic offsets are provided per draw call in wgpuRenderPassEncoderSetBindGroup */
            ub_bglb_desc[stage_index][ub_index].hasDynamicOffset = true;
        }
    }

    WGPUBindGroupLayoutDescriptor ub_bgl_desc;
    _sg_clear(&ub_bgl_desc, sizeof(ub_bgl_desc));
    ub_bgl_desc.bindingCount = SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS;
    ub_bgl_desc.bindings = &ub_bglb_desc[0][0];
    _sg.wgpu.ub.bindgroup_layout = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &ub_bgl_desc);
    SOKOL_ASSERT(_sg.wgpu.ub.bindgroup_layout);

    /* a single bind group pointing all slots at the shared uniform buffer */
    WGPUBindGroupBinding ub_bgb[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS];
    _sg_clear(ub_bgb, sizeof(ub_bgb));
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            int bind_index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index;
            ub_bgb[stage_index][ub_index].binding = bind_index;
            ub_bgb[stage_index][ub_index].buffer = _sg.wgpu.ub.buf;
            // FIXME FIXME FIXME FIXME: HACK FOR VALIDATION BUG IN DAWN
            ub_bgb[stage_index][ub_index].size = (1<<16);
        }
    }
    WGPUBindGroupDescriptor bg_desc;
    _sg_clear(&bg_desc, sizeof(bg_desc));
    bg_desc.layout = _sg.wgpu.ub.bindgroup_layout;
    bg_desc.bindingCount = SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS;
    bg_desc.bindings = &ub_bgb[0][0];
    _sg.wgpu.ub.bindgroup = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc);
    SOKOL_ASSERT(_sg.wgpu.ub.bindgroup);
}
12232 | |
12233 | _SOKOL_PRIVATE void _sg_wgpu_ubpool_discard(void) { |
12234 | if (_sg.wgpu.ub.buf) { |
12235 | wgpuBufferRelease(_sg.wgpu.ub.buf); |
12236 | _sg.wgpu.ub.buf = 0; |
12237 | } |
12238 | if (_sg.wgpu.ub.bindgroup) { |
12239 | wgpuBindGroupRelease(_sg.wgpu.ub.bindgroup); |
12240 | _sg.wgpu.ub.bindgroup = 0; |
12241 | } |
12242 | if (_sg.wgpu.ub.bindgroup_layout) { |
12243 | wgpuBindGroupLayoutRelease(_sg.wgpu.ub.bindgroup_layout); |
12244 | _sg.wgpu.ub.bindgroup_layout = 0; |
12245 | } |
12246 | for (int i = 0; i < _sg.wgpu.ub.stage.num; i++) { |
12247 | if (_sg.wgpu.ub.stage.buf[i]) { |
12248 | wgpuBufferRelease(_sg.wgpu.ub.stage.buf[i]); |
12249 | _sg.wgpu.ub.stage.buf[i] = 0; |
12250 | _sg.wgpu.ub.stage.ptr[i] = 0; |
12251 | } |
12252 | } |
12253 | } |
12254 | |
12255 | _SOKOL_PRIVATE void _sg_wgpu_ubpool_mapped_callback(WGPUBufferMapAsyncStatus status, void* data, uint64_t data_len, void* user_data) { |
12256 | if (!_sg.wgpu.valid) { |
12257 | return; |
12258 | } |
12259 | /* FIXME: better handling for this */ |
12260 | if (WGPUBufferMapAsyncStatus_Success != status) { |
12261 | SG_LOG("Mapping uniform buffer failed!\n"); |
12262 | SOKOL_ASSERT(false); |
12263 | } |
12264 | SOKOL_ASSERT(data && (data_len == _sg.wgpu.ub.num_bytes)); |
12265 | int index = (int)(intptr_t) user_data; |
12266 | SOKOL_ASSERT(index < _sg.wgpu.ub.stage.num); |
12267 | SOKOL_ASSERT(0 == _sg.wgpu.ub.stage.ptr[index]); |
12268 | _sg.wgpu.ub.stage.ptr[index] = (uint8_t*) data; |
12269 | } |
12270 | |
/* per-frame uniform-buffer-pool housekeeping: re-map last frame's staging
   buffer, rewind write offsets, and pick (or create) a mapped staging buffer
   as the current frame's write target
*/
_SOKOL_PRIVATE void _sg_wgpu_ubpool_next_frame(bool first_frame) {

    /* immediately request a new mapping for the last frame's current staging buffer */
    if (!first_frame) {
        WGPUBuffer ub_src = _sg.wgpu.ub.stage.buf[_sg.wgpu.ub.stage.cur];
        wgpuBufferMapWriteAsync(ub_src, _sg_wgpu_ubpool_mapped_callback, (void*)(intptr_t)_sg.wgpu.ub.stage.cur);
    }

    /* rewind per-frame offsets */
    _sg.wgpu.ub.offset = 0;
    _sg_clear(&_sg.wgpu.ub.bind_offsets, sizeof(_sg.wgpu.ub.bind_offsets));

    /* check if a mapped staging buffer is available, otherwise create one */
    for (int i = 0; i < _sg.wgpu.ub.stage.num; i++) {
        if (_sg.wgpu.ub.stage.ptr[i]) {
            _sg.wgpu.ub.stage.cur = i;
            return;
        }
    }

    /* no mapped uniform buffer available, create one
       (the pool grows until async mappings complete fast enough to keep up)
    */
    SOKOL_ASSERT(_sg.wgpu.ub.stage.num < _SG_WGPU_STAGING_PIPELINE_SIZE);
    _sg.wgpu.ub.stage.cur = _sg.wgpu.ub.stage.num++;
    const int cur = _sg.wgpu.ub.stage.cur;

    /* new staging buffers are created pre-mapped, so no async wait is needed here */
    WGPUBufferDescriptor desc;
    _sg_clear(&desc, sizeof(desc));
    desc.size = _sg.wgpu.ub.num_bytes;
    desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_MapWrite;
    WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &desc);
    _sg.wgpu.ub.stage.buf[cur] = res.buffer;
    _sg.wgpu.ub.stage.ptr[cur] = (uint8_t*) res.data;
    SOKOL_ASSERT(_sg.wgpu.ub.stage.buf[cur]);
    SOKOL_ASSERT(_sg.wgpu.ub.stage.ptr[cur]);
    SOKOL_ASSERT(res.dataLength == _sg.wgpu.ub.num_bytes);
}
12307 | |
12308 | _SOKOL_PRIVATE void _sg_wgpu_ubpool_flush(void) { |
12309 | /* unmap staging buffer and copy to uniform buffer */ |
12310 | const int cur = _sg.wgpu.ub.stage.cur; |
12311 | SOKOL_ASSERT(_sg.wgpu.ub.stage.ptr[cur]); |
12312 | _sg.wgpu.ub.stage.ptr[cur] = 0; |
12313 | WGPUBuffer src_buf = _sg.wgpu.ub.stage.buf[cur]; |
12314 | wgpuBufferUnmap(src_buf); |
12315 | if (_sg.wgpu.ub.offset > 0) { |
12316 | WGPUBuffer dst_buf = _sg.wgpu.ub.buf; |
12317 | wgpuCommandEncoderCopyBufferToBuffer(_sg.wgpu.render_cmd_enc, src_buf, 0, dst_buf, 0, _sg.wgpu.ub.offset); |
12318 | } |
12319 | } |
12320 | |
/* helper function to compute number of bytes needed in staging buffer to copy image data */
_SOKOL_PRIVATE uint32_t _sg_wgpu_image_data_buffer_size(const _sg_image_t* img) {
    uint32_t num_bytes = 0;
    /* cube images have 6 faces, array images have one slice per layer */
    const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices : 1;
    for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) {
        const uint32_t mip_width = _sg_max(img->cmn.width >> mip_index, 1);
        const uint32_t mip_height = _sg_max(img->cmn.height >> mip_index, 1);
        /* row-pitch must be 256-aligned */
        const uint32_t bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN);
        num_bytes += bytes_per_slice * num_slices * num_faces;
    }
    return num_bytes;
}
12335 | |
/* helper function to copy image data into a texture via a staging buffer, returns number of
   bytes copied (the padded size consumed in the staging buffer, which can be larger
   than the source data because of the 256-byte row-pitch alignment)
*/
_SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_data(WGPUBuffer stg_buf, uint8_t* stg_base_ptr, uint32_t stg_base_offset, _sg_image_t* img, const sg_image_data* data) {
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
    SOKOL_ASSERT(stg_buf && stg_base_ptr);
    SOKOL_ASSERT(img);
    SOKOL_ASSERT(data);
    /* running write position in the staging buffer */
    uint32_t stg_offset = stg_base_offset;
    /* cubemaps have 6 faces, array textures num_slices layers, all others 1 */
    const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices : 1;
    const sg_pixel_format fmt = img->cmn.pixel_format;
    /* copy-source (staging buffer) and copy-destination (texture) views,
       reused for every copy operation recorded below */
    WGPUBufferCopyView src_view;
    _sg_clear(&src_view, sizeof(src_view));
    src_view.buffer = stg_buf;
    WGPUTextureCopyView dst_view;
    _sg_clear(&dst_view, sizeof(dst_view));
    dst_view.texture = img->wgpu.tex;
    WGPUExtent3D extent;
    _sg_clear(&extent, sizeof(extent));

    for (uint32_t face_index = 0; face_index < num_faces; face_index++) {
        for (uint32_t mip_index = 0; mip_index < (uint32_t)img->cmn.num_mipmaps; mip_index++) {
            SOKOL_ASSERT(data->subimage[face_index][mip_index].ptr);
            SOKOL_ASSERT(data->subimage[face_index][mip_index].size > 0);
            const uint8_t* src_base_ptr = (const uint8_t*)data->subimage[face_index][mip_index].ptr;
            SOKOL_ASSERT(src_base_ptr);
            uint8_t* dst_base_ptr = stg_base_ptr + stg_offset;

            const uint32_t mip_width = _sg_max(img->cmn.width >> mip_index, 1);
            const uint32_t mip_height = _sg_max(img->cmn.height >> mip_index, 1);
            /* for 3D textures the depth dimension also shrinks per mip level */
            const uint32_t mip_depth = (img->cmn.type == SG_IMAGETYPE_3D) ? _sg_max(img->cmn.num_slices >> mip_index, 1) : 1;
            /* num_rows accounts for block-compressed formats (rows of blocks, not pixels) */
            const uint32_t num_rows = _sg_num_rows(fmt, mip_height);
            /* src: tightly packed user data; dst: row pitch padded to _SG_WGPU_ROWPITCH_ALIGN */
            const uint32_t src_bytes_per_row = _sg_row_pitch(fmt, mip_width, 1);
            const uint32_t dst_bytes_per_row = _sg_row_pitch(fmt, mip_width, _SG_WGPU_ROWPITCH_ALIGN);
            const uint32_t src_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, 1);
            const uint32_t dst_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN);
            SOKOL_ASSERT((uint32_t)data->subimage[face_index][mip_index].size == (src_bytes_per_slice * num_slices));
            SOKOL_ASSERT(src_bytes_per_row <= dst_bytes_per_row);
            SOKOL_ASSERT(src_bytes_per_slice == (src_bytes_per_row * num_rows));
            SOKOL_ASSERT(dst_bytes_per_slice == (dst_bytes_per_row * num_rows));
            _SOKOL_UNUSED(src_bytes_per_slice);

            /* copy data into mapped staging buffer */
            if (src_bytes_per_row == dst_bytes_per_row) {
                /* pitches match: can do a single memcpy for the whole subimage */
                uint32_t num_bytes = data->subimage[face_index][mip_index].size;
                memcpy(dst_base_ptr, src_base_ptr, num_bytes);
            }
            else {
                /* src/dst pitch doesn't match, need to copy row by row */
                uint8_t* dst_ptr = dst_base_ptr;
                const uint8_t* src_ptr = src_base_ptr;
                for (uint32_t slice_index = 0; slice_index < num_slices; slice_index++) {
                    SOKOL_ASSERT(dst_ptr == dst_base_ptr + slice_index * dst_bytes_per_slice);
                    for (uint32_t row_index = 0; row_index < num_rows; row_index++) {
                        memcpy(dst_ptr, src_ptr, src_bytes_per_row);
                        src_ptr += src_bytes_per_row;
                        dst_ptr += dst_bytes_per_row;
                    }
                }
            }

            /* record the staging copy operation into command encoder */
            src_view.imageHeight = mip_height;
            src_view.rowPitch = dst_bytes_per_row;
            dst_view.mipLevel = mip_index;
            extent.width = mip_width;
            extent.height = mip_height;
            extent.depth = mip_depth;
            /* cubemaps must not also be array textures */
            SOKOL_ASSERT((img->cmn.type != SG_IMAGETYPE_CUBE) || (num_slices == 1));
            /* one copy per slice; for cubemaps the face index selects the layer */
            for (uint32_t slice_index = 0; slice_index < num_slices; slice_index++) {
                const uint32_t layer_index = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? slice_index : face_index;
                src_view.offset = stg_offset;
                dst_view.arrayLayer = layer_index;
                wgpuCommandEncoderCopyBufferToTexture(_sg.wgpu.staging_cmd_enc, &src_view, &dst_view, &extent);
                stg_offset += dst_bytes_per_slice;
                SOKOL_ASSERT(stg_offset <= _sg.wgpu.staging.num_bytes);
            }
        }
    }
    SOKOL_ASSERT(stg_offset >= stg_base_offset);
    return (stg_offset - stg_base_offset);
}
12420 | |
12421 | /* |
12422 | The WGPU staging buffer implementation: |
12423 | |
12424 | Very similar to the uniform buffer pool, there's a pool of big |
12425 | per-frame staging buffers, each must be big enough to hold |
12426 | all data uploaded to dynamic resources for one frame. |
12427 | |
12428 | Staging buffers are created on demand and reused, because the |
12429 | 'frame pipeline depth' of WGPU isn't predictable. |
12430 | |
12431 | The difference to the uniform buffer system is that there isn't |
12432 | a 1:1 relationship for source- and destination for the |
12433 | data-copy operation. There's always one staging buffer as copy-source |
12434 | per frame, but many copy-destinations (regular vertex/index buffers |
12435 | or images). Instead of one big copy-operation at the end of the frame, |
12436 | multiple copy-operations will be written throughout the frame. |
12437 | */ |
12438 | _SOKOL_PRIVATE void _sg_wgpu_staging_init(const sg_desc* desc) { |
12439 | SOKOL_ASSERT(desc && (desc->staging_buffer_size > 0)); |
12440 | _sg.wgpu.staging.num_bytes = desc->staging_buffer_size; |
12441 | /* there's actually nothing more to do here */ |
12442 | } |
12443 | |
12444 | _SOKOL_PRIVATE void _sg_wgpu_staging_discard(void) { |
12445 | for (int i = 0; i < _sg.wgpu.staging.num; i++) { |
12446 | if (_sg.wgpu.staging.buf[i]) { |
12447 | wgpuBufferRelease(_sg.wgpu.staging.buf[i]); |
12448 | _sg.wgpu.staging.buf[i] = 0; |
12449 | _sg.wgpu.staging.ptr[i] = 0; |
12450 | } |
12451 | } |
12452 | } |
12453 | |
12454 | _SOKOL_PRIVATE void _sg_wgpu_staging_mapped_callback(WGPUBufferMapAsyncStatus status, void* data, uint64_t data_len, void* user_data) { |
12455 | if (!_sg.wgpu.valid) { |
12456 | return; |
12457 | } |
12458 | /* FIXME: better handling for this */ |
12459 | if (WGPUBufferMapAsyncStatus_Success != status) { |
12460 | SOKOL_ASSERT("Mapping staging buffer failed!\n"); |
12461 | SOKOL_ASSERT(false); |
12462 | } |
12463 | SOKOL_ASSERT(data && (data_len == _sg.wgpu.staging.num_bytes)); |
12464 | int index = (int)(intptr_t) user_data; |
12465 | SOKOL_ASSERT(index < _sg.wgpu.staging.num); |
12466 | SOKOL_ASSERT(0 == _sg.wgpu.staging.ptr[index]); |
12467 | _sg.wgpu.staging.ptr[index] = (uint8_t*) data; |
12468 | } |
12469 | |
/* rotate the staging buffer pool at the start of a frame: re-map last frame's
   buffer asynchronously, then pick an already-mapped buffer for this frame,
   creating a new one if none is available yet
*/
_SOKOL_PRIVATE void _sg_wgpu_staging_next_frame(bool first_frame) {

    /* immediately request a new mapping for the last frame's current staging buffer */
    if (!first_frame) {
        WGPUBuffer cur_buf = _sg.wgpu.staging.buf[_sg.wgpu.staging.cur];
        /* the slot index travels through user_data into _sg_wgpu_staging_mapped_callback */
        wgpuBufferMapWriteAsync(cur_buf, _sg_wgpu_staging_mapped_callback, (void*)(intptr_t)_sg.wgpu.staging.cur);
    }

    /* rewind staging-buffer offset */
    _sg.wgpu.staging.offset = 0;

    /* check if mapped staging buffer is available, otherwise create one */
    for (int i = 0; i < _sg.wgpu.staging.num; i++) {
        if (_sg.wgpu.staging.ptr[i]) {
            _sg.wgpu.staging.cur = i;
            return;
        }
    }

    /* no mapped buffer available, create one (created mapped, so it is
       immediately usable this frame)
    */
    SOKOL_ASSERT(_sg.wgpu.staging.num < _SG_WGPU_STAGING_PIPELINE_SIZE);
    _sg.wgpu.staging.cur = _sg.wgpu.staging.num++;
    const int cur = _sg.wgpu.staging.cur;

    WGPUBufferDescriptor desc;
    _sg_clear(&desc, sizeof(desc));
    desc.size = _sg.wgpu.staging.num_bytes;
    desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_MapWrite;
    WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &desc);
    _sg.wgpu.staging.buf[cur] = res.buffer;
    _sg.wgpu.staging.ptr[cur] = (uint8_t*) res.data;
    SOKOL_ASSERT(_sg.wgpu.staging.buf[cur]);
    SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]);
    SOKOL_ASSERT(res.dataLength == _sg.wgpu.staging.num_bytes);
}
12505 | |
12506 | _SOKOL_PRIVATE uint32_t _sg_wgpu_staging_copy_to_buffer(WGPUBuffer dst_buf, uint32_t dst_buf_offset, const void* data, uint32_t data_num_bytes) { |
12507 | /* Copy a chunk of data into the staging buffer, and record a blit-operation into |
12508 | the command encoder, bump the offset for the next data chunk, return 0 if there |
12509 | was not enough room in the staging buffer, return the number of actually |
12510 | copied bytes on success. |
12511 | |
12512 | NOTE: that the number of staging bytes to be copied must be a multiple of 4. |
12513 | |
12514 | */ |
12515 | SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); |
12516 | SOKOL_ASSERT((dst_buf_offset & 3) == 0); |
12517 | SOKOL_ASSERT(data_num_bytes > 0); |
12518 | uint32_t copy_num_bytes = _sg_roundup(data_num_bytes, 4); |
12519 | if ((_sg.wgpu.staging.offset + copy_num_bytes) >= _sg.wgpu.staging.num_bytes) { |
12520 | SG_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_buffer())!\n"); |
12521 | return false; |
12522 | } |
12523 | const int cur = _sg.wgpu.staging.cur; |
12524 | SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); |
12525 | uint32_t stg_buf_offset = _sg.wgpu.staging.offset; |
12526 | uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur] + stg_buf_offset; |
12527 | memcpy(stg_ptr, data, data_num_bytes); |
12528 | WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur]; |
12529 | wgpuCommandEncoderCopyBufferToBuffer(_sg.wgpu.staging_cmd_enc, stg_buf, stg_buf_offset, dst_buf, dst_buf_offset, copy_num_bytes); |
12530 | _sg.wgpu.staging.offset = stg_buf_offset + copy_num_bytes; |
12531 | return copy_num_bytes; |
12532 | } |
12533 | |
12534 | _SOKOL_PRIVATE bool _sg_wgpu_staging_copy_to_texture(_sg_image_t* img, const sg_image_data* data) { |
12535 | /* similar to _sg_wgpu_staging_copy_to_buffer(), but with image data instead */ |
12536 | SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); |
12537 | uint32_t num_bytes = _sg_wgpu_image_data_buffer_size(img); |
12538 | if ((_sg.wgpu.staging.offset + num_bytes) >= _sg.wgpu.staging.num_bytes) { |
12539 | SG_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_texture)!\n"); |
12540 | return false; |
12541 | } |
12542 | const int cur = _sg.wgpu.staging.cur; |
12543 | SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); |
12544 | uint32_t stg_offset = _sg.wgpu.staging.offset; |
12545 | uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur]; |
12546 | WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur]; |
12547 | uint32_t bytes_copied = _sg_wgpu_copy_image_data(stg_buf, stg_ptr, stg_offset, img, data); |
12548 | _SOKOL_UNUSED(bytes_copied); |
12549 | SOKOL_ASSERT(bytes_copied == num_bytes); |
12550 | _sg.wgpu.staging.offset = _sg_roundup(stg_offset + num_bytes, _SG_WGPU_STAGING_ALIGN); |
12551 | return true; |
12552 | } |
12553 | |
12554 | _SOKOL_PRIVATE void _sg_wgpu_staging_unmap(void) { |
12555 | /* called at end of frame before queue-submit */ |
12556 | const int cur = _sg.wgpu.staging.cur; |
12557 | SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); |
12558 | _sg.wgpu.staging.ptr[cur] = 0; |
12559 | wgpuBufferUnmap(_sg.wgpu.staging.buf[cur]); |
12560 | } |
12561 | |
12562 | /*--- WGPU sampler cache functions ---*/ |
12563 | _SOKOL_PRIVATE void _sg_wgpu_init_sampler_cache(const sg_desc* desc) { |
12564 | SOKOL_ASSERT(desc->sampler_cache_size > 0); |
12565 | _sg_smpcache_init(&_sg.wgpu.sampler_cache, desc->sampler_cache_size); |
12566 | } |
12567 | |
12568 | _SOKOL_PRIVATE void _sg_wgpu_destroy_sampler_cache(void) { |
12569 | SOKOL_ASSERT(_sg.wgpu.sampler_cache.items); |
12570 | SOKOL_ASSERT(_sg.wgpu.sampler_cache.num_items <= _sg.wgpu.sampler_cache.capacity); |
12571 | for (int i = 0; i < _sg.wgpu.sampler_cache.num_items; i++) { |
12572 | wgpuSamplerRelease((WGPUSampler)_sg_smpcache_sampler(&_sg.wgpu.sampler_cache, i)); |
12573 | } |
12574 | _sg_smpcache_discard(&_sg.wgpu.sampler_cache); |
12575 | } |
12576 | |
12577 | _SOKOL_PRIVATE WGPUSampler _sg_wgpu_create_sampler(const sg_image_desc* img_desc) { |
12578 | SOKOL_ASSERT(img_desc); |
12579 | int index = _sg_smpcache_find_item(&_sg.wgpu.sampler_cache, img_desc); |
12580 | if (index >= 0) { |
12581 | /* reuse existing sampler */ |
12582 | return (WGPUSampler) _sg_smpcache_sampler(&_sg.wgpu.sampler_cache, index); |
12583 | } |
12584 | else { |
12585 | /* create a new WGPU sampler and add to sampler cache */ |
12586 | /* FIXME: anisotropic filtering not supported? */ |
12587 | WGPUSamplerDescriptor smp_desc; |
12588 | _sg_clear(&smp_desc, sizeof(smp_desc)); |
12589 | smp_desc.addressModeU = _sg_wgpu_sampler_addrmode(img_desc->wrap_u); |
12590 | smp_desc.addressModeV = _sg_wgpu_sampler_addrmode(img_desc->wrap_v); |
12591 | smp_desc.addressModeW = _sg_wgpu_sampler_addrmode(img_desc->wrap_w); |
12592 | smp_desc.magFilter = _sg_wgpu_sampler_minmagfilter(img_desc->mag_filter); |
12593 | smp_desc.minFilter = _sg_wgpu_sampler_minmagfilter(img_desc->min_filter); |
12594 | smp_desc.mipmapFilter = _sg_wgpu_sampler_mipfilter(img_desc->min_filter); |
12595 | smp_desc.lodMinClamp = img_desc->min_lod; |
12596 | smp_desc.lodMaxClamp = img_desc->max_lod; |
12597 | WGPUSampler smp = wgpuDeviceCreateSampler(_sg.wgpu.dev, &smp_desc); |
12598 | SOKOL_ASSERT(smp); |
12599 | _sg_smpcache_add_item(&_sg.wgpu.sampler_cache, img_desc, (uintptr_t)smp); |
12600 | return smp; |
12601 | } |
12602 | } |
12603 | |
/*--- WGPU backend API functions ---*/
/* one-time backend setup: stores device/callbacks from the desc, initializes
   caps, sampler cache, uniform- and staging-buffer pools, creates the shared
   empty bind group and the initial per-frame command encoders
*/
_SOKOL_PRIVATE void _sg_wgpu_setup_backend(const sg_desc* desc) {
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT(desc->context.wgpu.device);
    /* either the plain or the userdata variant of each callback must be provided */
    SOKOL_ASSERT(desc->context.wgpu.render_view_cb || desc->context.wgpu.render_view_userdata_cb);
    SOKOL_ASSERT(desc->context.wgpu.resolve_view_cb || desc->context.wgpu.resolve_view_userdata_cb);
    SOKOL_ASSERT(desc->context.wgpu.depth_stencil_view_cb || desc->context.wgpu.depth_stencil_view_userdata_cb);
    SOKOL_ASSERT(desc->uniform_buffer_size > 0);
    SOKOL_ASSERT(desc->staging_buffer_size > 0);
    _sg.backend = SG_BACKEND_WGPU;
    _sg.wgpu.valid = true;
    _sg.wgpu.dev = (WGPUDevice) desc->context.wgpu.device;
    _sg.wgpu.render_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.render_view_cb;
    _sg.wgpu.render_view_userdata_cb = (WGPUTextureView(*)(void*)) desc->context.wgpu.render_view_userdata_cb;
    _sg.wgpu.resolve_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.resolve_view_cb;
    _sg.wgpu.resolve_view_userdata_cb = (WGPUTextureView(*)(void*)) desc->context.wgpu.resolve_view_userdata_cb;
    _sg.wgpu.depth_stencil_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.depth_stencil_view_cb;
    _sg.wgpu.depth_stencil_view_userdata_cb = (WGPUTextureView(*)(void*)) desc->context.wgpu.depth_stencil_view_userdata_cb;
    _sg.wgpu.user_data = desc->context.wgpu.user_data;
    _sg.wgpu.queue = wgpuDeviceCreateQueue(_sg.wgpu.dev);
    SOKOL_ASSERT(_sg.wgpu.queue);

    /* setup WebGPU features and limits */
    _sg_wgpu_init_caps();

    /* setup the sampler cache, uniform and staging buffer pools */
    /* NOTE(review): this uses the global _sg.desc while the pool initializers
       below use the desc parameter — presumably identical; confirm */
    _sg_wgpu_init_sampler_cache(&_sg.desc);
    _sg_wgpu_ubpool_init(desc);
    _sg_wgpu_ubpool_next_frame(true);
    _sg_wgpu_staging_init(desc);
    _sg_wgpu_staging_next_frame(true);

    /* create an empty bind group for shader stages without bound images */
    WGPUBindGroupLayoutDescriptor bgl_desc;
    _sg_clear(&bgl_desc, sizeof(bgl_desc));
    WGPUBindGroupLayout empty_bgl = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &bgl_desc);
    SOKOL_ASSERT(empty_bgl);
    WGPUBindGroupDescriptor bg_desc;
    _sg_clear(&bg_desc, sizeof(bg_desc));
    bg_desc.layout = empty_bgl;
    _sg.wgpu.empty_bind_group = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc);
    SOKOL_ASSERT(_sg.wgpu.empty_bind_group);
    /* the bind group keeps its own reference to the layout */
    wgpuBindGroupLayoutRelease(empty_bgl);

    /* create initial per-frame command encoders */
    WGPUCommandEncoderDescriptor cmd_enc_desc;
    _sg_clear(&cmd_enc_desc, sizeof(cmd_enc_desc));
    _sg.wgpu.render_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);
    SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
    _sg.wgpu.staging_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
}
12656 | |
/* tear down the WGPU backend: invalidate first (so async callbacks become
   no-ops), then release pools, caches and WGPU objects
*/
_SOKOL_PRIVATE void _sg_wgpu_discard_backend(void) {
    SOKOL_ASSERT(_sg.wgpu.valid);
    SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
    /* clearing 'valid' first makes pending map-callbacks return early */
    _sg.wgpu.valid = false;
    _sg_wgpu_ubpool_discard();
    _sg_wgpu_staging_discard();
    _sg_wgpu_destroy_sampler_cache();
    wgpuBindGroupRelease(_sg.wgpu.empty_bind_group);
    wgpuCommandEncoderRelease(_sg.wgpu.render_cmd_enc);
    _sg.wgpu.render_cmd_enc = 0;
    wgpuCommandEncoderRelease(_sg.wgpu.staging_cmd_enc);
    _sg.wgpu.staging_cmd_enc = 0;
    if (_sg.wgpu.queue) {
        wgpuQueueRelease(_sg.wgpu.queue);
        _sg.wgpu.queue = 0;
    }
}
12675 | |
_SOKOL_PRIVATE void _sg_wgpu_reset_state_cache(void) {
    /* not implemented yet for the WGPU backend, only logs a reminder */
    SG_LOG("_sg_wgpu_reset_state_cache: FIXME\n");
}
12679 | |
/* contexts carry no per-context WGPU state, creation always succeeds */
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
12685 | |
/* nothing to release, contexts own no WGPU resources in this backend */
_SOKOL_PRIVATE void _sg_wgpu_discard_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
}
12690 | |
12691 | _SOKOL_PRIVATE void _sg_wgpu_activate_context(_sg_context_t* ctx) { |
12692 | (void)ctx; |
12693 | SG_LOG("_sg_wgpu_activate_context: FIXME\n"); |
12694 | } |
12695 | |
12696 | _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { |
12697 | SOKOL_ASSERT(buf && desc); |
12698 | const bool injected = (0 != desc->wgpu_buffer); |
12699 | _sg_buffer_common_init(&buf->cmn, desc); |
12700 | if (injected) { |
12701 | buf->wgpu.buf = (WGPUBuffer) desc->wgpu_buffer; |
12702 | wgpuBufferReference(buf->wgpu.buf); |
12703 | } |
12704 | else { |
12705 | WGPUBufferDescriptor wgpu_buf_desc; |
12706 | _sg_clear(&wgpu_buf_desc, sizeof(wgpu_buf_desc)); |
12707 | wgpu_buf_desc.usage = _sg_wgpu_buffer_usage(buf->cmn.type, buf->cmn.usage); |
12708 | wgpu_buf_desc.size = buf->cmn.size; |
12709 | if (SG_USAGE_IMMUTABLE == buf->cmn.usage) { |
12710 | SOKOL_ASSERT(desc->data.ptr); |
12711 | WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc); |
12712 | buf->wgpu.buf = res.buffer; |
12713 | SOKOL_ASSERT(res.data && (res.dataLength == buf->cmn.size)); |
12714 | memcpy(res.data, desc->data.ptr, buf->cmn.size); |
12715 | wgpuBufferUnmap(res.buffer); |
12716 | } |
12717 | else { |
12718 | buf->wgpu.buf = wgpuDeviceCreateBuffer(_sg.wgpu.dev, &wgpu_buf_desc); |
12719 | } |
12720 | } |
12721 | return SG_RESOURCESTATE_VALID; |
12722 | } |
12723 | |
12724 | _SOKOL_PRIVATE void _sg_wgpu_discard_buffer(_sg_buffer_t* buf) { |
12725 | SOKOL_ASSERT(buf); |
12726 | WGPUBuffer wgpu_buf = buf->wgpu.buf; |
12727 | if (0 != wgpu_buf) { |
12728 | wgpuBufferRelease(wgpu_buf); |
12729 | } |
12730 | } |
12731 | |
12732 | _SOKOL_PRIVATE void _sg_wgpu_init_texdesc_common(WGPUTextureDescriptor* wgpu_tex_desc, const sg_image_desc* desc) { |
12733 | wgpu_tex_desc->usage = WGPUTextureUsage_Sampled|WGPUTextureUsage_CopyDst; |
12734 | wgpu_tex_desc->dimension = _sg_wgpu_tex_dim(desc->type); |
12735 | wgpu_tex_desc->size.width = desc->width; |
12736 | wgpu_tex_desc->size.height = desc->height; |
12737 | if (desc->type == SG_IMAGETYPE_3D) { |
12738 | wgpu_tex_desc->size.depth = desc->num_slices; |
12739 | wgpu_tex_desc->arrayLayerCount = 1; |
12740 | } |
12741 | else if (desc->type == SG_IMAGETYPE_CUBE) { |
12742 | wgpu_tex_desc->size.depth = 1; |
12743 | wgpu_tex_desc->arrayLayerCount = 6; |
12744 | } |
12745 | else { |
12746 | wgpu_tex_desc->size.depth = 1; |
12747 | wgpu_tex_desc->arrayLayerCount = desc->num_slices; |
12748 | } |
12749 | wgpu_tex_desc->format = _sg_wgpu_textureformat(desc->pixel_format); |
12750 | wgpu_tex_desc->mipLevelCount = desc->num_mipmaps; |
12751 | wgpu_tex_desc->sampleCount = 1; |
12752 | } |
12753 | |
/* create a WGPU image resource: depth-stencil render targets, injected
   native textures, and regular (possibly immutable, possibly MSAA render
   target) textures are handled as separate paths
*/
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    SOKOL_ASSERT(_sg.wgpu.dev);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);

    _sg_image_common_init(&img->cmn, desc);

    const bool injected = (0 != desc->wgpu_texture);
    const bool is_msaa = desc->sample_count > 1;
    WGPUTextureDescriptor wgpu_tex_desc;
    _sg_clear(&wgpu_tex_desc, sizeof(wgpu_tex_desc));
    _sg_wgpu_init_texdesc_common(&wgpu_tex_desc, desc);
    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* depth-stencil render target: always backend-created, 2D, single mip */
        SOKOL_ASSERT(img->cmn.render_target);
        SOKOL_ASSERT(img->cmn.type == SG_IMAGETYPE_2D);
        SOKOL_ASSERT(img->cmn.num_mipmaps == 1);
        SOKOL_ASSERT(!injected);
        /* NOTE: a depth-stencil texture will never be MSAA-resolved, so there
           won't be a separate MSAA- and resolve-texture
        */
        wgpu_tex_desc.usage = WGPUTextureUsage_OutputAttachment;
        wgpu_tex_desc.sampleCount = desc->sample_count;
        img->wgpu.tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc);
        SOKOL_ASSERT(img->wgpu.tex);
    }
    else {
        if (injected) {
            /* wrap an externally created native texture, just take a reference */
            img->wgpu.tex = (WGPUTexture) desc->wgpu_texture;
            wgpuTextureReference(img->wgpu.tex);
        }
        else {
            /* NOTE: in the MSAA-rendertarget case, both the MSAA texture *and*
               the resolve texture need OutputAttachment usage
            */
            if (img->cmn.render_target) {
                wgpu_tex_desc.usage = WGPUTextureUsage_Sampled|WGPUTextureUsage_OutputAttachment;
            }
            img->wgpu.tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc);
            SOKOL_ASSERT(img->wgpu.tex);

            /* copy content into texture via a throw-away staging buffer */
            if (desc->usage == SG_USAGE_IMMUTABLE && !desc->render_target) {
                WGPUBufferDescriptor wgpu_buf_desc;
                _sg_clear(&wgpu_buf_desc, sizeof(wgpu_buf_desc));
                wgpu_buf_desc.size = _sg_wgpu_image_data_buffer_size(img);
                wgpu_buf_desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_CopyDst;
                /* created mapped, so the image data can be written directly */
                WGPUCreateBufferMappedResult map = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc);
                SOKOL_ASSERT(map.buffer && map.data);
                uint32_t num_bytes = _sg_wgpu_copy_image_data(map.buffer, (uint8_t*)map.data, 0, img, &desc->data);
                _SOKOL_UNUSED(num_bytes);
                SOKOL_ASSERT(num_bytes == wgpu_buf_desc.size);
                wgpuBufferUnmap(map.buffer);
                /* the recorded copy commands keep the buffer alive until submitted */
                wgpuBufferRelease(map.buffer);
            }
        }

        /* create texture view object */
        WGPUTextureViewDescriptor wgpu_view_desc;
        _sg_clear(&wgpu_view_desc, sizeof(wgpu_view_desc));
        wgpu_view_desc.dimension = _sg_wgpu_tex_viewdim(desc->type);
        img->wgpu.tex_view = wgpuTextureCreateView(img->wgpu.tex, &wgpu_view_desc);

        /* if render target and MSAA, then a separate texture in MSAA format is needed
           which will be resolved into the regular texture at the end of the
           offscreen-render pass
        */
        if (desc->render_target && is_msaa) {
            wgpu_tex_desc.dimension = WGPUTextureDimension_2D;
            wgpu_tex_desc.size.depth = 1;
            wgpu_tex_desc.arrayLayerCount = 1;
            wgpu_tex_desc.mipLevelCount = 1;
            wgpu_tex_desc.usage = WGPUTextureUsage_OutputAttachment;
            wgpu_tex_desc.sampleCount = desc->sample_count;
            img->wgpu.msaa_tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc);
            SOKOL_ASSERT(img->wgpu.msaa_tex);
        }

        /* create sampler via shared-sampler-cache */
        img->wgpu.sampler = _sg_wgpu_create_sampler(desc);
        SOKOL_ASSERT(img->wgpu.sampler);
    }
    return SG_RESOURCESTATE_VALID;
}
12837 | |
12838 | _SOKOL_PRIVATE void _sg_wgpu_discard_image(_sg_image_t* img) { |
12839 | SOKOL_ASSERT(img); |
12840 | if (img->wgpu.tex) { |
12841 | wgpuTextureRelease(img->wgpu.tex); |
12842 | img->wgpu.tex = 0; |
12843 | } |
12844 | if (img->wgpu.tex_view) { |
12845 | wgpuTextureViewRelease(img->wgpu.tex_view); |
12846 | img->wgpu.tex_view = 0; |
12847 | } |
12848 | if (img->wgpu.msaa_tex) { |
12849 | wgpuTextureRelease(img->wgpu.msaa_tex); |
12850 | img->wgpu.msaa_tex = 0; |
12851 | } |
12852 | /* NOTE: do *not* destroy the sampler from the shared-sampler-cache */ |
12853 | img->wgpu.sampler = 0; |
12854 | } |
12855 | |
12856 | /* |
12857 | How BindGroups work in WebGPU: |
12858 | |
12859 | - up to 4 bind groups can be bound simultaneously |
12860 | - up to 16 bindings per bind group |
12861 | - 'binding' slots are local per bind group |
12862 | - in the shader: |
12863 | layout(set=0, binding=1) corresponds to bind group 0, binding 1 |
12864 | |
12865 | Now how to map this to sokol-gfx's bind model: |
12866 | |
12867 | Reduce SG_MAX_SHADERSTAGE_IMAGES to 8, then: |
12868 | |
12869 | 1 bind group for all 8 uniform buffers |
12870 | 1 bind group for vertex shader textures + samplers |
        1 bind group for fragment shader textures + samplers
12872 | |
12873 | Alternatively: |
12874 | |
12875 | 1 bind group for 8 uniform buffer slots |
12876 | 1 bind group for 8 vs images + 8 vs samplers |
12877 | 1 bind group for 12 fs images |
12878 | 1 bind group for 12 fs samplers |
12879 | |
12880 | I guess this means that we need to create BindGroups on the |
12881 | fly during sg_apply_bindings() :/ |
12882 | */ |
/* create the per-stage SPIR-V shader modules and the per-stage
   image/sampler bind group layouts (textures at bindings 0..N-1,
   samplers at bindings N..2N-1, N = _SG_WGPU_MAX_SHADERSTAGE_IMAGES)
*/
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);
    /* the WGPU backend requires precompiled bytecode, not source strings */
    SOKOL_ASSERT(desc->vs.bytecode.ptr && desc->fs.bytecode.ptr);
    _sg_shader_common_init(&shd->cmn, desc);

    bool success = true;
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs;
        /* SPIR-V bytecode is consumed as 32-bit words, so it must be 4-byte sized */
        SOKOL_ASSERT((stage_desc->bytecode.size & 3) == 0);

        _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index];
        _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index];

        _sg_strcpy(&wgpu_stage->entry, stage_desc->entry);
        WGPUShaderModuleDescriptor wgpu_shdmod_desc;
        _sg_clear(&wgpu_shdmod_desc, sizeof(wgpu_shdmod_desc));
        /* codeSize is in 32-bit words, bytecode.size is in bytes */
        wgpu_shdmod_desc.codeSize = stage_desc->bytecode.size >> 2;
        wgpu_shdmod_desc.code = (const uint32_t*) stage_desc->bytecode.ptr;
        wgpu_stage->module = wgpuDeviceCreateShaderModule(_sg.wgpu.dev, &wgpu_shdmod_desc);
        if (0 == wgpu_stage->module) {
            success = false;
        }

        /* create image/sampler bind group for the shader stage */
        WGPUShaderStage vis = (stage_index == SG_SHADERSTAGE_VS) ? WGPUShaderStage_Vertex : WGPUShaderStage_Fragment;
        /* clamp to the WGPU backend's per-stage image limit */
        int num_imgs = cmn_stage->num_images;
        if (num_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) {
            num_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
        }
        /* two layout entries per image: one texture binding, one sampler binding */
        WGPUBindGroupLayoutBinding bglb_desc[_SG_WGPU_MAX_SHADERSTAGE_IMAGES * 2];
        _sg_clear(bglb_desc, sizeof(bglb_desc));
        for (int img_index = 0; img_index < num_imgs; img_index++) {
            /* texture- and sampler-bindings */
            WGPUBindGroupLayoutBinding* tex_desc = &bglb_desc[img_index*2 + 0];
            WGPUBindGroupLayoutBinding* smp_desc = &bglb_desc[img_index*2 + 1];

            tex_desc->binding = img_index;
            tex_desc->visibility = vis;
            tex_desc->type = WGPUBindingType_SampledTexture;
            tex_desc->textureDimension = _sg_wgpu_tex_viewdim(cmn_stage->images[img_index].image_type);
            tex_desc->textureComponentType = _sg_wgpu_tex_comptype(cmn_stage->images[img_index].sampler_type);

            /* sampler bindings start after the texture binding range */
            smp_desc->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
            smp_desc->visibility = vis;
            smp_desc->type = WGPUBindingType_Sampler;
        }
        WGPUBindGroupLayoutDescriptor img_bgl_desc;
        _sg_clear(&img_bgl_desc, sizeof(img_bgl_desc));
        img_bgl_desc.bindingCount = num_imgs * 2;
        img_bgl_desc.bindings = &bglb_desc[0];
        wgpu_stage->bind_group_layout = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &img_bgl_desc);
        SOKOL_ASSERT(wgpu_stage->bind_group_layout);
    }
    return success ? SG_RESOURCESTATE_VALID : SG_RESOURCESTATE_FAILED;
}
12938 | |
12939 | _SOKOL_PRIVATE void _sg_wgpu_discard_shader(_sg_shader_t* shd) { |
12940 | SOKOL_ASSERT(shd); |
12941 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
12942 | _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index]; |
12943 | if (wgpu_stage->module) { |
12944 | wgpuShaderModuleRelease(wgpu_stage->module); |
12945 | wgpu_stage->module = 0; |
12946 | } |
12947 | if (wgpu_stage->bind_group_layout) { |
12948 | wgpuBindGroupLayoutRelease(wgpu_stage->bind_group_layout); |
12949 | wgpu_stage->bind_group_layout = 0; |
12950 | } |
12951 | } |
12952 | } |
12953 | |
12954 | _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { |
12955 | SOKOL_ASSERT(pip && shd && desc); |
12956 | SOKOL_ASSERT(desc->shader.id == shd->slot.id); |
12957 | SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout); |
12958 | SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout); |
12959 | pip->shader = shd; |
12960 | _sg_pipeline_common_init(&pip->cmn, desc); |
12961 | pip->wgpu.stencil_ref = (uint32_t) desc->stencil.ref; |
12962 | |
12963 | WGPUBindGroupLayout pip_bgl[3] = { |
12964 | _sg.wgpu.ub.bindgroup_layout, |
12965 | shd->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout, |
12966 | shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout |
12967 | }; |
12968 | WGPUPipelineLayoutDescriptor pl_desc; |
12969 | _sg_clear(&pl_desc, sizeof(pl_desc)); |
12970 | pl_desc.bindGroupLayoutCount = 3; |
12971 | pl_desc.bindGroupLayouts = &pip_bgl[0]; |
12972 | WGPUPipelineLayout pip_layout = wgpuDeviceCreatePipelineLayout(_sg.wgpu.dev, &pl_desc); |
12973 | |
12974 | WGPUVertexBufferLayoutDescriptor vb_desc[SG_MAX_SHADERSTAGE_BUFFERS]; |
12975 | _sg_clear(&vb_desc, sizeof(vb_desc)); |
12976 | WGPUVertexAttributeDescriptor va_desc[SG_MAX_SHADERSTAGE_BUFFERS][SG_MAX_VERTEX_ATTRIBUTES]; |
12977 | _sg_clear(&va_desc, sizeof(va_desc)); |
12978 | int vb_idx = 0; |
12979 | for (; vb_idx < SG_MAX_SHADERSTAGE_BUFFERS; vb_idx++) { |
12980 | const sg_buffer_layout_desc* src_vb_desc = &desc->layout.buffers[vb_idx]; |
12981 | if (0 == src_vb_desc->stride) { |
12982 | break; |
12983 | } |
12984 | vb_desc[vb_idx].arrayStride = src_vb_desc->stride; |
12985 | vb_desc[vb_idx].stepMode = _sg_wgpu_stepmode(src_vb_desc->step_func); |
12986 | /* NOTE: WebGPU has no support for vertex step rate (because that's |
12987 | not supported by Core Vulkan |
12988 | */ |
12989 | int va_idx = 0; |
12990 | for (int va_loc = 0; va_loc < SG_MAX_VERTEX_ATTRIBUTES; va_loc++) { |
12991 | const sg_vertex_attr_desc* src_va_desc = &desc->layout.attrs[va_loc]; |
12992 | if (SG_VERTEXFORMAT_INVALID == src_va_desc->format) { |
12993 | break; |
12994 | } |
12995 | pip->cmn.vertex_layout_valid[src_va_desc->buffer_index] = true; |
12996 | if (vb_idx == src_va_desc->buffer_index) { |
12997 | va_desc[vb_idx][va_idx].format = _sg_wgpu_vertexformat(src_va_desc->format); |
12998 | va_desc[vb_idx][va_idx].offset = src_va_desc->offset; |
12999 | va_desc[vb_idx][va_idx].shaderLocation = va_loc; |
13000 | va_idx++; |
13001 | } |
13002 | } |
13003 | vb_desc[vb_idx].attributeCount = va_idx; |
13004 | vb_desc[vb_idx].attributes = &va_desc[vb_idx][0]; |
13005 | } |
13006 | WGPUVertexStateDescriptor vx_state_desc; |
13007 | _sg_clear(&vx_state_desc, sizeof(vx_state_desc)); |
13008 | vx_state_desc.indexFormat = _sg_wgpu_indexformat(desc->index_type); |
13009 | vx_state_desc.vertexBufferCount = vb_idx; |
13010 | vx_state_desc.vertexBuffers = vb_desc; |
13011 | |
13012 | WGPURasterizationStateDescriptor rs_desc; |
13013 | _sg_clear(&rs_desc, sizeof(rs_desc)); |
13014 | rs_desc.frontFace = _sg_wgpu_frontface(desc->face_winding); |
13015 | rs_desc.cullMode = _sg_wgpu_cullmode(desc->cull_mode); |
13016 | rs_desc.depthBias = (int32_t) desc->depth.bias; |
13017 | rs_desc.depthBiasClamp = desc->depth.bias_clamp; |
13018 | rs_desc.depthBiasSlopeScale = desc->depth.bias_slope_scale; |
13019 | |
13020 | WGPUDepthStencilStateDescriptor ds_desc; |
13021 | _sg_clear(&ds_desc, sizeof(ds_desc)); |
13022 | ds_desc.format = _sg_wgpu_textureformat(desc->depth.pixel_format); |
13023 | ds_desc.depthWriteEnabled = desc->depth.write_enabled; |
13024 | ds_desc.depthCompare = _sg_wgpu_comparefunc(desc->depth.compare); |
13025 | ds_desc.stencilReadMask = desc->stencil.read_mask; |
13026 | ds_desc.stencilWriteMask = desc->stencil.write_mask; |
13027 | ds_desc.stencilFront.compare = _sg_wgpu_comparefunc(desc->stencil.front.compare); |
13028 | ds_desc.stencilFront.failOp = _sg_wgpu_stencilop(desc->stencil.front.fail_op); |
13029 | ds_desc.stencilFront.depthFailOp = _sg_wgpu_stencilop(desc->stencil.front.depth_fail_op); |
13030 | ds_desc.stencilFront.passOp = _sg_wgpu_stencilop(desc->stencil.front.pass_op); |
13031 | ds_desc.stencilBack.compare = _sg_wgpu_comparefunc(desc->stencil.back.compare); |
13032 | ds_desc.stencilBack.failOp = _sg_wgpu_stencilop(desc->stencil.back.fail_op); |
13033 | ds_desc.stencilBack.depthFailOp = _sg_wgpu_stencilop(desc->stencil.back.depth_fail_op); |
13034 | ds_desc.stencilBack.passOp = _sg_wgpu_stencilop(desc->stencil.back.pass_op); |
13035 | |
13036 | WGPUProgrammableStageDescriptor fs_desc; |
13037 | _sg_clear(&fs_desc, sizeof(fs_desc)); |
13038 | fs_desc.module = shd->wgpu.stage[SG_SHADERSTAGE_FS].module; |
13039 | fs_desc.entryPoint = shd->wgpu.stage[SG_SHADERSTAGE_VS].entry.buf; |
13040 | |
13041 | WGPUColorStateDescriptor cs_desc[SG_MAX_COLOR_ATTACHMENTS]; |
13042 | _sg_clear(cs_desc, sizeof(cs_desc)); |
13043 | for (uint32_t i = 0; i < desc->color_count; i++) { |
13044 | SOKOL_ASSERT(i < SG_MAX_COLOR_ATTACHMENTS); |
13045 | cs_desc[i].format = _sg_wgpu_textureformat(desc->colors[i].pixel_format); |
13046 | cs_desc[i].colorBlend.operation = _sg_wgpu_blendop(desc->colors[i].blend.op_rgb); |
13047 | cs_desc[i].colorBlend.srcFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.src_factor_rgb); |
13048 | cs_desc[i].colorBlend.dstFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.dst_factor_rgb); |
13049 | cs_desc[i].alphaBlend.operation = _sg_wgpu_blendop(desc->colors[i].blend.op_alpha); |
13050 | cs_desc[i].alphaBlend.srcFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.src_factor_alpha); |
13051 | cs_desc[i].alphaBlend.dstFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.dst_factor_alpha); |
13052 | cs_desc[i].writeMask = _sg_wgpu_colorwritemask(desc->colors[i].write_mask); |
13053 | } |
13054 | |
13055 | WGPURenderPipelineDescriptor pip_desc; |
13056 | _sg_clear(&pip_desc, sizeof(pip_desc)); |
13057 | pip_desc.layout = pip_layout; |
13058 | pip_desc.vertexStage.module = shd->wgpu.stage[SG_SHADERSTAGE_VS].module; |
13059 | pip_desc.vertexStage.entryPoint = shd->wgpu.stage[SG_SHADERSTAGE_VS].entry.buf; |
13060 | pip_desc.fragmentStage = &fs_desc; |
13061 | pip_desc.vertexState = &vx_state_desc; |
13062 | pip_desc.primitiveTopology = _sg_wgpu_topology(desc->primitive_type); |
13063 | pip_desc.rasterizationState = &rs_desc; |
13064 | pip_desc.sampleCount = desc->sample_count; |
13065 | if (SG_PIXELFORMAT_NONE != desc->depth.pixel_format) { |
13066 | pip_desc.depthStencilState = &ds_desc; |
13067 | } |
13068 | pip_desc.colorStateCount = desc->color_count; |
13069 | pip_desc.colorStates = cs_desc; |
13070 | pip_desc.sampleMask = 0xFFFFFFFF; /* FIXME: ??? */ |
13071 | pip->wgpu.pip = wgpuDeviceCreateRenderPipeline(_sg.wgpu.dev, &pip_desc); |
13072 | SOKOL_ASSERT(0 != pip->wgpu.pip); |
13073 | wgpuPipelineLayoutRelease(pip_layout); |
13074 | |
13075 | return SG_RESOURCESTATE_VALID; |
13076 | } |
13077 | |
13078 | _SOKOL_PRIVATE void _sg_wgpu_discard_pipeline(_sg_pipeline_t* pip) { |
13079 | SOKOL_ASSERT(pip); |
13080 | if (pip == _sg.wgpu.cur_pipeline) { |
13081 | _sg.wgpu.cur_pipeline = 0; |
13082 | _Sg.wgpu.cur_pipeline_id.id = SG_INVALID_ID; |
13083 | } |
13084 | if (pip->wgpu.pip) { |
13085 | wgpuRenderPipelineRelease(pip->wgpu.pip); |
13086 | pip->wgpu.pip = 0; |
13087 | } |
13088 | } |
13089 | |
/* initialize a pass object: store the attachment image pointers and create
   one texture view per attachment (plus a separate resolve view for MSAA
   color attachments); returns SG_RESOURCESTATE_VALID on success
*/
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && desc);
    SOKOL_ASSERT(att_images && att_images[0]);
    _sg_pass_common_init(&pass->cmn, desc);

    /* copy image pointers and create render-texture views */
    const sg_pass_attachment_desc* att_desc;
    for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) {
        att_desc = &desc->color_attachments[i];
        if (att_desc->image.id != SG_INVALID_ID) {
            /* NOTE(review): this assert duplicates the if-condition above */
            SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
            SOKOL_ASSERT(0 == pass->wgpu.color_atts[i].image);
            _sg_image_t* img = att_images[i];
            SOKOL_ASSERT(img && (img->slot.id == att_desc->image.id));
            SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format));
            pass->wgpu.color_atts[i].image = img;
            /* create a render-texture-view to render into the right sub-surface */
            const bool is_msaa = img->cmn.sample_count > 1;
            WGPUTextureViewDescriptor view_desc;
            _sg_clear(&view_desc, sizeof(view_desc));
            /* for MSAA, rendering happens into mip 0 / layer 0 of the msaa
               texture, and mip_level/slice select the resolve target instead */
            view_desc.baseMipLevel = is_msaa ? 0 : att_desc->mip_level;
            view_desc.mipLevelCount = 1;
            view_desc.baseArrayLayer = is_msaa ? 0 : att_desc->slice;
            view_desc.arrayLayerCount = 1;
            WGPUTexture wgpu_tex = is_msaa ? img->wgpu.msaa_tex : img->wgpu.tex;
            SOKOL_ASSERT(wgpu_tex);
            pass->wgpu.color_atts[i].render_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc);
            SOKOL_ASSERT(pass->wgpu.color_atts[i].render_tex_view);
            /* ... and if needed a separate resolve texture view */
            if (is_msaa) {
                view_desc.baseMipLevel = att_desc->mip_level;
                view_desc.baseArrayLayer = att_desc->slice;
                /* NOTE: intentionally shadows the outer wgpu_tex; the resolve
                   view targets the non-MSAA texture */
                WGPUTexture wgpu_tex = img->wgpu.tex;
                pass->wgpu.color_atts[i].resolve_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc);
                SOKOL_ASSERT(pass->wgpu.color_atts[i].resolve_tex_view);
            }
        }
    }
    /* the optional depth-stencil attachment lives at index SG_MAX_COLOR_ATTACHMENTS
       in the att_images array */
    SOKOL_ASSERT(0 == pass->wgpu.ds_att.image);
    att_desc = &desc->depth_stencil_attachment;
    if (att_desc->image.id != SG_INVALID_ID) {
        const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
        SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
        _sg_image_t* ds_img = att_images[ds_img_index];
        pass->wgpu.ds_att.image = ds_img;
        /* create a render-texture view */
        SOKOL_ASSERT(0 == att_desc->mip_level);
        SOKOL_ASSERT(0 == att_desc->slice);
        WGPUTextureViewDescriptor view_desc;
        _sg_clear(&view_desc, sizeof(view_desc));
        WGPUTexture wgpu_tex = ds_img->wgpu.tex;
        SOKOL_ASSERT(wgpu_tex);
        pass->wgpu.ds_att.render_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc);
        SOKOL_ASSERT(pass->wgpu.ds_att.render_tex_view);
    }
    return SG_RESOURCESTATE_VALID;
}
13148 | |
13149 | _SOKOL_PRIVATE void _sg_wgpu_discard_pass(_sg_pass_t* pass) { |
13150 | SOKOL_ASSERT(pass); |
13151 | for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { |
13152 | if (pass->wgpu.color_atts[i].render_tex_view) { |
13153 | wgpuTextureViewRelease(pass->wgpu.color_atts[i].render_tex_view); |
13154 | pass->wgpu.color_atts[i].render_tex_view = 0; |
13155 | } |
13156 | if (pass->wgpu.color_atts[i].resolve_tex_view) { |
13157 | wgpuTextureViewRelease(pass->wgpu.color_atts[i].resolve_tex_view); |
13158 | pass->wgpu.color_atts[i].resolve_tex_view = 0; |
13159 | } |
13160 | } |
13161 | if (pass->wgpu.ds_att.render_tex_view) { |
13162 | wgpuTextureViewRelease(pass->wgpu.ds_att.render_tex_view); |
13163 | pass->wgpu.ds_att.render_tex_view = 0; |
13164 | } |
13165 | } |
13166 | |
13167 | _SOKOL_PRIVATE _sg_image_t* _sg_wgpu_pass_color_image(const _sg_pass_t* pass, int index) { |
13168 | SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); |
13169 | /* NOTE: may return null */ |
13170 | return pass->wgpu.color_atts[index].image; |
13171 | } |
13172 | |
13173 | _SOKOL_PRIVATE _sg_image_t* _sg_wgpu_pass_ds_image(const _sg_pass_t* pass) { |
13174 | /* NOTE: may return null */ |
13175 | SOKOL_ASSERT(pass); |
13176 | return pass->wgpu.ds_att.image; |
13177 | } |
13178 | |
13179 | _SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { |
13180 | SOKOL_ASSERT(action); |
13181 | SOKOL_ASSERT(!_sg.wgpu.in_pass); |
13182 | SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); |
13183 | SOKOL_ASSERT(_sg.wgpu.dev); |
13184 | SOKOL_ASSERT(_sg.wgpu.render_view_cb || _sg.wgpu.render_view_userdata_cb); |
13185 | SOKOL_ASSERT(_sg.wgpu.resolve_view_cb || _sg.wgpu.resolve_view_userdata_cb); |
13186 | SOKOL_ASSERT(_sg.wgpu.depth_stencil_view_cb || _sg.wgpu.depth_stencil_view_userdata_cb); |
13187 | _sg.wgpu.in_pass = true; |
13188 | _sg.wgpu.cur_width = w; |
13189 | _sg.wgpu.cur_height = h; |
13190 | _sg.wgpu.cur_pipeline = 0; |
13191 | _sg.wgpu.cur_pipeline_id.id = SG_INVALID_ID; |
13192 | |
13193 | SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); |
13194 | if (pass) { |
13195 | WGPURenderPassDescriptor wgpu_pass_desc; |
13196 | _sg_clear(&wgpu_pass_desc, sizeof(wgpu_pass_desc)); |
13197 | WGPURenderPassColorAttachmentDescriptor wgpu_color_att_desc[SG_MAX_COLOR_ATTACHMENTS]; |
13198 | _sg_clear(&wgpu_color_att_desc, sizeof(wgpu_color_att_desc)); |
13199 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID); |
13200 | for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { |
13201 | const _sg_wgpu_attachment_t* wgpu_att = &pass->wgpu.color_atts[i]; |
13202 | wgpu_color_att_desc[i].loadOp = _sg_wgpu_load_op(action->colors[i].action); |
13203 | wgpu_color_att_desc[i].storeOp = WGPUStoreOp_Store; |
13204 | wgpu_color_att_desc[i].clearColor.r = action->colors[i].value.r; |
13205 | wgpu_color_att_desc[i].clearColor.g = action->colors[i].value.g; |
13206 | wgpu_color_att_desc[i].clearColor.b = action->colors[i].value.b; |
13207 | wgpu_color_att_desc[i].clearColor.a = action->colors[i].value.a; |
13208 | wgpu_color_att_desc[i].attachment = wgpu_att->render_tex_view; |
13209 | if (wgpu_att->image->cmn.sample_count > 1) { |
13210 | wgpu_color_att_desc[i].resolveTarget = wgpu_att->resolve_tex_view; |
13211 | } |
13212 | } |
13213 | wgpu_pass_desc.colorAttachmentCount = pass->cmn.num_color_atts; |
13214 | wgpu_pass_desc.colorAttachments = &wgpu_color_att_desc[0]; |
13215 | if (pass->wgpu.ds_att.image) { |
13216 | WGPURenderPassDepthStencilAttachmentDescriptor wgpu_ds_att_desc; |
13217 | _sg_clear(&wgpu_ds_att_desc, sizeof(wgpu_ds_att_desc)); |
13218 | wgpu_ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action); |
13219 | wgpu_ds_att_desc.clearDepth = action->depth.value; |
13220 | wgpu_ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action); |
13221 | wgpu_ds_att_desc.clearStencil = action->stencil.value; |
13222 | wgpu_ds_att_desc.attachment = pass->wgpu.ds_att.render_tex_view; |
13223 | wgpu_pass_desc.depthStencilAttachment = &wgpu_ds_att_desc; |
13224 | _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &wgpu_pass_desc); |
13225 | } |
13226 | } |
13227 | else { |
13228 | /* default render pass */ |
13229 | WGPUTextureView wgpu_render_view = _sg.wgpu.render_view_cb ? _sg.wgpu.render_view_cb() : _sg.wgpu.render_view_userdata_cb(_sg.wgpu.user_data); |
13230 | WGPUTextureView wgpu_resolve_view = _sg.wgpu.resolve_view_cb ? _sg.wgpu.resolve_view_cb() : _sg.wgpu.resolve_view_userdata_cb(_sg.wgpu.user_data); |
13231 | WGPUTextureView wgpu_depth_stencil_view = _sg.wgpu.depth_stencil_view_cb ? _sg.wgpu.depth_stencil_view_cb() : _sg.wgpu.depth_stencil_view_userdata_cb(_sg.wgpu.user_data); |
13232 | |
13233 | WGPURenderPassDescriptor pass_desc; |
13234 | _sg_clear(&pass_desc, sizeof(pass_desc)); |
13235 | WGPURenderPassColorAttachmentDescriptor color_att_desc; |
13236 | _sg_clear(&color_att_desc, sizeof(color_att_desc)); |
13237 | color_att_desc.loadOp = _sg_wgpu_load_op(action->colors[0].action); |
13238 | color_att_desc.clearColor.r = action->colors[0].value.r; |
13239 | color_att_desc.clearColor.g = action->colors[0].value.g; |
13240 | color_att_desc.clearColor.b = action->colors[0].value.b; |
13241 | color_att_desc.clearColor.a = action->colors[0].value.a; |
13242 | color_att_desc.attachment = wgpu_render_view; |
13243 | color_att_desc.resolveTarget = wgpu_resolve_view; /* null if no MSAA rendering */ |
13244 | pass_desc.colorAttachmentCount = 1; |
13245 | pass_desc.colorAttachments = &color_att_desc; |
13246 | WGPURenderPassDepthStencilAttachmentDescriptor ds_att_desc; |
13247 | _sg_clear(&ds_att_desc, sizeof(ds_att_desc)); |
13248 | ds_att_desc.attachment = wgpu_depth_stencil_view; |
13249 | SOKOL_ASSERT(0 != ds_att_desc.attachment); |
13250 | ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action); |
13251 | ds_att_desc.clearDepth = action->depth.value; |
13252 | ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action); |
13253 | ds_att_desc.clearStencil = action->stencil.value; |
13254 | pass_desc.depthStencilAttachment = &ds_att_desc; |
13255 | _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &pass_desc); |
13256 | } |
13257 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13258 | |
13259 | /* initial uniform buffer binding (required even if no uniforms are set in the frame) */ |
13260 | wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, |
13261 | 0, /* groupIndex 0 is reserved for uniform buffers */ |
13262 | _sg.wgpu.ub.bindgroup, |
13263 | SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS, |
13264 | &_sg.wgpu.ub.bind_offsets[0][0]); |
13265 | } |
13266 | |
13267 | _SOKOL_PRIVATE void _sg_wgpu_end_pass(void) { |
13268 | SOKOL_ASSERT(_sg.wgpu.in_pass); |
13269 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13270 | _sg.wgpu.in_pass = false; |
13271 | wgpuRenderPassEncoderEndPass(_sg.wgpu.pass_enc); |
13272 | wgpuRenderPassEncoderRelease(_sg.wgpu.pass_enc); |
13273 | _sg.wgpu.pass_enc = 0; |
13274 | } |
13275 | |
/* end-of-frame: flush pending uniform/staging data, finish and submit the
   staging- and render-command-encoders (staging first, so resource updates
   are visible to the rendering commands), then prepare fresh encoders and
   staging buffers for the next frame
*/
_SOKOL_PRIVATE void _sg_wgpu_commit(void) {
    SOKOL_ASSERT(!_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.queue);
    SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);

    /* finish and submit this frame's work */
    _sg_wgpu_ubpool_flush();
    _sg_wgpu_staging_unmap();

    /* [0] = staging copies, [1] = rendering commands */
    WGPUCommandBuffer cmd_bufs[2];

    WGPUCommandBufferDescriptor cmd_buf_desc;
    _sg_clear(&cmd_buf_desc, sizeof(cmd_buf_desc));
    cmd_bufs[0] = wgpuCommandEncoderFinish(_sg.wgpu.staging_cmd_enc, &cmd_buf_desc);
    SOKOL_ASSERT(cmd_bufs[0]);
    wgpuCommandEncoderRelease(_sg.wgpu.staging_cmd_enc);
    _sg.wgpu.staging_cmd_enc = 0;

    cmd_bufs[1] = wgpuCommandEncoderFinish(_sg.wgpu.render_cmd_enc, &cmd_buf_desc);
    SOKOL_ASSERT(cmd_bufs[1]);
    wgpuCommandEncoderRelease(_sg.wgpu.render_cmd_enc);
    _sg.wgpu.render_cmd_enc = 0;

    wgpuQueueSubmit(_sg.wgpu.queue, 2, &cmd_bufs[0]);

    /* the queue owns the submitted work now, drop our references */
    wgpuCommandBufferRelease(cmd_bufs[0]);
    wgpuCommandBufferRelease(cmd_bufs[1]);

    /* create a new render- and staging-command-encoders for next frame */
    WGPUCommandEncoderDescriptor cmd_enc_desc;
    _sg_clear(&cmd_enc_desc, sizeof(cmd_enc_desc));
    _sg.wgpu.staging_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);
    _sg.wgpu.render_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);

    /* grab new staging buffers for uniform- and vertex/image-updates */
    _sg_wgpu_ubpool_next_frame(false);
    _sg_wgpu_staging_next_frame(false);
}
13315 | |
13316 | _SOKOL_PRIVATE void _sg_wgpu_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { |
13317 | SOKOL_ASSERT(_sg.wgpu.in_pass); |
13318 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13319 | float xf = (float) x; |
13320 | float yf = (float) (origin_top_left ? y : (_sg.wgpu.cur_height - (y + h))); |
13321 | float wf = (float) w; |
13322 | float hf = (float) h; |
13323 | wgpuRenderPassEncoderSetViewport(_sg.wgpu.pass_enc, xf, yf, wf, hf, 0.0f, 1.0f); |
13324 | } |
13325 | |
13326 | _SOKOL_PRIVATE void _sg_wgpu_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { |
13327 | SOKOL_ASSERT(_sg.wgpu.in_pass); |
13328 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13329 | SOKOL_ASSERT(_sg.wgpu.in_pass); |
13330 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13331 | |
13332 | /* clip against framebuffer rect */ |
13333 | x = _sg_min(_sg_max(0, x), _sg.wgpu.cur_width-1); |
13334 | y = _sg_min(_sg_max(0, y), _sg.wgpu.cur_height-1); |
13335 | if ((x + w) > _sg.wgpu.cur_width) { |
13336 | w = _sg.wgpu.cur_width - x; |
13337 | } |
13338 | if ((y + h) > _sg.wgpu.cur_height) { |
13339 | h = _sg.wgpu.cur_height - y; |
13340 | } |
13341 | w = _sg_max(w, 1); |
13342 | h = _sg_max(h, 1); |
13343 | |
13344 | uint32_t sx = (uint32_t) x; |
13345 | uint32_t sy = origin_top_left ? y : (_sg.wgpu.cur_height - (y + h)); |
13346 | uint32_t sw = w; |
13347 | uint32_t sh = h; |
13348 | wgpuRenderPassEncoderSetScissorRect(_sg.wgpu.pass_enc, sx, sy, sw, sh); |
13349 | } |
13350 | |
13351 | _SOKOL_PRIVATE void _sg_wgpu_apply_pipeline(_sg_pipeline_t* pip) { |
13352 | SOKOL_ASSERT(pip); |
13353 | SOKOL_ASSERT(pip->wgpu.pip); |
13354 | SOKOL_ASSERT(_sg.wgpu.in_pass); |
13355 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13356 | _sg.wgpu.draw_indexed = (pip->cmn.index_type != SG_INDEXTYPE_NONE); |
13357 | _sg.wgpu.cur_pipeline = pip; |
13358 | _sg.wgpu.cur_pipeline_id.id = pip->slot.id; |
13359 | wgpuRenderPassEncoderSetPipeline(_sg.wgpu.pass_enc, pip->wgpu.pip); |
13360 | wgpuRenderPassEncoderSetBlendColor(_sg.wgpu.pass_enc, (WGPUColor*)&pip->cmn.blend_color); |
13361 | wgpuRenderPassEncoderSetStencilReference(_sg.wgpu.pass_enc, pip->wgpu.stencil_ref); |
13362 | } |
13363 | |
13364 | _SOKOL_PRIVATE WGPUBindGroup _sg_wgpu_create_images_bindgroup(WGPUBindGroupLayout bgl, _sg_image_t** imgs, int num_imgs) { |
13365 | SOKOL_ASSERT(_sg.wgpu.dev); |
13366 | SOKOL_ASSERT(num_imgs <= _SG_WGPU_MAX_SHADERSTAGE_IMAGES); |
13367 | WGPUBindGroupBinding img_bgb[_SG_WGPU_MAX_SHADERSTAGE_IMAGES * 2]; |
13368 | _sg_clear(&img_bgb, sizeof(img_bgb)); |
13369 | for (int img_index = 0; img_index < num_imgs; img_index++) { |
13370 | WGPUBindGroupBinding* tex_bdg = &img_bgb[img_index*2 + 0]; |
13371 | WGPUBindGroupBinding* smp_bdg = &img_bgb[img_index*2 + 1]; |
13372 | tex_bdg->binding = img_index; |
13373 | tex_bdg->textureView = imgs[img_index]->wgpu.tex_view; |
13374 | smp_bdg->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES; |
13375 | smp_bdg->sampler = imgs[img_index]->wgpu.sampler; |
13376 | } |
13377 | WGPUBindGroupDescriptor bg_desc; |
13378 | _sg_clear(&bg_desc, sizeof(bg_desc)); |
13379 | bg_desc.layout = bgl; |
13380 | bg_desc.bindingCount = 2 * num_imgs; |
13381 | bg_desc.bindings = &img_bgb[0]; |
13382 | WGPUBindGroup bg = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc); |
13383 | SOKOL_ASSERT(bg); |
13384 | return bg; |
13385 | } |
13386 | |
/* apply resource bindings for the next draw call: index buffer, vertex
   buffers, and per-stage image bind groups (group 1 = vertex-stage images,
   group 2 = fragment-stage images; group 0 is reserved for uniform buffers)
*/
_SOKOL_PRIVATE void _sg_wgpu_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    SOKOL_ASSERT(_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.pass_enc);
    SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));

    /* index buffer */
    if (ib) {
        wgpuRenderPassEncoderSetIndexBuffer(_sg.wgpu.pass_enc, ib->wgpu.buf, ib_offset);
    }

    /* vertex buffers */
    for (uint32_t slot = 0; slot < (uint32_t)num_vbs; slot++) {
        wgpuRenderPassEncoderSetVertexBuffer(_sg.wgpu.pass_enc, slot, vbs[slot]->wgpu.buf, (uint64_t)vb_offsets[slot]);
    }

    /* need to create throw-away bind groups for images */
    if (num_vs_imgs > 0) {
        /* clamp to the backend's hard image limit */
        if (num_vs_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) {
            num_vs_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
        }
        WGPUBindGroupLayout vs_bgl = pip->shader->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout;
        SOKOL_ASSERT(vs_bgl);
        WGPUBindGroup vs_img_bg = _sg_wgpu_create_images_bindgroup(vs_bgl, vs_imgs, num_vs_imgs);
        wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 1, vs_img_bg, 0, 0);
        /* the pass encoder holds a reference now, drop ours */
        wgpuBindGroupRelease(vs_img_bg);
    }
    else {
        /* no images on this stage: bind the shared empty bind group */
        wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 1, _sg.wgpu.empty_bind_group, 0, 0);
    }
    if (num_fs_imgs > 0) {
        if (num_fs_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) {
            num_fs_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
        }
        WGPUBindGroupLayout fs_bgl = pip->shader->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout;
        SOKOL_ASSERT(fs_bgl);
        WGPUBindGroup fs_img_bg = _sg_wgpu_create_images_bindgroup(fs_bgl, fs_imgs, num_fs_imgs);
        wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 2, fs_img_bg, 0, 0);
        wgpuBindGroupRelease(fs_img_bg);
    }
    else {
        wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 2, _sg.wgpu.empty_bind_group, 0, 0);
    }
}
13436 | |
/* copy uniform data into the current frame's uniform staging buffer and
   rebind bind group 0 with updated dynamic offsets; the write cursor
   (_sg.wgpu.ub.offset) advances by the rounded-up data size
*/
_SOKOL_PRIVATE void _sg_wgpu_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) {
    SOKOL_ASSERT(_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.pass_enc);
    SOKOL_ASSERT((_sg.wgpu.ub.offset + data->size) <= _sg.wgpu.ub.num_bytes);
    SOKOL_ASSERT((_sg.wgpu.ub.offset & (_SG_WGPU_STAGING_ALIGN-1)) == 0);
    SOKOL_ASSERT(_sg.wgpu.cur_pipeline && _sg.wgpu.cur_pipeline->shader);
    SOKOL_ASSERT(_sg.wgpu.cur_pipeline->slot.id == _sg.wgpu.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.wgpu.cur_pipeline->shader->slot.id == _sg.wgpu.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
    SOKOL_ASSERT(data->size <= _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);
    SOKOL_ASSERT(data->size <= _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE);
    SOKOL_ASSERT(0 != _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur]);

    /* write the uniform data at the current staging-buffer cursor */
    uint8_t* dst_ptr = _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur] + _sg.wgpu.ub.offset;
    memcpy(dst_ptr, data->ptr, data->size);
    /* record the dynamic offset for this stage/uniform-block slot */
    _sg.wgpu.ub.bind_offsets[stage_index][ub_index] = _sg.wgpu.ub.offset;
    /* rebinding with the full offset array makes the new data visible */
    wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc,
                                      0, /* groupIndex 0 is reserved for uniform buffers */
                                      _sg.wgpu.ub.bindgroup,
                                      SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS,
                                      &_sg.wgpu.ub.bind_offsets[0][0]);
    /* advance the cursor, keeping the required dynamic-offset alignment */
    _sg.wgpu.ub.offset = _sg_roundup(_sg.wgpu.ub.offset + data->size, _SG_WGPU_STAGING_ALIGN);
}
13460 | |
13461 | _SOKOL_PRIVATE void _sg_wgpu_draw(int base_element, int num_elements, int num_instances) { |
13462 | SOKOL_ASSERT(_sg.wgpu.in_pass); |
13463 | SOKOL_ASSERT(_sg.wgpu.pass_enc); |
13464 | if (_sg.wgpu.draw_indexed) { |
13465 | wgpuRenderPassEncoderDrawIndexed(_sg.wgpu.pass_enc, num_elements, num_instances, base_element, 0, 0); |
13466 | } |
13467 | else { |
13468 | wgpuRenderPassEncoderDraw(_sg.wgpu.pass_enc, num_elements, num_instances, base_element, 0); |
13469 | } |
13470 | } |
13471 | |
13472 | _SOKOL_PRIVATE void _sg_wgpu_update_buffer(_sg_buffer_t* buf, const sg_range* data) { |
13473 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
13474 | uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, 0, data->ptr, data->size); |
13475 | SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes); |
13476 | } |
13477 | |
13478 | _SOKOL_PRIVATE int _sg_wgpu_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { |
13479 | SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); |
13480 | _SOKOL_UNUSED(new_frame); |
13481 | uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, buf->cmn.append_pos, data->ptr, data->size); |
13482 | SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes); |
13483 | return (int)copied_num_bytes; |
13484 | } |
13485 | |
13486 | _SOKOL_PRIVATE void _sg_wgpu_update_image(_sg_image_t* img, const sg_image_data* data) { |
13487 | SOKOL_ASSERT(img && data); |
13488 | bool success = _sg_wgpu_staging_copy_to_texture(img, data); |
13489 | SOKOL_ASSERT(success); |
13490 | _SOKOL_UNUSED(success); |
13491 | } |
13492 | #endif |
13493 | |
13494 | /*== BACKEND API WRAPPERS ====================================================*/ |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_setup_backend(const sg_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_setup_backend(desc);
    #elif defined(SOKOL_METAL)
    _sg_mtl_setup_backend(desc);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_setup_backend(desc);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_setup_backend(desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_setup_backend(desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
13510 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_discard_backend(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_backend();
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_backend();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_backend();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_backend();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_backend();
    #else
    #error("INVALID BACKEND");
    #endif
}
13526 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_reset_state_cache(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_reset_state_cache();
    #elif defined(SOKOL_METAL)
    _sg_mtl_reset_state_cache();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_reset_state_cache();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_reset_state_cache();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_reset_state_cache();
    #else
    #error("INVALID BACKEND");
    #endif
}
13542 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_activate_context(_sg_context_t* ctx) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_activate_context(ctx);
    #elif defined(SOKOL_METAL)
    _sg_mtl_activate_context(ctx);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_activate_context(ctx);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_activate_context(ctx);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_activate_context(ctx);
    #else
    #error("INVALID BACKEND");
    #endif
}
13558 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline sg_resource_state _sg_create_context(_sg_context_t* ctx) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_context(ctx);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_context(ctx);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_context(ctx);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_context(ctx);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_context(ctx);
    #else
    #error("INVALID BACKEND");
    #endif
}
13574 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_discard_context(_sg_context_t* ctx) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_context(ctx);
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_context(ctx);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_context(ctx);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_context(ctx);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_context(ctx);
    #else
    #error("INVALID BACKEND");
    #endif
}
13590 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline sg_resource_state _sg_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_buffer(buf, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_buffer(buf, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_buffer(buf, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_buffer(buf, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_buffer(buf, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
13606 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_discard_buffer(_sg_buffer_t* buf) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_buffer(buf);
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_buffer(buf);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_buffer(buf);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_buffer(buf);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_buffer(buf);
    #else
    #error("INVALID BACKEND");
    #endif
}
13622 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline sg_resource_state _sg_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_image(img, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_image(img, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_image(img, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_image(img, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_image(img, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
13638 | |
/* dispatch to the 3D-backend implementation selected at compile time */
static inline void _sg_discard_image(_sg_image_t* img) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_image(img);
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_image(img);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_image(img);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_image(img);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_image(img);
    #else
    #error("INVALID BACKEND");
    #endif
}
13654 | |
/* dispatch shader creation to the backend selected at compile time,
   returns the resulting resource state (VALID or FAILED) */
static inline sg_resource_state _sg_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_shader(shd, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_shader(shd, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_shader(shd, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_shader(shd, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_shader(shd, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
13670 | |
/* dispatch shader destruction to the backend selected at compile time */
static inline void _sg_discard_shader(_sg_shader_t* shd) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_shader(shd);
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_shader(shd);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_shader(shd);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_shader(shd);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_shader(shd);
    #else
    #error("INVALID BACKEND");
    #endif
}
13686 | |
/* dispatch pipeline creation to the backend selected at compile time,
   returns the resulting resource state (VALID or FAILED) */
static inline sg_resource_state _sg_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_pipeline(pip, shd, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
13702 | |
/* dispatch pipeline destruction to the backend selected at compile time */
static inline void _sg_discard_pipeline(_sg_pipeline_t* pip) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_pipeline(pip);
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_pipeline(pip);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_pipeline(pip);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_pipeline(pip);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_pipeline(pip);
    #else
    #error("INVALID BACKEND");
    #endif
}
13718 | |
/* dispatch pass creation to the backend selected at compile time,
   returns the resulting resource state (VALID or FAILED) */
static inline sg_resource_state _sg_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_pass(pass, att_images, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
13734 | |
13735 | static inline void _sg_discard_pass(_sg_pass_t* pass) { |
13736 | #if defined(_SOKOL_ANY_GL) |
13737 | _sg_gl_discard_pass(pass); |
13738 | #elif defined(SOKOL_METAL) |
13739 | _sg_mtl_discard_pass(pass); |
13740 | #elif defined(SOKOL_D3D11) |
13741 | _sg_d3d11_discard_pass(pass); |
13742 | #elif defined(SOKOL_WGPU) |
13743 | return _sg_wgpu_discard_pass(pass); |
13744 | #elif defined(SOKOL_DUMMY_BACKEND) |
13745 | _sg_dummy_discard_pass(pass); |
13746 | #else |
13747 | #error("INVALID BACKEND"); |
13748 | #endif |
13749 | } |
13750 | |
/* dispatch to the active backend: fetch the color-attachment image at 'index' of a pass */
static inline _sg_image_t* _sg_pass_color_image(const _sg_pass_t* pass, int index) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_pass_color_image(pass, index);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_pass_color_image(pass, index);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_pass_color_image(pass, index);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_pass_color_image(pass, index);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_pass_color_image(pass, index);
    #else
    #error("INVALID BACKEND");
    #endif
}
13766 | |
/* dispatch to the active backend: fetch the depth-stencil-attachment image of a pass */
static inline _sg_image_t* _sg_pass_ds_image(const _sg_pass_t* pass) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_pass_ds_image(pass);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_pass_ds_image(pass);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_pass_ds_image(pass);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_pass_ds_image(pass);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_pass_ds_image(pass);
    #else
    #error("INVALID BACKEND");
    #endif
}
13782 | |
/* dispatch begin-pass to the active backend (pass==0 presumably means the
   default framebuffer — confirm against callers); w/h is the render area size */
static inline void _sg_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_METAL)
    _sg_mtl_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_begin_pass(pass, action, w, h);
    #else
    #error("INVALID BACKEND");
    #endif
}
13798 | |
/* dispatch end-pass to the active backend */
static inline void _sg_end_pass(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_end_pass();
    #elif defined(SOKOL_METAL)
    _sg_mtl_end_pass();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_end_pass();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_end_pass();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_end_pass();
    #else
    #error("INVALID BACKEND");
    #endif
}
13814 | |
/* dispatch viewport state to the active backend; origin_top_left selects the
   y-axis convention of the x/y coordinates */
static inline void _sg_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_viewport(x, y, w, h, origin_top_left);
    #else
    #error("INVALID BACKEND");
    #endif
}
13830 | |
/* dispatch scissor-rect state to the active backend; origin_top_left selects
   the y-axis convention of the x/y coordinates */
static inline void _sg_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_scissor_rect(x, y, w, h, origin_top_left);
    #else
    #error("INVALID BACKEND");
    #endif
}
13846 | |
/* dispatch apply-pipeline to the active backend */
static inline void _sg_apply_pipeline(_sg_pipeline_t* pip) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_pipeline(pip);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_pipeline(pip);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_pipeline(pip);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_pipeline(pip);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_pipeline(pip);
    #else
    #error("INVALID BACKEND");
    #endif
}
13862 | |
/* dispatch resource bindings (vertex buffers + offsets, optional index buffer
   + offset, vertex-/fragment-stage images) to the active backend */
static inline void _sg_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #else
    #error("INVALID BACKEND");
    #endif
}
13884 | |
/* dispatch uniform-block data for one shader stage / uniform-block slot
   to the active backend */
static inline void _sg_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_uniforms(stage_index, ub_index, data);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_uniforms(stage_index, ub_index, data);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_uniforms(stage_index, ub_index, data);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_uniforms(stage_index, ub_index, data);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_uniforms(stage_index, ub_index, data);
    #else
    #error("INVALID BACKEND");
    #endif
}
13900 | |
/* dispatch a draw call to the active backend */
static inline void _sg_draw(int base_element, int num_elements, int num_instances) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_METAL)
    _sg_mtl_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_draw(base_element, num_elements, num_instances);
    #else
    #error("INVALID BACKEND");
    #endif
}
13916 | |
/* dispatch end-of-frame commit to the active backend */
static inline void _sg_commit(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_commit();
    #elif defined(SOKOL_METAL)
    _sg_mtl_commit();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_commit();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_commit();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_commit();
    #else
    #error("INVALID BACKEND");
    #endif
}
13932 | |
/* dispatch a buffer content update to the active backend */
static inline void _sg_update_buffer(_sg_buffer_t* buf, const sg_range* data) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_update_buffer(buf, data);
    #elif defined(SOKOL_METAL)
    _sg_mtl_update_buffer(buf, data);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_update_buffer(buf, data);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_update_buffer(buf, data);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_update_buffer(buf, data);
    #else
    #error("INVALID BACKEND");
    #endif
}
13948 | |
/* dispatch a buffer append to the active backend; new_frame signals the first
   append of the frame; returns the backend's int result (presumably the write
   offset — confirm against callers) */
static inline int _sg_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_append_buffer(buf, data, new_frame);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_append_buffer(buf, data, new_frame);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_append_buffer(buf, data, new_frame);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_append_buffer(buf, data, new_frame);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_append_buffer(buf, data, new_frame);
    #else
    #error("INVALID BACKEND");
    #endif
}
13964 | |
/* dispatch an image content update to the active backend */
static inline void _sg_update_image(_sg_image_t* img, const sg_image_data* data) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_update_image(img, data);
    #elif defined(SOKOL_METAL)
    _sg_mtl_update_image(img, data);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_update_image(img, data);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_update_image(img, data);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_update_image(img, data);
    #else
    #error("INVALID BACKEND");
    #endif
}
13980 | |
13981 | /*== RESOURCE POOLS ==========================================================*/ |
13982 | |
13983 | _SOKOL_PRIVATE void _sg_init_pool(_sg_pool_t* pool, int num) { |
13984 | SOKOL_ASSERT(pool && (num >= 1)); |
13985 | /* slot 0 is reserved for the 'invalid id', so bump the pool size by 1 */ |
13986 | pool->size = num + 1; |
13987 | pool->queue_top = 0; |
13988 | /* generation counters indexable by pool slot index, slot 0 is reserved */ |
13989 | size_t gen_ctrs_size = sizeof(uint32_t) * (size_t)pool->size; |
13990 | pool->gen_ctrs = (uint32_t*)_sg_malloc_clear(gen_ctrs_size); |
13991 | /* it's not a bug to only reserve 'num' here */ |
13992 | pool->free_queue = (int*) _sg_malloc_clear(sizeof(int) * (size_t)num); |
13993 | /* never allocate the zero-th pool item since the invalid id is 0 */ |
13994 | for (int i = pool->size-1; i >= 1; i--) { |
13995 | pool->free_queue[pool->queue_top++] = i; |
13996 | } |
13997 | } |
13998 | |
13999 | _SOKOL_PRIVATE void _sg_discard_pool(_sg_pool_t* pool) { |
14000 | SOKOL_ASSERT(pool); |
14001 | SOKOL_ASSERT(pool->free_queue); |
14002 | _sg_free(pool->free_queue); |
14003 | pool->free_queue = 0; |
14004 | SOKOL_ASSERT(pool->gen_ctrs); |
14005 | _sg_free(pool->gen_ctrs); |
14006 | pool->gen_ctrs = 0; |
14007 | pool->size = 0; |
14008 | pool->queue_top = 0; |
14009 | } |
14010 | |
14011 | _SOKOL_PRIVATE int _sg_pool_alloc_index(_sg_pool_t* pool) { |
14012 | SOKOL_ASSERT(pool); |
14013 | SOKOL_ASSERT(pool->free_queue); |
14014 | if (pool->queue_top > 0) { |
14015 | int slot_index = pool->free_queue[--pool->queue_top]; |
14016 | SOKOL_ASSERT((slot_index > 0) && (slot_index < pool->size)); |
14017 | return slot_index; |
14018 | } |
14019 | else { |
14020 | /* pool exhausted */ |
14021 | return _SG_INVALID_SLOT_INDEX; |
14022 | } |
14023 | } |
14024 | |
14025 | _SOKOL_PRIVATE void _sg_pool_free_index(_sg_pool_t* pool, int slot_index) { |
14026 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < pool->size)); |
14027 | SOKOL_ASSERT(pool); |
14028 | SOKOL_ASSERT(pool->free_queue); |
14029 | SOKOL_ASSERT(pool->queue_top < pool->size); |
14030 | #ifdef SOKOL_DEBUG |
14031 | /* debug check against double-free */ |
14032 | for (int i = 0; i < pool->queue_top; i++) { |
14033 | SOKOL_ASSERT(pool->free_queue[i] != slot_index); |
14034 | } |
14035 | #endif |
14036 | pool->free_queue[pool->queue_top++] = slot_index; |
14037 | SOKOL_ASSERT(pool->queue_top <= (pool->size-1)); |
14038 | } |
14039 | |
14040 | _SOKOL_PRIVATE void _sg_reset_slot(_sg_slot_t* slot) { |
14041 | SOKOL_ASSERT(slot); |
14042 | _sg_clear(slot, sizeof(_sg_slot_t)); |
14043 | } |
14044 | |
14045 | _SOKOL_PRIVATE void _sg_reset_buffer_to_alloc_state(_sg_buffer_t* buf) { |
14046 | SOKOL_ASSERT(buf); |
14047 | _sg_slot_t slot = buf->slot; |
14048 | _sg_clear(buf, sizeof(_sg_buffer_t)); |
14049 | buf->slot = slot; |
14050 | buf->slot.state = SG_RESOURCESTATE_ALLOC; |
14051 | } |
14052 | |
14053 | _SOKOL_PRIVATE void _sg_reset_image_to_alloc_state(_sg_image_t* img) { |
14054 | SOKOL_ASSERT(img); |
14055 | _sg_slot_t slot = img->slot; |
14056 | _sg_clear(img, sizeof(_sg_image_t)); |
14057 | img->slot = slot; |
14058 | img->slot.state = SG_RESOURCESTATE_ALLOC; |
14059 | } |
14060 | |
14061 | _SOKOL_PRIVATE void _sg_reset_shader_to_alloc_state(_sg_shader_t* shd) { |
14062 | SOKOL_ASSERT(shd); |
14063 | _sg_slot_t slot = shd->slot; |
14064 | _sg_clear(shd, sizeof(_sg_shader_t)); |
14065 | shd->slot = slot; |
14066 | shd->slot.state = SG_RESOURCESTATE_ALLOC; |
14067 | } |
14068 | |
14069 | _SOKOL_PRIVATE void _sg_reset_pipeline_to_alloc_state(_sg_pipeline_t* pip) { |
14070 | SOKOL_ASSERT(pip); |
14071 | _sg_slot_t slot = pip->slot; |
14072 | _sg_clear(pip, sizeof(_sg_pipeline_t)); |
14073 | pip->slot = slot; |
14074 | pip->slot.state = SG_RESOURCESTATE_ALLOC; |
14075 | } |
14076 | |
14077 | _SOKOL_PRIVATE void _sg_reset_pass_to_alloc_state(_sg_pass_t* pass) { |
14078 | SOKOL_ASSERT(pass); |
14079 | _sg_slot_t slot = pass->slot; |
14080 | _sg_clear(pass, sizeof(_sg_pass_t)); |
14081 | pass->slot = slot; |
14082 | pass->slot.state = SG_RESOURCESTATE_ALLOC; |
14083 | } |
14084 | |
14085 | _SOKOL_PRIVATE void _sg_reset_context_to_alloc_state(_sg_context_t* ctx) { |
14086 | SOKOL_ASSERT(ctx); |
14087 | _sg_slot_t slot = ctx->slot; |
14088 | _sg_clear(ctx, sizeof(_sg_context_t)); |
14089 | ctx->slot = slot; |
14090 | ctx->slot.state = SG_RESOURCESTATE_ALLOC; |
14091 | } |
14092 | |
14093 | _SOKOL_PRIVATE void _sg_setup_pools(_sg_pools_t* p, const sg_desc* desc) { |
14094 | SOKOL_ASSERT(p); |
14095 | SOKOL_ASSERT(desc); |
14096 | /* note: the pools here will have an additional item, since slot 0 is reserved */ |
14097 | SOKOL_ASSERT((desc->buffer_pool_size > 0) && (desc->buffer_pool_size < _SG_MAX_POOL_SIZE)); |
14098 | _sg_init_pool(&p->buffer_pool, desc->buffer_pool_size); |
14099 | size_t buffer_pool_byte_size = sizeof(_sg_buffer_t) * (size_t)p->buffer_pool.size; |
14100 | p->buffers = (_sg_buffer_t*) _sg_malloc_clear(buffer_pool_byte_size); |
14101 | |
14102 | SOKOL_ASSERT((desc->image_pool_size > 0) && (desc->image_pool_size < _SG_MAX_POOL_SIZE)); |
14103 | _sg_init_pool(&p->image_pool, desc->image_pool_size); |
14104 | size_t image_pool_byte_size = sizeof(_sg_image_t) * (size_t)p->image_pool.size; |
14105 | p->images = (_sg_image_t*) _sg_malloc_clear(image_pool_byte_size); |
14106 | |
14107 | SOKOL_ASSERT((desc->shader_pool_size > 0) && (desc->shader_pool_size < _SG_MAX_POOL_SIZE)); |
14108 | _sg_init_pool(&p->shader_pool, desc->shader_pool_size); |
14109 | size_t shader_pool_byte_size = sizeof(_sg_shader_t) * (size_t)p->shader_pool.size; |
14110 | p->shaders = (_sg_shader_t*) _sg_malloc_clear(shader_pool_byte_size); |
14111 | |
14112 | SOKOL_ASSERT((desc->pipeline_pool_size > 0) && (desc->pipeline_pool_size < _SG_MAX_POOL_SIZE)); |
14113 | _sg_init_pool(&p->pipeline_pool, desc->pipeline_pool_size); |
14114 | size_t pipeline_pool_byte_size = sizeof(_sg_pipeline_t) * (size_t)p->pipeline_pool.size; |
14115 | p->pipelines = (_sg_pipeline_t*) _sg_malloc_clear(pipeline_pool_byte_size); |
14116 | |
14117 | SOKOL_ASSERT((desc->pass_pool_size > 0) && (desc->pass_pool_size < _SG_MAX_POOL_SIZE)); |
14118 | _sg_init_pool(&p->pass_pool, desc->pass_pool_size); |
14119 | size_t pass_pool_byte_size = sizeof(_sg_pass_t) * (size_t)p->pass_pool.size; |
14120 | p->passes = (_sg_pass_t*) _sg_malloc_clear(pass_pool_byte_size); |
14121 | |
14122 | SOKOL_ASSERT((desc->context_pool_size > 0) && (desc->context_pool_size < _SG_MAX_POOL_SIZE)); |
14123 | _sg_init_pool(&p->context_pool, desc->context_pool_size); |
14124 | size_t context_pool_byte_size = sizeof(_sg_context_t) * (size_t)p->context_pool.size; |
14125 | p->contexts = (_sg_context_t*) _sg_malloc_clear(context_pool_byte_size); |
14126 | } |
14127 | |
14128 | _SOKOL_PRIVATE void _sg_discard_pools(_sg_pools_t* p) { |
14129 | SOKOL_ASSERT(p); |
14130 | _sg_free(p->contexts); p->contexts = 0; |
14131 | _sg_free(p->passes); p->passes = 0; |
14132 | _sg_free(p->pipelines); p->pipelines = 0; |
14133 | _sg_free(p->shaders); p->shaders = 0; |
14134 | _sg_free(p->images); p->images = 0; |
14135 | _sg_free(p->buffers); p->buffers = 0; |
14136 | _sg_discard_pool(&p->context_pool); |
14137 | _sg_discard_pool(&p->pass_pool); |
14138 | _sg_discard_pool(&p->pipeline_pool); |
14139 | _sg_discard_pool(&p->shader_pool); |
14140 | _sg_discard_pool(&p->image_pool); |
14141 | _sg_discard_pool(&p->buffer_pool); |
14142 | } |
14143 | |
14144 | /* allocate the slot at slot_index: |
14145 | - bump the slot's generation counter |
14146 | - create a resource id from the generation counter and slot index |
14147 | - set the slot's id to this id |
14148 | - set the slot's state to ALLOC |
14149 | - return the resource id |
14150 | */ |
14151 | _SOKOL_PRIVATE uint32_t _sg_slot_alloc(_sg_pool_t* pool, _sg_slot_t* slot, int slot_index) { |
14152 | /* FIXME: add handling for an overflowing generation counter, |
14153 | for now, just overflow (another option is to disable |
14154 | the slot) |
14155 | */ |
14156 | SOKOL_ASSERT(pool && pool->gen_ctrs); |
14157 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < pool->size)); |
14158 | SOKOL_ASSERT((slot->state == SG_RESOURCESTATE_INITIAL) && (slot->id == SG_INVALID_ID)); |
14159 | uint32_t ctr = ++pool->gen_ctrs[slot_index]; |
14160 | slot->id = (ctr<<_SG_SLOT_SHIFT)|(slot_index & _SG_SLOT_MASK); |
14161 | slot->state = SG_RESOURCESTATE_ALLOC; |
14162 | return slot->id; |
14163 | } |
14164 | |
14165 | /* extract slot index from id */ |
14166 | _SOKOL_PRIVATE int _sg_slot_index(uint32_t id) { |
14167 | int slot_index = (int) (id & _SG_SLOT_MASK); |
14168 | SOKOL_ASSERT(_SG_INVALID_SLOT_INDEX != slot_index); |
14169 | return slot_index; |
14170 | } |
14171 | |
14172 | /* returns pointer to resource by id without matching id check */ |
14173 | _SOKOL_PRIVATE _sg_buffer_t* _sg_buffer_at(const _sg_pools_t* p, uint32_t buf_id) { |
14174 | SOKOL_ASSERT(p && (SG_INVALID_ID != buf_id)); |
14175 | int slot_index = _sg_slot_index(buf_id); |
14176 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->buffer_pool.size)); |
14177 | return &p->buffers[slot_index]; |
14178 | } |
14179 | |
14180 | _SOKOL_PRIVATE _sg_image_t* _sg_image_at(const _sg_pools_t* p, uint32_t img_id) { |
14181 | SOKOL_ASSERT(p && (SG_INVALID_ID != img_id)); |
14182 | int slot_index = _sg_slot_index(img_id); |
14183 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->image_pool.size)); |
14184 | return &p->images[slot_index]; |
14185 | } |
14186 | |
14187 | _SOKOL_PRIVATE _sg_shader_t* _sg_shader_at(const _sg_pools_t* p, uint32_t shd_id) { |
14188 | SOKOL_ASSERT(p && (SG_INVALID_ID != shd_id)); |
14189 | int slot_index = _sg_slot_index(shd_id); |
14190 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->shader_pool.size)); |
14191 | return &p->shaders[slot_index]; |
14192 | } |
14193 | |
14194 | _SOKOL_PRIVATE _sg_pipeline_t* _sg_pipeline_at(const _sg_pools_t* p, uint32_t pip_id) { |
14195 | SOKOL_ASSERT(p && (SG_INVALID_ID != pip_id)); |
14196 | int slot_index = _sg_slot_index(pip_id); |
14197 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->pipeline_pool.size)); |
14198 | return &p->pipelines[slot_index]; |
14199 | } |
14200 | |
14201 | _SOKOL_PRIVATE _sg_pass_t* _sg_pass_at(const _sg_pools_t* p, uint32_t pass_id) { |
14202 | SOKOL_ASSERT(p && (SG_INVALID_ID != pass_id)); |
14203 | int slot_index = _sg_slot_index(pass_id); |
14204 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->pass_pool.size)); |
14205 | return &p->passes[slot_index]; |
14206 | } |
14207 | |
14208 | _SOKOL_PRIVATE _sg_context_t* _sg_context_at(const _sg_pools_t* p, uint32_t context_id) { |
14209 | SOKOL_ASSERT(p && (SG_INVALID_ID != context_id)); |
14210 | int slot_index = _sg_slot_index(context_id); |
14211 | SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->context_pool.size)); |
14212 | return &p->contexts[slot_index]; |
14213 | } |
14214 | |
14215 | /* returns pointer to resource with matching id check, may return 0 */ |
14216 | _SOKOL_PRIVATE _sg_buffer_t* _sg_lookup_buffer(const _sg_pools_t* p, uint32_t buf_id) { |
14217 | if (SG_INVALID_ID != buf_id) { |
14218 | _sg_buffer_t* buf = _sg_buffer_at(p, buf_id); |
14219 | if (buf->slot.id == buf_id) { |
14220 | return buf; |
14221 | } |
14222 | } |
14223 | return 0; |
14224 | } |
14225 | |
14226 | _SOKOL_PRIVATE _sg_image_t* _sg_lookup_image(const _sg_pools_t* p, uint32_t img_id) { |
14227 | if (SG_INVALID_ID != img_id) { |
14228 | _sg_image_t* img = _sg_image_at(p, img_id); |
14229 | if (img->slot.id == img_id) { |
14230 | return img; |
14231 | } |
14232 | } |
14233 | return 0; |
14234 | } |
14235 | |
14236 | _SOKOL_PRIVATE _sg_shader_t* _sg_lookup_shader(const _sg_pools_t* p, uint32_t shd_id) { |
14237 | SOKOL_ASSERT(p); |
14238 | if (SG_INVALID_ID != shd_id) { |
14239 | _sg_shader_t* shd = _sg_shader_at(p, shd_id); |
14240 | if (shd->slot.id == shd_id) { |
14241 | return shd; |
14242 | } |
14243 | } |
14244 | return 0; |
14245 | } |
14246 | |
14247 | _SOKOL_PRIVATE _sg_pipeline_t* _sg_lookup_pipeline(const _sg_pools_t* p, uint32_t pip_id) { |
14248 | SOKOL_ASSERT(p); |
14249 | if (SG_INVALID_ID != pip_id) { |
14250 | _sg_pipeline_t* pip = _sg_pipeline_at(p, pip_id); |
14251 | if (pip->slot.id == pip_id) { |
14252 | return pip; |
14253 | } |
14254 | } |
14255 | return 0; |
14256 | } |
14257 | |
14258 | _SOKOL_PRIVATE _sg_pass_t* _sg_lookup_pass(const _sg_pools_t* p, uint32_t pass_id) { |
14259 | SOKOL_ASSERT(p); |
14260 | if (SG_INVALID_ID != pass_id) { |
14261 | _sg_pass_t* pass = _sg_pass_at(p, pass_id); |
14262 | if (pass->slot.id == pass_id) { |
14263 | return pass; |
14264 | } |
14265 | } |
14266 | return 0; |
14267 | } |
14268 | |
14269 | _SOKOL_PRIVATE _sg_context_t* _sg_lookup_context(const _sg_pools_t* p, uint32_t ctx_id) { |
14270 | SOKOL_ASSERT(p); |
14271 | if (SG_INVALID_ID != ctx_id) { |
14272 | _sg_context_t* ctx = _sg_context_at(p, ctx_id); |
14273 | if (ctx->slot.id == ctx_id) { |
14274 | return ctx; |
14275 | } |
14276 | } |
14277 | return 0; |
14278 | } |
14279 | |
14280 | _SOKOL_PRIVATE void _sg_discard_all_resources(_sg_pools_t* p, uint32_t ctx_id) { |
14281 | /* this is a bit dumb since it loops over all pool slots to |
14282 | find the occupied slots, on the other hand it is only ever |
14283 | executed at shutdown |
14284 | NOTE: ONLY EXECUTE THIS AT SHUTDOWN |
14285 | ...because the free queues will not be reset |
14286 | and the resource slots not be cleared! |
14287 | */ |
14288 | for (int i = 1; i < p->buffer_pool.size; i++) { |
14289 | if (p->buffers[i].slot.ctx_id == ctx_id) { |
14290 | sg_resource_state state = p->buffers[i].slot.state; |
14291 | if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { |
14292 | _sg_discard_buffer(&p->buffers[i]); |
14293 | } |
14294 | } |
14295 | } |
14296 | for (int i = 1; i < p->image_pool.size; i++) { |
14297 | if (p->images[i].slot.ctx_id == ctx_id) { |
14298 | sg_resource_state state = p->images[i].slot.state; |
14299 | if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { |
14300 | _sg_discard_image(&p->images[i]); |
14301 | } |
14302 | } |
14303 | } |
14304 | for (int i = 1; i < p->shader_pool.size; i++) { |
14305 | if (p->shaders[i].slot.ctx_id == ctx_id) { |
14306 | sg_resource_state state = p->shaders[i].slot.state; |
14307 | if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { |
14308 | _sg_discard_shader(&p->shaders[i]); |
14309 | } |
14310 | } |
14311 | } |
14312 | for (int i = 1; i < p->pipeline_pool.size; i++) { |
14313 | if (p->pipelines[i].slot.ctx_id == ctx_id) { |
14314 | sg_resource_state state = p->pipelines[i].slot.state; |
14315 | if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { |
14316 | _sg_discard_pipeline(&p->pipelines[i]); |
14317 | } |
14318 | } |
14319 | } |
14320 | for (int i = 1; i < p->pass_pool.size; i++) { |
14321 | if (p->passes[i].slot.ctx_id == ctx_id) { |
14322 | sg_resource_state state = p->passes[i].slot.state; |
14323 | if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { |
14324 | _sg_discard_pass(&p->passes[i]); |
14325 | } |
14326 | } |
14327 | } |
14328 | } |
14329 | |
14330 | /*== VALIDATION LAYER ========================================================*/ |
14331 | #if defined(SOKOL_DEBUG) |
14332 | /* return a human readable string for an _sg_validate_error */ |
14333 | _SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) { |
14334 | switch (err) { |
14335 | /* buffer creation validation errors */ |
14336 | case _SG_VALIDATE_BUFFERDESC_CANARY: return "sg_buffer_desc not initialized"; |
14337 | case _SG_VALIDATE_BUFFERDESC_SIZE: return "sg_buffer_desc.size cannot be 0"; |
14338 | case _SG_VALIDATE_BUFFERDESC_DATA: return "immutable buffers must be initialized with data (sg_buffer_desc.data.ptr and sg_buffer_desc.data.size)"; |
14339 | case _SG_VALIDATE_BUFFERDESC_DATA_SIZE: return "immutable buffer data size differs from buffer size"; |
14340 | case _SG_VALIDATE_BUFFERDESC_NO_DATA: return "dynamic/stream usage buffers cannot be initialized with data"; |
14341 | |
14342 | /* image data (in image creation and updating) */ |
14343 | case _SG_VALIDATE_IMAGEDATA_NODATA: return "sg_image_data: no data (.ptr and/or .size is zero)"; |
14344 | case _SG_VALIDATE_IMAGEDATA_DATA_SIZE: return "sg_image_data: data size doesn't match expected surface size"; |
14345 | |
14346 | /* image creation validation errros */ |
14347 | case _SG_VALIDATE_IMAGEDESC_CANARY: return "sg_image_desc not initialized"; |
14348 | case _SG_VALIDATE_IMAGEDESC_WIDTH: return "sg_image_desc.width must be > 0"; |
14349 | case _SG_VALIDATE_IMAGEDESC_HEIGHT: return "sg_image_desc.height must be > 0"; |
14350 | case _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT: return "invalid pixel format for render-target image"; |
14351 | case _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT: return "invalid pixel format for non-render-target image"; |
14352 | case _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT: return "non-render-target images cannot be multisampled"; |
14353 | case _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT: return "MSAA not supported for this pixel format"; |
14354 | case _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE: return "render target images must be SG_USAGE_IMMUTABLE"; |
14355 | case _SG_VALIDATE_IMAGEDESC_RT_NO_DATA: return "render target images cannot be initialized with data"; |
14356 | case _SG_VALIDATE_IMAGEDESC_INJECTED_NO_DATA: return "images with injected textures cannot be initialized with data"; |
14357 | case _SG_VALIDATE_IMAGEDESC_DYNAMIC_NO_DATA: return "dynamic/stream images cannot be initialized with data"; |
14358 | case _SG_VALIDATE_IMAGEDESC_COMPRESSED_IMMUTABLE: return "compressed images must be immutable"; |
14359 | |
14360 | /* shader creation */ |
14361 | case _SG_VALIDATE_SHADERDESC_CANARY: return "sg_shader_desc not initialized"; |
14362 | case _SG_VALIDATE_SHADERDESC_SOURCE: return "shader source code required"; |
14363 | case _SG_VALIDATE_SHADERDESC_BYTECODE: return "shader byte code required"; |
14364 | case _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE: return "shader source or byte code required"; |
14365 | case _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE: return "shader byte code length (in bytes) required"; |
14366 | case _SG_VALIDATE_SHADERDESC_NO_CONT_UBS: return "shader uniform blocks must occupy continuous slots"; |
14367 | case _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS: return "uniform block members must occupy continuous slots"; |
14368 | case _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS: return "GL backend requires uniform block member declarations"; |
14369 | case _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME: return "uniform block member name missing"; |
14370 | case _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH: return "size of uniform block members doesn't match uniform block size"; |
14371 | case _SG_VALIDATE_SHADERDESC_UB_ARRAY_COUNT: return "uniform array count must be >= 1"; |
14372 | case _SG_VALIDATE_SHADERDESC_UB_STD140_ARRAY_TYPE: return "uniform arrays only allowed for FLOAT4, INT4, MAT4 in std140 layout"; |
14373 | |
14374 | case _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS: return "shader images must occupy continuous slots"; |
14375 | case _SG_VALIDATE_SHADERDESC_IMG_NAME: return "GL backend requires uniform block member names"; |
14376 | case _SG_VALIDATE_SHADERDESC_ATTR_NAMES: return "GLES2 backend requires vertex attribute names"; |
14377 | case _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS: return "D3D11 backend requires vertex attribute semantics"; |
14378 | case _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG: return "vertex attribute name/semantic string too long (max len 16)"; |
14379 | |
14380 | /* pipeline creation */ |
14381 | case _SG_VALIDATE_PIPELINEDESC_CANARY: return "sg_pipeline_desc not initialized"; |
14382 | case _SG_VALIDATE_PIPELINEDESC_SHADER: return "sg_pipeline_desc.shader missing or invalid"; |
14383 | case _SG_VALIDATE_PIPELINEDESC_NO_ATTRS: return "sg_pipeline_desc.layout.attrs is empty or not continuous"; |
14384 | case _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4: return "sg_pipeline_desc.layout.buffers[].stride must be multiple of 4"; |
14385 | case _SG_VALIDATE_PIPELINEDESC_ATTR_NAME: return "GLES2/WebGL missing vertex attribute name in shader"; |
14386 | case _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS: return "D3D11 missing vertex attribute semantics in shader"; |
14387 | |
14388 | /* pass creation */ |
14389 | case _SG_VALIDATE_PASSDESC_CANARY: return "sg_pass_desc not initialized"; |
14390 | case _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS: return "sg_pass_desc.color_attachments[0] must be valid"; |
14391 | case _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS: return "color attachments must occupy continuous slots"; |
14392 | case _SG_VALIDATE_PASSDESC_IMAGE: return "pass attachment image is not valid"; |
14393 | case _SG_VALIDATE_PASSDESC_MIPLEVEL: return "pass attachment mip level is bigger than image has mipmaps"; |
14394 | case _SG_VALIDATE_PASSDESC_FACE: return "pass attachment image is cubemap, but face index is too big"; |
14395 | case _SG_VALIDATE_PASSDESC_LAYER: return "pass attachment image is array texture, but layer index is too big"; |
14396 | case _SG_VALIDATE_PASSDESC_SLICE: return "pass attachment image is 3d texture, but slice value is too big"; |
14397 | case _SG_VALIDATE_PASSDESC_IMAGE_NO_RT: return "pass attachment image must be render targets"; |
14398 | case _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT: return "pass color-attachment images must have a renderable pixel format"; |
14399 | case _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT: return "pass depth-attachment image must have depth pixel format"; |
14400 | case _SG_VALIDATE_PASSDESC_IMAGE_SIZES: return "all pass attachments must have the same size"; |
14401 | case _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS: return "all pass attachments must have the same sample count"; |
14402 | |
14403 | /* sg_begin_pass */ |
14404 | case _SG_VALIDATE_BEGINPASS_PASS: return "sg_begin_pass: pass must be valid"; |
14405 | case _SG_VALIDATE_BEGINPASS_IMAGE: return "sg_begin_pass: one or more attachment images are not valid"; |
14406 | |
14407 | /* sg_apply_pipeline */ |
14408 | case _SG_VALIDATE_APIP_PIPELINE_VALID_ID: return "sg_apply_pipeline: invalid pipeline id provided"; |
14409 | case _SG_VALIDATE_APIP_PIPELINE_EXISTS: return "sg_apply_pipeline: pipeline object no longer alive"; |
14410 | case _SG_VALIDATE_APIP_PIPELINE_VALID: return "sg_apply_pipeline: pipeline object not in valid state"; |
14411 | case _SG_VALIDATE_APIP_SHADER_EXISTS: return "sg_apply_pipeline: shader object no longer alive"; |
14412 | case _SG_VALIDATE_APIP_SHADER_VALID: return "sg_apply_pipeline: shader object not in valid state"; |
14413 | case _SG_VALIDATE_APIP_ATT_COUNT: return "sg_apply_pipeline: number of pipeline color attachments doesn't match number of pass color attachments"; |
14414 | case _SG_VALIDATE_APIP_COLOR_FORMAT: return "sg_apply_pipeline: pipeline color attachment pixel format doesn't match pass color attachment pixel format"; |
14415 | case _SG_VALIDATE_APIP_DEPTH_FORMAT: return "sg_apply_pipeline: pipeline depth pixel_format doesn't match pass depth attachment pixel format"; |
14416 | case _SG_VALIDATE_APIP_SAMPLE_COUNT: return "sg_apply_pipeline: pipeline MSAA sample count doesn't match render pass attachment sample count"; |
14417 | |
14418 | /* sg_apply_bindings */ |
14419 | case _SG_VALIDATE_ABND_PIPELINE: return "sg_apply_bindings: must be called after sg_apply_pipeline"; |
14420 | case _SG_VALIDATE_ABND_PIPELINE_EXISTS: return "sg_apply_bindings: currently applied pipeline object no longer alive"; |
14421 | case _SG_VALIDATE_ABND_PIPELINE_VALID: return "sg_apply_bindings: currently applied pipeline object not in valid state"; |
14422 | case _SG_VALIDATE_ABND_VBS: return "sg_apply_bindings: number of vertex buffers doesn't match number of pipeline vertex layouts"; |
14423 | case _SG_VALIDATE_ABND_VB_EXISTS: return "sg_apply_bindings: vertex buffer no longer alive"; |
14424 | case _SG_VALIDATE_ABND_VB_TYPE: return "sg_apply_bindings: buffer in vertex buffer slot is not a SG_BUFFERTYPE_VERTEXBUFFER"; |
14425 | case _SG_VALIDATE_ABND_VB_OVERFLOW: return "sg_apply_bindings: buffer in vertex buffer slot is overflown"; |
14426 | case _SG_VALIDATE_ABND_NO_IB: return "sg_apply_bindings: pipeline object defines indexed rendering, but no index buffer provided"; |
14427 | case _SG_VALIDATE_ABND_IB: return "sg_apply_bindings: pipeline object defines non-indexed rendering, but index buffer provided"; |
14428 | case _SG_VALIDATE_ABND_IB_EXISTS: return "sg_apply_bindings: index buffer no longer alive"; |
14429 | case _SG_VALIDATE_ABND_IB_TYPE: return "sg_apply_bindings: buffer in index buffer slot is not a SG_BUFFERTYPE_INDEXBUFFER"; |
14430 | case _SG_VALIDATE_ABND_IB_OVERFLOW: return "sg_apply_bindings: buffer in index buffer slot is overflown"; |
14431 | case _SG_VALIDATE_ABND_VS_IMGS: return "sg_apply_bindings: vertex shader image count doesn't match sg_shader_desc"; |
14432 | case _SG_VALIDATE_ABND_VS_IMG_EXISTS: return "sg_apply_bindings: vertex shader image no longer alive"; |
14433 | case _SG_VALIDATE_ABND_VS_IMG_TYPES: return "sg_apply_bindings: one or more vertex shader image types don't match sg_shader_desc"; |
14434 | case _SG_VALIDATE_ABND_FS_IMGS: return "sg_apply_bindings: fragment shader image count doesn't match sg_shader_desc"; |
14435 | case _SG_VALIDATE_ABND_FS_IMG_EXISTS: return "sg_apply_bindings: fragment shader image no longer alive"; |
14436 | case _SG_VALIDATE_ABND_FS_IMG_TYPES: return "sg_apply_bindings: one or more fragment shader image types don't match sg_shader_desc"; |
14437 | |
14438 | /* sg_apply_uniforms */ |
14439 | case _SG_VALIDATE_AUB_NO_PIPELINE: return "sg_apply_uniforms: must be called after sg_apply_pipeline()"; |
14440 | case _SG_VALIDATE_AUB_NO_UB_AT_SLOT: return "sg_apply_uniforms: no uniform block declaration at this shader stage UB slot"; |
14441 | case _SG_VALIDATE_AUB_SIZE: return "sg_apply_uniforms: data size exceeds declared uniform block size"; |
14442 | |
14443 | /* sg_update_buffer */ |
14444 | case _SG_VALIDATE_UPDATEBUF_USAGE: return "sg_update_buffer: cannot update immutable buffer"; |
14445 | case _SG_VALIDATE_UPDATEBUF_SIZE: return "sg_update_buffer: update size is bigger than buffer size"; |
14446 | case _SG_VALIDATE_UPDATEBUF_ONCE: return "sg_update_buffer: only one update allowed per buffer and frame"; |
14447 | case _SG_VALIDATE_UPDATEBUF_APPEND: return "sg_update_buffer: cannot call sg_update_buffer and sg_append_buffer in same frame"; |
14448 | |
14449 | /* sg_append_buffer */ |
14450 | case _SG_VALIDATE_APPENDBUF_USAGE: return "sg_append_buffer: cannot append to immutable buffer"; |
14451 | case _SG_VALIDATE_APPENDBUF_SIZE: return "sg_append_buffer: overall appended size is bigger than buffer size"; |
14452 | case _SG_VALIDATE_APPENDBUF_UPDATE: return "sg_append_buffer: cannot call sg_append_buffer and sg_update_buffer in same frame"; |
14453 | |
14454 | /* sg_update_image */ |
14455 | case _SG_VALIDATE_UPDIMG_USAGE: return "sg_update_image: cannot update immutable image"; |
14456 | case _SG_VALIDATE_UPDIMG_ONCE: return "sg_update_image: only one update allowed per image and frame"; |
14457 | |
14458 | default: return "unknown validation error"; |
14459 | } |
14460 | } |
14461 | #endif /* defined(SOKOL_DEBUG) */ |
14462 | |
14463 | /*-- validation checks -------------------------------------------------------*/ |
14464 | #if defined(SOKOL_DEBUG) |
14465 | _SOKOL_PRIVATE void _sg_validate_begin(void) { |
14466 | _sg.validate_error = _SG_VALIDATE_SUCCESS; |
14467 | } |
14468 | |
14469 | _SOKOL_PRIVATE void _sg_validate(bool cond, _sg_validate_error_t err) { |
14470 | if (!cond) { |
14471 | _sg.validate_error = err; |
14472 | SG_LOG(_sg_validate_string(err)); |
14473 | } |
14474 | } |
14475 | |
14476 | _SOKOL_PRIVATE bool _sg_validate_end(void) { |
14477 | if (_sg.validate_error != _SG_VALIDATE_SUCCESS) { |
14478 | #if !defined(SOKOL_VALIDATE_NON_FATAL) |
14479 | SG_LOG("^^^^ SOKOL-GFX VALIDATION FAILED, TERMINATING ^^^^"); |
14480 | SOKOL_ASSERT(false); |
14481 | #endif |
14482 | return false; |
14483 | } |
14484 | else { |
14485 | return true; |
14486 | } |
14487 | } |
14488 | #endif |
14489 | |
14490 | _SOKOL_PRIVATE bool _sg_validate_buffer_desc(const sg_buffer_desc* desc) { |
14491 | #if !defined(SOKOL_DEBUG) |
14492 | _SOKOL_UNUSED(desc); |
14493 | return true; |
14494 | #else |
14495 | if (_sg.desc.disable_validation) { |
14496 | return true; |
14497 | } |
14498 | SOKOL_ASSERT(desc); |
14499 | SOKOL_VALIDATE_BEGIN(); |
14500 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY); |
14501 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY); |
14502 | SOKOL_VALIDATE(desc->size > 0, _SG_VALIDATE_BUFFERDESC_SIZE); |
14503 | bool injected = (0 != desc->gl_buffers[0]) || |
14504 | (0 != desc->mtl_buffers[0]) || |
14505 | (0 != desc->d3d11_buffer) || |
14506 | (0 != desc->wgpu_buffer); |
14507 | if (!injected && (desc->usage == SG_USAGE_IMMUTABLE)) { |
14508 | SOKOL_VALIDATE((0 != desc->data.ptr) && (desc->data.size > 0), _SG_VALIDATE_BUFFERDESC_DATA); |
14509 | SOKOL_VALIDATE(desc->size == desc->data.size, _SG_VALIDATE_BUFFERDESC_DATA_SIZE); |
14510 | } |
14511 | else { |
14512 | SOKOL_VALIDATE(0 == desc->data.ptr, _SG_VALIDATE_BUFFERDESC_NO_DATA); |
14513 | } |
14514 | return SOKOL_VALIDATE_END(); |
14515 | #endif |
14516 | } |
14517 | |
14518 | _SOKOL_PRIVATE void _sg_validate_image_data(const sg_image_data* data, sg_pixel_format fmt, int width, int height, int num_faces, int num_mips, int num_slices) { |
14519 | #if !defined(SOKOL_DEBUG) |
14520 | _SOKOL_UNUSED(data); |
14521 | _SOKOL_UNUSED(fmt); |
14522 | _SOKOL_UNUSED(width); |
14523 | _SOKOL_UNUSED(height); |
14524 | _SOKOL_UNUSED(num_faces); |
14525 | _SOKOL_UNUSED(num_mips); |
14526 | _SOKOL_UNUSED(num_slices); |
14527 | #else |
14528 | for (int face_index = 0; face_index < num_faces; face_index++) { |
14529 | for (int mip_index = 0; mip_index < num_mips; mip_index++) { |
14530 | const bool has_data = data->subimage[face_index][mip_index].ptr != 0; |
14531 | const bool has_size = data->subimage[face_index][mip_index].size > 0; |
14532 | SOKOL_VALIDATE(has_data && has_size, _SG_VALIDATE_IMAGEDATA_NODATA); |
14533 | const int mip_width = _sg_max(width >> mip_index, 1); |
14534 | const int mip_height = _sg_max(height >> mip_index, 1); |
14535 | const int bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, 1); |
14536 | const int expected_size = bytes_per_slice * num_slices; |
14537 | SOKOL_VALIDATE(expected_size == (int)data->subimage[face_index][mip_index].size, _SG_VALIDATE_IMAGEDATA_DATA_SIZE); |
14538 | } |
14539 | } |
14540 | #endif |
14541 | } |
14542 | |
/* Validate an sg_image_desc before image creation.
   Returns true when the desc is valid; always returns true in release
   builds or when validation has been disabled via sg_desc.disable_validation.
*/
_SOKOL_PRIVATE bool _sg_validate_image_desc(const sg_image_desc* desc) {
    #if !defined(SOKOL_DEBUG)
    _SOKOL_UNUSED(desc);
    return true;
    #else
    if (_sg.desc.disable_validation) {
        return true;
    }
    SOKOL_ASSERT(desc);
    SOKOL_VALIDATE_BEGIN();
    /* the canary values catch desc structs which were not zero-initialized */
    SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY);
    SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY);
    SOKOL_VALIDATE(desc->width > 0, _SG_VALIDATE_IMAGEDESC_WIDTH);
    SOKOL_VALIDATE(desc->height > 0, _SG_VALIDATE_IMAGEDESC_HEIGHT);
    const sg_pixel_format fmt = desc->pixel_format;
    const sg_usage usage = desc->usage;
    /* 'injected' means the image wraps externally created native texture objects */
    const bool injected = (0 != desc->gl_textures[0]) ||
                          (0 != desc->mtl_textures[0]) ||
                          (0 != desc->d3d11_texture) ||
                          (0 != desc->wgpu_texture);
    if (desc->render_target) {
        /* render targets must use a renderable pixel format */
        SOKOL_ASSERT(((int)fmt >= 0) && ((int)fmt < _SG_PIXELFORMAT_NUM));
        SOKOL_VALIDATE(_sg.formats[fmt].render, _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT);
        /* on GLES2, sample count for render targets is completely ignored
           (note: the opening/closing braces of this if intentionally straddle
           the preprocessor blocks)
        */
        #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
        if (!_sg.gl.gles2) {
        #endif
            /* MSAA render targets need runtime feature support and a pixel
               format which supports multisampling */
            if (desc->sample_count > 1) {
                SOKOL_VALIDATE(_sg.features.msaa_render_targets && _sg.formats[fmt].msaa, _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT);
            }
        #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
        }
        #endif
        /* render target content is produced by rendering, not uploaded */
        SOKOL_VALIDATE(usage == SG_USAGE_IMMUTABLE, _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE);
        SOKOL_VALIDATE(desc->data.subimage[0][0].ptr==0, _SG_VALIDATE_IMAGEDESC_RT_NO_DATA);
    }
    else {
        /* non-render-target images cannot be multisampled */
        SOKOL_VALIDATE(desc->sample_count <= 1, _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT);
        /* depth pixel formats are only allowed on render targets */
        const bool valid_nonrt_fmt = !_sg_is_valid_rendertarget_depth_format(fmt);
        SOKOL_VALIDATE(valid_nonrt_fmt, _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT);
        const bool is_compressed = _sg_is_compressed_pixel_format(desc->pixel_format);
        const bool is_immutable = (usage == SG_USAGE_IMMUTABLE);
        /* compressed images cannot be updated after creation */
        if (is_compressed) {
            SOKOL_VALIDATE(is_immutable, _SG_VALIDATE_IMAGEDESC_COMPRESSED_IMMUTABLE);
        }
        if (!injected && is_immutable) {
            // image desc must have valid data
            _sg_validate_image_data(&desc->data,
                desc->pixel_format,
                desc->width,
                desc->height,
                (desc->type == SG_IMAGETYPE_CUBE) ? 6 : 1,
                desc->num_mipmaps,
                desc->num_slices);
        }
        else {
            // injected or dynamic/stream images must not provide initial data
            for (int face_index = 0; face_index < SG_CUBEFACE_NUM; face_index++) {
                for (int mip_index = 0; mip_index < SG_MAX_MIPMAPS; mip_index++) {
                    const bool no_data = 0 == desc->data.subimage[face_index][mip_index].ptr;
                    const bool no_size = 0 == desc->data.subimage[face_index][mip_index].size;
                    if (injected) {
                        SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_INJECTED_NO_DATA);
                    }
                    if (!is_immutable) {
                        SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_DYNAMIC_NO_DATA);
                    }
                }
            }
        }
    }
    return SOKOL_VALIDATE_END();
    #endif
}
14617 | |
/* Validate an sg_shader_desc before shader creation.
   The required inputs differ per backend (GL: source code, WGPU: byte code,
   Metal/D3D11: either). Returns true when the desc is valid; always returns
   true in release builds or when validation has been disabled via
   sg_desc.disable_validation.
*/
_SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) {
    #if !defined(SOKOL_DEBUG)
    _SOKOL_UNUSED(desc);
    return true;
    #else
    if (_sg.desc.disable_validation) {
        return true;
    }
    SOKOL_ASSERT(desc);
    SOKOL_VALIDATE_BEGIN();
    /* the canary values catch desc structs which were not zero-initialized */
    SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY);
    SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY);
    #if defined(SOKOL_GLES2)
    /* on GLES2, vertex attributes are bound by name */
    SOKOL_VALIDATE(0 != desc->attrs[0].name, _SG_VALIDATE_SHADERDESC_ATTR_NAMES);
    #elif defined(SOKOL_D3D11)
    /* on D3D11, vertex attributes are matched by HLSL semantic name */
    SOKOL_VALIDATE(0 != desc->attrs[0].sem_name, _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS);
    #endif
    #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
    /* on GL, must provide shader source code */
    SOKOL_VALIDATE(0 != desc->vs.source, _SG_VALIDATE_SHADERDESC_SOURCE);
    SOKOL_VALIDATE(0 != desc->fs.source, _SG_VALIDATE_SHADERDESC_SOURCE);
    #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11)
    /* on Metal or D3D11, must provide shader source code or byte code */
    SOKOL_VALIDATE((0 != desc->vs.source)||(0 != desc->vs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE);
    SOKOL_VALIDATE((0 != desc->fs.source)||(0 != desc->fs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE);
    #elif defined(SOKOL_WGPU)
    /* on WGPU byte code must be provided */
    SOKOL_VALIDATE((0 != desc->vs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_BYTECODE);
    SOKOL_VALIDATE((0 != desc->fs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_BYTECODE);
    #else
    /* Dummy Backend, don't require source or bytecode */
    #endif
    /* attribute names/semantics are copied into fixed-size internal strings,
       so they must fit (including the zero terminator) */
    for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) {
        if (desc->attrs[i].name) {
            SOKOL_VALIDATE(strlen(desc->attrs[i].name) < _SG_STRING_SIZE, _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG);
        }
        if (desc->attrs[i].sem_name) {
            SOKOL_VALIDATE(strlen(desc->attrs[i].sem_name) < _SG_STRING_SIZE, _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG);
        }
    }
    /* if shader byte code, the size must also be provided */
    if (0 != desc->vs.bytecode.ptr) {
        SOKOL_VALIDATE(desc->vs.bytecode.size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE);
    }
    if (0 != desc->fs.bytecode.ptr) {
        SOKOL_VALIDATE(desc->fs.bytecode.size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE);
    }
    /* validate both shader stages (index 0: vertex stage, index 1: fragment stage) */
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        const sg_shader_stage_desc* stage_desc = (stage_index == 0)? &desc->vs : &desc->fs;
        /* uniform blocks must occupy continuous bind slots without gaps */
        bool uniform_blocks_continuous = true;
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index];
            if (ub_desc->size > 0) {
                SOKOL_VALIDATE(uniform_blocks_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UBS);
                #if defined(_SOKOL_ANY_GL)
                /* on GL backends, individual uniform block members must be
                   declared; walk the members, compute the layouted size and
                   check it against the declared uniform block size */
                bool uniforms_continuous = true;
                uint32_t uniform_offset = 0;
                int num_uniforms = 0;
                for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) {
                    const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index];
                    if (u_desc->type != SG_UNIFORMTYPE_INVALID) {
                        SOKOL_VALIDATE(uniforms_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS);
                        #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
                        /* GLES looks up uniforms by name at runtime */
                        SOKOL_VALIDATE(0 != u_desc->name, _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME);
                        #endif
                        const int array_count = u_desc->array_count;
                        SOKOL_VALIDATE(array_count > 0, _SG_VALIDATE_SHADERDESC_UB_ARRAY_COUNT);
                        const uint32_t u_align = _sg_uniform_alignment(u_desc->type, array_count, ub_desc->layout);
                        const uint32_t u_size = _sg_uniform_size(u_desc->type, array_count, ub_desc->layout);
                        uniform_offset = _sg_align_u32(uniform_offset, u_align);
                        uniform_offset += u_size;
                        num_uniforms++;
                        // with std140, arrays are only allowed for FLOAT4, INT4, MAT4
                        if (ub_desc->layout == SG_UNIFORMLAYOUT_STD140) {
                            if (array_count > 1) {
                                SOKOL_VALIDATE((u_desc->type == SG_UNIFORMTYPE_FLOAT4) || (u_desc->type == SG_UNIFORMTYPE_INT4) || (u_desc->type == SG_UNIFORMTYPE_MAT4), _SG_VALIDATE_SHADERDESC_UB_STD140_ARRAY_TYPE);
                            }
                        }
                    }
                    else {
                        uniforms_continuous = false;
                    }
                }
                /* std140 pads the uniform block size up to a multiple of 16 */
                if (ub_desc->layout == SG_UNIFORMLAYOUT_STD140) {
                    uniform_offset = _sg_align_u32(uniform_offset, 16);
                }
                SOKOL_VALIDATE((size_t)uniform_offset == ub_desc->size, _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH);
                SOKOL_VALIDATE(num_uniforms > 0, _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS);
                #endif
            }
            else {
                uniform_blocks_continuous = false;
            }
        }
        /* images must occupy continuous bind slots without gaps */
        bool images_continuous = true;
        for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) {
            const sg_shader_image_desc* img_desc = &stage_desc->images[img_index];
            if (img_desc->image_type != _SG_IMAGETYPE_DEFAULT) {
                SOKOL_VALIDATE(images_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS);
                #if defined(SOKOL_GLES2)
                /* GLES2 looks up image/sampler uniforms by name */
                SOKOL_VALIDATE(0 != img_desc->name, _SG_VALIDATE_SHADERDESC_IMG_NAME);
                #endif
            }
            else {
                images_continuous = false;
            }
        }
    }
    return SOKOL_VALIDATE_END();
    #endif
}
14729 | |
14730 | _SOKOL_PRIVATE bool _sg_validate_pipeline_desc(const sg_pipeline_desc* desc) { |
14731 | #if !defined(SOKOL_DEBUG) |
14732 | _SOKOL_UNUSED(desc); |
14733 | return true; |
14734 | #else |
14735 | if (_sg.desc.disable_validation) { |
14736 | return true; |
14737 | } |
14738 | SOKOL_ASSERT(desc); |
14739 | SOKOL_VALIDATE_BEGIN(); |
14740 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY); |
14741 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY); |
14742 | SOKOL_VALIDATE(desc->shader.id != SG_INVALID_ID, _SG_VALIDATE_PIPELINEDESC_SHADER); |
14743 | for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) { |
14744 | const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[buf_index]; |
14745 | if (l_desc->stride == 0) { |
14746 | continue; |
14747 | } |
14748 | SOKOL_VALIDATE((l_desc->stride & 3) == 0, _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4); |
14749 | } |
14750 | SOKOL_VALIDATE(desc->layout.attrs[0].format != SG_VERTEXFORMAT_INVALID, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS); |
14751 | const _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id); |
14752 | SOKOL_VALIDATE(0 != shd, _SG_VALIDATE_PIPELINEDESC_SHADER); |
14753 | if (shd) { |
14754 | SOKOL_VALIDATE(shd->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PIPELINEDESC_SHADER); |
14755 | bool attrs_cont = true; |
14756 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
14757 | const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; |
14758 | if (a_desc->format == SG_VERTEXFORMAT_INVALID) { |
14759 | attrs_cont = false; |
14760 | continue; |
14761 | } |
14762 | SOKOL_VALIDATE(attrs_cont, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS); |
14763 | SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); |
14764 | #if defined(SOKOL_GLES2) |
14765 | /* on GLES2, vertex attribute names must be provided */ |
14766 | SOKOL_VALIDATE(!_sg_strempty(&shd->gl.attrs[attr_index].name), _SG_VALIDATE_PIPELINEDESC_ATTR_NAME); |
14767 | #elif defined(SOKOL_D3D11) |
14768 | /* on D3D11, semantic names (and semantic indices) must be provided */ |
14769 | SOKOL_VALIDATE(!_sg_strempty(&shd->d3d11.attrs[attr_index].sem_name), _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS); |
14770 | #endif |
14771 | } |
14772 | } |
14773 | return SOKOL_VALIDATE_END(); |
14774 | #endif |
14775 | } |
14776 | |
14777 | _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { |
14778 | #if !defined(SOKOL_DEBUG) |
14779 | _SOKOL_UNUSED(desc); |
14780 | return true; |
14781 | #else |
14782 | if (_sg.desc.disable_validation) { |
14783 | return true; |
14784 | } |
14785 | SOKOL_ASSERT(desc); |
14786 | SOKOL_VALIDATE_BEGIN(); |
14787 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); |
14788 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); |
14789 | bool atts_cont = true; |
14790 | int width = -1, height = -1, sample_count = -1; |
14791 | for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) { |
14792 | const sg_pass_attachment_desc* att = &desc->color_attachments[att_index]; |
14793 | if (att->image.id == SG_INVALID_ID) { |
14794 | SOKOL_VALIDATE(att_index > 0, _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS); |
14795 | atts_cont = false; |
14796 | continue; |
14797 | } |
14798 | SOKOL_VALIDATE(atts_cont, _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS); |
14799 | const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id); |
14800 | SOKOL_ASSERT(img); |
14801 | SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); |
14802 | SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); |
14803 | if (img->cmn.type == SG_IMAGETYPE_CUBE) { |
14804 | SOKOL_VALIDATE(att->slice < 6, _SG_VALIDATE_PASSDESC_FACE); |
14805 | } |
14806 | else if (img->cmn.type == SG_IMAGETYPE_ARRAY) { |
14807 | SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_LAYER); |
14808 | } |
14809 | else if (img->cmn.type == SG_IMAGETYPE_3D) { |
14810 | SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_SLICE); |
14811 | } |
14812 | SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); |
14813 | if (att_index == 0) { |
14814 | width = img->cmn.width >> att->mip_level; |
14815 | height = img->cmn.height >> att->mip_level; |
14816 | sample_count = img->cmn.sample_count; |
14817 | } |
14818 | else { |
14819 | SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
14820 | SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
14821 | SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); |
14822 | } |
14823 | SOKOL_VALIDATE(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT); |
14824 | } |
14825 | if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) { |
14826 | const sg_pass_attachment_desc* att = &desc->depth_stencil_attachment; |
14827 | const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id); |
14828 | SOKOL_ASSERT(img); |
14829 | SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); |
14830 | SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); |
14831 | if (img->cmn.type == SG_IMAGETYPE_CUBE) { |
14832 | SOKOL_VALIDATE(att->slice < 6, _SG_VALIDATE_PASSDESC_FACE); |
14833 | } |
14834 | else if (img->cmn.type == SG_IMAGETYPE_ARRAY) { |
14835 | SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_LAYER); |
14836 | } |
14837 | else if (img->cmn.type == SG_IMAGETYPE_3D) { |
14838 | SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_SLICE); |
14839 | } |
14840 | SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); |
14841 | SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
14842 | SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
14843 | SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); |
14844 | SOKOL_VALIDATE(_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT); |
14845 | } |
14846 | return SOKOL_VALIDATE_END(); |
14847 | #endif |
14848 | } |
14849 | |
14850 | _SOKOL_PRIVATE bool _sg_validate_begin_pass(_sg_pass_t* pass) { |
14851 | #if !defined(SOKOL_DEBUG) |
14852 | _SOKOL_UNUSED(pass); |
14853 | return true; |
14854 | #else |
14855 | if (_sg.desc.disable_validation) { |
14856 | return true; |
14857 | } |
14858 | SOKOL_VALIDATE_BEGIN(); |
14859 | SOKOL_VALIDATE(pass->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_PASS); |
14860 | |
14861 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
14862 | const _sg_pass_attachment_t* att = &pass->cmn.color_atts[i]; |
14863 | const _sg_image_t* img = _sg_pass_color_image(pass, i); |
14864 | if (img) { |
14865 | SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); |
14866 | SOKOL_VALIDATE(img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); |
14867 | } |
14868 | } |
14869 | const _sg_image_t* ds_img = _sg_pass_ds_image(pass); |
14870 | if (ds_img) { |
14871 | const _sg_pass_attachment_t* att = &pass->cmn.ds_att; |
14872 | SOKOL_VALIDATE(ds_img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); |
14873 | SOKOL_VALIDATE(ds_img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); |
14874 | } |
14875 | return SOKOL_VALIDATE_END(); |
14876 | #endif |
14877 | } |
14878 | |
14879 | _SOKOL_PRIVATE bool _sg_validate_apply_pipeline(sg_pipeline pip_id) { |
14880 | #if !defined(SOKOL_DEBUG) |
14881 | _SOKOL_UNUSED(pip_id); |
14882 | return true; |
14883 | #else |
14884 | if (_sg.desc.disable_validation) { |
14885 | return true; |
14886 | } |
14887 | SOKOL_VALIDATE_BEGIN(); |
14888 | /* the pipeline object must be alive and valid */ |
14889 | SOKOL_VALIDATE(pip_id.id != SG_INVALID_ID, _SG_VALIDATE_APIP_PIPELINE_VALID_ID); |
14890 | const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
14891 | SOKOL_VALIDATE(pip != 0, _SG_VALIDATE_APIP_PIPELINE_EXISTS); |
14892 | if (!pip) { |
14893 | return SOKOL_VALIDATE_END(); |
14894 | } |
14895 | SOKOL_VALIDATE(pip->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_APIP_PIPELINE_VALID); |
14896 | /* the pipeline's shader must be alive and valid */ |
14897 | SOKOL_ASSERT(pip->shader); |
14898 | SOKOL_VALIDATE(pip->shader->slot.id == pip->cmn.shader_id.id, _SG_VALIDATE_APIP_SHADER_EXISTS); |
14899 | SOKOL_VALIDATE(pip->shader->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_APIP_SHADER_VALID); |
14900 | /* check that pipeline attributes match current pass attributes */ |
14901 | const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, _sg.cur_pass.id); |
14902 | if (pass) { |
14903 | /* an offscreen pass */ |
14904 | SOKOL_VALIDATE(pip->cmn.color_attachment_count == pass->cmn.num_color_atts, _SG_VALIDATE_APIP_ATT_COUNT); |
14905 | for (int i = 0; i < pip->cmn.color_attachment_count; i++) { |
14906 | const _sg_image_t* att_img = _sg_pass_color_image(pass, i); |
14907 | SOKOL_VALIDATE(pip->cmn.color_formats[i] == att_img->cmn.pixel_format, _SG_VALIDATE_APIP_COLOR_FORMAT); |
14908 | SOKOL_VALIDATE(pip->cmn.sample_count == att_img->cmn.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); |
14909 | } |
14910 | const _sg_image_t* att_dsimg = _sg_pass_ds_image(pass); |
14911 | if (att_dsimg) { |
14912 | SOKOL_VALIDATE(pip->cmn.depth_format == att_dsimg->cmn.pixel_format, _SG_VALIDATE_APIP_DEPTH_FORMAT); |
14913 | } |
14914 | else { |
14915 | SOKOL_VALIDATE(pip->cmn.depth_format == SG_PIXELFORMAT_NONE, _SG_VALIDATE_APIP_DEPTH_FORMAT); |
14916 | } |
14917 | } |
14918 | else { |
14919 | /* default pass */ |
14920 | SOKOL_VALIDATE(pip->cmn.color_attachment_count == 1, _SG_VALIDATE_APIP_ATT_COUNT); |
14921 | SOKOL_VALIDATE(pip->cmn.color_formats[0] == _sg.desc.context.color_format, _SG_VALIDATE_APIP_COLOR_FORMAT); |
14922 | SOKOL_VALIDATE(pip->cmn.depth_format == _sg.desc.context.depth_format, _SG_VALIDATE_APIP_DEPTH_FORMAT); |
14923 | SOKOL_VALIDATE(pip->cmn.sample_count == _sg.desc.context.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); |
14924 | } |
14925 | return SOKOL_VALIDATE_END(); |
14926 | #endif |
14927 | } |
14928 | |
14929 | _SOKOL_PRIVATE bool _sg_validate_apply_bindings(const sg_bindings* bindings) { |
14930 | #if !defined(SOKOL_DEBUG) |
14931 | _SOKOL_UNUSED(bindings); |
14932 | return true; |
14933 | #else |
14934 | if (_sg.desc.disable_validation) { |
14935 | return true; |
14936 | } |
14937 | SOKOL_VALIDATE_BEGIN(); |
14938 | |
14939 | /* a pipeline object must have been applied */ |
14940 | SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_ABND_PIPELINE); |
14941 | const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id); |
14942 | SOKOL_VALIDATE(pip != 0, _SG_VALIDATE_ABND_PIPELINE_EXISTS); |
14943 | if (!pip) { |
14944 | return SOKOL_VALIDATE_END(); |
14945 | } |
14946 | SOKOL_VALIDATE(pip->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_ABND_PIPELINE_VALID); |
14947 | SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); |
14948 | |
14949 | /* has expected vertex buffers, and vertex buffers still exist */ |
14950 | for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { |
14951 | if (bindings->vertex_buffers[i].id != SG_INVALID_ID) { |
14952 | SOKOL_VALIDATE(pip->cmn.vertex_layout_valid[i], _SG_VALIDATE_ABND_VBS); |
14953 | /* buffers in vertex-buffer-slots must be of type SG_BUFFERTYPE_VERTEXBUFFER */ |
14954 | const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, bindings->vertex_buffers[i].id); |
14955 | SOKOL_VALIDATE(buf != 0, _SG_VALIDATE_ABND_VB_EXISTS); |
14956 | if (buf && buf->slot.state == SG_RESOURCESTATE_VALID) { |
14957 | SOKOL_VALIDATE(SG_BUFFERTYPE_VERTEXBUFFER == buf->cmn.type, _SG_VALIDATE_ABND_VB_TYPE); |
14958 | SOKOL_VALIDATE(!buf->cmn.append_overflow, _SG_VALIDATE_ABND_VB_OVERFLOW); |
14959 | } |
14960 | } |
14961 | else { |
14962 | /* vertex buffer provided in a slot which has no vertex layout in pipeline */ |
14963 | SOKOL_VALIDATE(!pip->cmn.vertex_layout_valid[i], _SG_VALIDATE_ABND_VBS); |
14964 | } |
14965 | } |
14966 | |
14967 | /* index buffer expected or not, and index buffer still exists */ |
14968 | if (pip->cmn.index_type == SG_INDEXTYPE_NONE) { |
14969 | /* pipeline defines non-indexed rendering, but index buffer provided */ |
14970 | SOKOL_VALIDATE(bindings->index_buffer.id == SG_INVALID_ID, _SG_VALIDATE_ABND_IB); |
14971 | } |
14972 | else { |
14973 | /* pipeline defines indexed rendering, but no index buffer provided */ |
14974 | SOKOL_VALIDATE(bindings->index_buffer.id != SG_INVALID_ID, _SG_VALIDATE_ABND_NO_IB); |
14975 | } |
14976 | if (bindings->index_buffer.id != SG_INVALID_ID) { |
14977 | /* buffer in index-buffer-slot must be of type SG_BUFFERTYPE_INDEXBUFFER */ |
14978 | const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, bindings->index_buffer.id); |
14979 | SOKOL_VALIDATE(buf != 0, _SG_VALIDATE_ABND_IB_EXISTS); |
14980 | if (buf && buf->slot.state == SG_RESOURCESTATE_VALID) { |
14981 | SOKOL_VALIDATE(SG_BUFFERTYPE_INDEXBUFFER == buf->cmn.type, _SG_VALIDATE_ABND_IB_TYPE); |
14982 | SOKOL_VALIDATE(!buf->cmn.append_overflow, _SG_VALIDATE_ABND_IB_OVERFLOW); |
14983 | } |
14984 | } |
14985 | |
14986 | /* has expected vertex shader images */ |
14987 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
14988 | _sg_shader_stage_t* stage = &pip->shader->cmn.stage[SG_SHADERSTAGE_VS]; |
14989 | if (bindings->vs_images[i].id != SG_INVALID_ID) { |
14990 | SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ABND_VS_IMGS); |
14991 | const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id); |
14992 | SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_VS_IMG_EXISTS); |
14993 | if (img && img->slot.state == SG_RESOURCESTATE_VALID) { |
14994 | SOKOL_VALIDATE(img->cmn.type == stage->images[i].image_type, _SG_VALIDATE_ABND_VS_IMG_TYPES); |
14995 | } |
14996 | } |
14997 | else { |
14998 | SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ABND_VS_IMGS); |
14999 | } |
15000 | } |
15001 | |
15002 | /* has expected fragment shader images */ |
15003 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
15004 | _sg_shader_stage_t* stage = &pip->shader->cmn.stage[SG_SHADERSTAGE_FS]; |
15005 | if (bindings->fs_images[i].id != SG_INVALID_ID) { |
15006 | SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ABND_FS_IMGS); |
15007 | const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id); |
15008 | SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_FS_IMG_EXISTS); |
15009 | if (img && img->slot.state == SG_RESOURCESTATE_VALID) { |
15010 | SOKOL_VALIDATE(img->cmn.type == stage->images[i].image_type, _SG_VALIDATE_ABND_FS_IMG_TYPES); |
15011 | } |
15012 | } |
15013 | else { |
15014 | SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ABND_FS_IMGS); |
15015 | } |
15016 | } |
15017 | return SOKOL_VALIDATE_END(); |
15018 | #endif |
15019 | } |
15020 | |
15021 | _SOKOL_PRIVATE bool _sg_validate_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { |
15022 | #if !defined(SOKOL_DEBUG) |
15023 | _SOKOL_UNUSED(stage_index); |
15024 | _SOKOL_UNUSED(ub_index); |
15025 | _SOKOL_UNUSED(data); |
15026 | return true; |
15027 | #else |
15028 | if (_sg.desc.disable_validation) { |
15029 | return true; |
15030 | } |
15031 | SOKOL_ASSERT((stage_index == SG_SHADERSTAGE_VS) || (stage_index == SG_SHADERSTAGE_FS)); |
15032 | SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); |
15033 | SOKOL_VALIDATE_BEGIN(); |
15034 | SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_AUB_NO_PIPELINE); |
15035 | const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id); |
15036 | SOKOL_ASSERT(pip && (pip->slot.id == _sg.cur_pipeline.id)); |
15037 | SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->cmn.shader_id.id)); |
15038 | |
15039 | /* check that there is a uniform block at 'stage' and 'ub_index' */ |
15040 | const _sg_shader_stage_t* stage = &pip->shader->cmn.stage[stage_index]; |
15041 | SOKOL_VALIDATE(ub_index < stage->num_uniform_blocks, _SG_VALIDATE_AUB_NO_UB_AT_SLOT); |
15042 | |
15043 | /* check that the provided data size doesn't exceed the uniform block size */ |
15044 | SOKOL_VALIDATE(data->size <= stage->uniform_blocks[ub_index].size, _SG_VALIDATE_AUB_SIZE); |
15045 | |
15046 | return SOKOL_VALIDATE_END(); |
15047 | #endif |
15048 | } |
15049 | |
/* validate an sg_update_buffer() call (debug builds only; always returns true
   in release builds or when validation is disabled)
*/
_SOKOL_PRIVATE bool _sg_validate_update_buffer(const _sg_buffer_t* buf, const sg_range* data) {
#if !defined(SOKOL_DEBUG)
    _SOKOL_UNUSED(buf);
    _SOKOL_UNUSED(data);
    return true;
#else
    if (_sg.desc.disable_validation) {
        return true;
    }
    SOKOL_ASSERT(buf && data && data->ptr);
    SOKOL_VALIDATE_BEGIN();
    /* immutable buffers cannot be updated */
    SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDATEBUF_USAGE);
    /* the update data must fit into the buffer */
    SOKOL_VALIDATE(buf->cmn.size >= (int)data->size, _SG_VALIDATE_UPDATEBUF_SIZE);
    /* a buffer may only be updated once per frame, and update and append
       cannot be mixed within the same frame */
    SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_ONCE);
    SOKOL_VALIDATE(buf->cmn.append_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_APPEND);
    return SOKOL_VALIDATE_END();
#endif
}
15068 | |
/* validate an sg_append_buffer() call (debug builds only; always returns true
   in release builds or when validation is disabled)
*/
_SOKOL_PRIVATE bool _sg_validate_append_buffer(const _sg_buffer_t* buf, const sg_range* data) {
#if !defined(SOKOL_DEBUG)
    _SOKOL_UNUSED(buf);
    _SOKOL_UNUSED(data);
    return true;
#else
    if (_sg.desc.disable_validation) {
        return true;
    }
    SOKOL_ASSERT(buf && data && data->ptr);
    SOKOL_VALIDATE_BEGIN();
    /* immutable buffers cannot be appended to */
    SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_APPENDBUF_USAGE);
    /* the appended data must fit into the remaining buffer space */
    SOKOL_VALIDATE(buf->cmn.size >= (buf->cmn.append_pos + (int)data->size), _SG_VALIDATE_APPENDBUF_SIZE);
    /* append cannot be mixed with update within the same frame
       (multiple appends per frame are allowed, unlike updates) */
    SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_APPENDBUF_UPDATE);
    return SOKOL_VALIDATE_END();
#endif
}
15086 | |
/* validate an sg_update_image() call (debug builds only; always returns true
   in release builds or when validation is disabled)
*/
_SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image_t* img, const sg_image_data* data) {
#if !defined(SOKOL_DEBUG)
    _SOKOL_UNUSED(img);
    _SOKOL_UNUSED(data);
    return true;
#else
    if (_sg.desc.disable_validation) {
        return true;
    }
    SOKOL_ASSERT(img && data);
    SOKOL_VALIDATE_BEGIN();
    /* immutable images cannot be updated */
    SOKOL_VALIDATE(img->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDIMG_USAGE);
    /* an image may only be updated once per frame */
    SOKOL_VALIDATE(img->cmn.upd_frame_index != _sg.frame_index, _SG_VALIDATE_UPDIMG_ONCE);
    /* check the provided subimage sizes against the image dimensions;
       records its results through the same validation mechanism, so they
       are picked up by SOKOL_VALIDATE_END() below */
    _sg_validate_image_data(data,
        img->cmn.pixel_format,
        img->cmn.width,
        img->cmn.height,
        (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6 : 1,   /* cube maps have 6 faces */
        img->cmn.num_mipmaps,
        img->cmn.num_slices);
    return SOKOL_VALIDATE_END();
#endif
}
15110 | |
15111 | /*== fill in desc default values =============================================*/ |
15112 | _SOKOL_PRIVATE sg_buffer_desc _sg_buffer_desc_defaults(const sg_buffer_desc* desc) { |
15113 | sg_buffer_desc def = *desc; |
15114 | def.type = _sg_def(def.type, SG_BUFFERTYPE_VERTEXBUFFER); |
15115 | def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE); |
15116 | if (def.size == 0) { |
15117 | def.size = def.data.size; |
15118 | } |
15119 | else if (def.data.size == 0) { |
15120 | def.data.size = def.size; |
15121 | } |
15122 | return def; |
15123 | } |
15124 | |
15125 | _SOKOL_PRIVATE sg_image_desc _sg_image_desc_defaults(const sg_image_desc* desc) { |
15126 | sg_image_desc def = *desc; |
15127 | def.type = _sg_def(def.type, SG_IMAGETYPE_2D); |
15128 | def.num_slices = _sg_def(def.num_slices, 1); |
15129 | def.num_mipmaps = _sg_def(def.num_mipmaps, 1); |
15130 | def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE); |
15131 | if (desc->render_target) { |
15132 | def.pixel_format = _sg_def(def.pixel_format, _sg.desc.context.color_format); |
15133 | def.sample_count = _sg_def(def.sample_count, _sg.desc.context.sample_count); |
15134 | } |
15135 | else { |
15136 | def.pixel_format = _sg_def(def.pixel_format, SG_PIXELFORMAT_RGBA8); |
15137 | def.sample_count = _sg_def(def.sample_count, 1); |
15138 | } |
15139 | def.min_filter = _sg_def(def.min_filter, SG_FILTER_NEAREST); |
15140 | def.mag_filter = _sg_def(def.mag_filter, SG_FILTER_NEAREST); |
15141 | def.wrap_u = _sg_def(def.wrap_u, SG_WRAP_REPEAT); |
15142 | def.wrap_v = _sg_def(def.wrap_v, SG_WRAP_REPEAT); |
15143 | def.wrap_w = _sg_def(def.wrap_w, SG_WRAP_REPEAT); |
15144 | def.border_color = _sg_def(def.border_color, SG_BORDERCOLOR_OPAQUE_BLACK); |
15145 | def.max_anisotropy = _sg_def(def.max_anisotropy, 1); |
15146 | def.max_lod = _sg_def_flt(def.max_lod, FLT_MAX); |
15147 | return def; |
15148 | } |
15149 | |
/* return a copy of the shader desc with zero-initialized members
   replaced by their default values
*/
_SOKOL_PRIVATE sg_shader_desc _sg_shader_desc_defaults(const sg_shader_desc* desc) {
    sg_shader_desc def = *desc;
    /* default shader entry point name is backend-specific */
    #if defined(SOKOL_METAL)
        def.vs.entry = _sg_def(def.vs.entry, "_main");
        def.fs.entry = _sg_def(def.fs.entry, "_main");
    #else
        def.vs.entry = _sg_def(def.vs.entry, "main");
        def.fs.entry = _sg_def(def.fs.entry, "main");
    #endif
    #if defined(SOKOL_D3D11)
        /* default HLSL compile targets, only relevant when compiling from source */
        if (def.vs.source) {
            def.vs.d3d11_target = _sg_def(def.vs.d3d11_target, "vs_4_0");
        }
        if (def.fs.source) {
            def.fs.d3d11_target = _sg_def(def.fs.d3d11_target, "ps_4_0");
        }
    #endif
    /* patch defaults into the per-stage reflection info; the inner loops
       stop at the first unused slot (slots must be filled contiguously) */
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &def.vs : &def.fs;
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index];
            if (0 == ub_desc->size) {
                /* first empty uniform block slot terminates the list */
                break;
            }
            ub_desc->layout = _sg_def(ub_desc->layout, SG_UNIFORMLAYOUT_NATIVE);
            for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) {
                sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index];
                if (u_desc->type == SG_UNIFORMTYPE_INVALID) {
                    /* first invalid uniform terminates the member list */
                    break;
                }
                /* a scalar uniform is an array of one element */
                u_desc->array_count = _sg_def(u_desc->array_count, 1);
            }
        }
        for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) {
            sg_shader_image_desc* img_desc = &stage_desc->images[img_index];
            if (img_desc->image_type == _SG_IMAGETYPE_DEFAULT) {
                /* first default-typed image slot terminates the list */
                break;
            }
            img_desc->sampler_type = _sg_def(img_desc->sampler_type, SG_SAMPLERTYPE_FLOAT);
        }
    }
    return def;
}
15193 | |
/* return a copy of the pipeline desc with zero-initialized members replaced
   by their default values, and with vertex-layout offsets and strides
   resolved from the attribute formats where the caller left them at zero
*/
_SOKOL_PRIVATE sg_pipeline_desc _sg_pipeline_desc_defaults(const sg_pipeline_desc* desc) {
    sg_pipeline_desc def = *desc;

    def.primitive_type = _sg_def(def.primitive_type, SG_PRIMITIVETYPE_TRIANGLES);
    def.index_type = _sg_def(def.index_type, SG_INDEXTYPE_NONE);
    def.cull_mode = _sg_def(def.cull_mode, SG_CULLMODE_NONE);
    def.face_winding = _sg_def(def.face_winding, SG_FACEWINDING_CW);
    def.sample_count = _sg_def(def.sample_count, _sg.desc.context.sample_count);

    /* stencil defaults: always pass, never modify */
    def.stencil.front.compare = _sg_def(def.stencil.front.compare, SG_COMPAREFUNC_ALWAYS);
    def.stencil.front.fail_op = _sg_def(def.stencil.front.fail_op, SG_STENCILOP_KEEP);
    def.stencil.front.depth_fail_op = _sg_def(def.stencil.front.depth_fail_op, SG_STENCILOP_KEEP);
    def.stencil.front.pass_op = _sg_def(def.stencil.front.pass_op, SG_STENCILOP_KEEP);
    def.stencil.back.compare = _sg_def(def.stencil.back.compare, SG_COMPAREFUNC_ALWAYS);
    def.stencil.back.fail_op = _sg_def(def.stencil.back.fail_op, SG_STENCILOP_KEEP);
    def.stencil.back.depth_fail_op = _sg_def(def.stencil.back.depth_fail_op, SG_STENCILOP_KEEP);
    def.stencil.back.pass_op = _sg_def(def.stencil.back.pass_op, SG_STENCILOP_KEEP);

    def.depth.compare = _sg_def(def.depth.compare, SG_COMPAREFUNC_ALWAYS);
    def.depth.pixel_format = _sg_def(def.depth.pixel_format, _sg.desc.context.depth_format);
    def.color_count = _sg_def(def.color_count, 1);
    /* clamp (rather than reject) an out-of-range color attachment count */
    if (def.color_count > SG_MAX_COLOR_ATTACHMENTS) {
        def.color_count = SG_MAX_COLOR_ATTACHMENTS;
    }
    for (int i = 0; i < def.color_count; i++) {
        sg_color_state* cs = &def.colors[i];
        cs->pixel_format = _sg_def(cs->pixel_format, _sg.desc.context.color_format);
        cs->write_mask = _sg_def(cs->write_mask, SG_COLORMASK_RGBA);
        /* blend defaults: ONE/ZERO with ADD is pass-through (blending off) */
        sg_blend_state* bs = &def.colors[i].blend;
        bs->src_factor_rgb = _sg_def(bs->src_factor_rgb, SG_BLENDFACTOR_ONE);
        bs->dst_factor_rgb = _sg_def(bs->dst_factor_rgb, SG_BLENDFACTOR_ZERO);
        bs->op_rgb = _sg_def(bs->op_rgb, SG_BLENDOP_ADD);
        bs->src_factor_alpha = _sg_def(bs->src_factor_alpha, SG_BLENDFACTOR_ONE);
        bs->dst_factor_alpha = _sg_def(bs->dst_factor_alpha, SG_BLENDFACTOR_ZERO);
        bs->op_alpha = _sg_def(bs->op_alpha, SG_BLENDOP_ADD);
    }

    /* vertex-buffer step defaults for each buffer slot referenced by an
       attribute; the loop stops at the first unused attribute slot */
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index];
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS);
        sg_buffer_layout_desc* b_desc = &def.layout.buffers[a_desc->buffer_index];
        b_desc->step_func = _sg_def(b_desc->step_func, SG_VERTEXSTEP_PER_VERTEX);
        b_desc->step_rate = _sg_def(b_desc->step_rate, 1);
    }

    /* resolve vertex layout strides and offsets */
    int auto_offset[SG_MAX_SHADERSTAGE_BUFFERS];
    _sg_clear(auto_offset, sizeof(auto_offset));
    bool use_auto_offset = true;
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        /* to use computed offsets, *all* attr offsets must be 0 */
        if (def.layout.attrs[attr_index].offset != 0) {
            use_auto_offset = false;
        }
    }
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index];
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS);
        if (use_auto_offset) {
            /* attributes are packed tightly in declaration order */
            a_desc->offset = auto_offset[a_desc->buffer_index];
        }
        /* running per-buffer byte offset, also yields the auto stride below */
        auto_offset[a_desc->buffer_index] += _sg_vertexformat_bytesize(a_desc->format);
    }
    /* compute vertex strides if needed */
    for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) {
        sg_buffer_layout_desc* l_desc = &def.layout.buffers[buf_index];
        if (l_desc->stride == 0) {
            l_desc->stride = auto_offset[buf_index];
        }
    }

    return def;
}
15273 | |
15274 | _SOKOL_PRIVATE sg_pass_desc _sg_pass_desc_defaults(const sg_pass_desc* desc) { |
15275 | /* FIXME: no values to replace in sg_pass_desc? */ |
15276 | sg_pass_desc def = *desc; |
15277 | return def; |
15278 | } |
15279 | |
15280 | /*== allocate/initialize resource private functions ==========================*/ |
15281 | _SOKOL_PRIVATE sg_buffer _sg_alloc_buffer(void) { |
15282 | sg_buffer res; |
15283 | int slot_index = _sg_pool_alloc_index(&_sg.pools.buffer_pool); |
15284 | if (_SG_INVALID_SLOT_INDEX != slot_index) { |
15285 | res.id = _sg_slot_alloc(&_sg.pools.buffer_pool, &_sg.pools.buffers[slot_index].slot, slot_index); |
15286 | } |
15287 | else { |
15288 | /* pool is exhausted */ |
15289 | res.id = SG_INVALID_ID; |
15290 | } |
15291 | return res; |
15292 | } |
15293 | |
15294 | _SOKOL_PRIVATE sg_image _sg_alloc_image(void) { |
15295 | sg_image res; |
15296 | int slot_index = _sg_pool_alloc_index(&_sg.pools.image_pool); |
15297 | if (_SG_INVALID_SLOT_INDEX != slot_index) { |
15298 | res.id = _sg_slot_alloc(&_sg.pools.image_pool, &_sg.pools.images[slot_index].slot, slot_index); |
15299 | } |
15300 | else { |
15301 | /* pool is exhausted */ |
15302 | res.id = SG_INVALID_ID; |
15303 | } |
15304 | return res; |
15305 | } |
15306 | |
15307 | _SOKOL_PRIVATE sg_shader _sg_alloc_shader(void) { |
15308 | sg_shader res; |
15309 | int slot_index = _sg_pool_alloc_index(&_sg.pools.shader_pool); |
15310 | if (_SG_INVALID_SLOT_INDEX != slot_index) { |
15311 | res.id = _sg_slot_alloc(&_sg.pools.shader_pool, &_sg.pools.shaders[slot_index].slot, slot_index); |
15312 | } |
15313 | else { |
15314 | /* pool is exhausted */ |
15315 | res.id = SG_INVALID_ID; |
15316 | } |
15317 | return res; |
15318 | } |
15319 | |
15320 | _SOKOL_PRIVATE sg_pipeline _sg_alloc_pipeline(void) { |
15321 | sg_pipeline res; |
15322 | int slot_index = _sg_pool_alloc_index(&_sg.pools.pipeline_pool); |
15323 | if (_SG_INVALID_SLOT_INDEX != slot_index) { |
15324 | res.id =_sg_slot_alloc(&_sg.pools.pipeline_pool, &_sg.pools.pipelines[slot_index].slot, slot_index); |
15325 | } |
15326 | else { |
15327 | /* pool is exhausted */ |
15328 | res.id = SG_INVALID_ID; |
15329 | } |
15330 | return res; |
15331 | } |
15332 | |
15333 | _SOKOL_PRIVATE sg_pass _sg_alloc_pass(void) { |
15334 | sg_pass res; |
15335 | int slot_index = _sg_pool_alloc_index(&_sg.pools.pass_pool); |
15336 | if (_SG_INVALID_SLOT_INDEX != slot_index) { |
15337 | res.id = _sg_slot_alloc(&_sg.pools.pass_pool, &_sg.pools.passes[slot_index].slot, slot_index); |
15338 | } |
15339 | else { |
15340 | /* pool is exhausted */ |
15341 | res.id = SG_INVALID_ID; |
15342 | } |
15343 | return res; |
15344 | } |
15345 | |
15346 | _SOKOL_PRIVATE void _sg_dealloc_buffer(_sg_buffer_t* buf) { |
15347 | SOKOL_ASSERT(buf && (buf->slot.state == SG_RESOURCESTATE_ALLOC) && (buf->slot.id != SG_INVALID_ID)); |
15348 | _sg_pool_free_index(&_sg.pools.buffer_pool, _sg_slot_index(buf->slot.id)); |
15349 | _sg_reset_slot(&buf->slot); |
15350 | } |
15351 | |
15352 | _SOKOL_PRIVATE void _sg_dealloc_image(_sg_image_t* img) { |
15353 | SOKOL_ASSERT(img && (img->slot.state == SG_RESOURCESTATE_ALLOC) && (img->slot.id != SG_INVALID_ID)); |
15354 | _sg_pool_free_index(&_sg.pools.image_pool, _sg_slot_index(img->slot.id)); |
15355 | _sg_reset_slot(&img->slot); |
15356 | } |
15357 | |
15358 | _SOKOL_PRIVATE void _sg_dealloc_shader(_sg_shader_t* shd) { |
15359 | SOKOL_ASSERT(shd && (shd->slot.state == SG_RESOURCESTATE_ALLOC) && (shd->slot.id != SG_INVALID_ID)); |
15360 | _sg_pool_free_index(&_sg.pools.shader_pool, _sg_slot_index(shd->slot.id)); |
15361 | _sg_reset_slot(&shd->slot); |
15362 | } |
15363 | |
15364 | _SOKOL_PRIVATE void _sg_dealloc_pipeline(_sg_pipeline_t* pip) { |
15365 | SOKOL_ASSERT(pip && (pip->slot.state == SG_RESOURCESTATE_ALLOC) && (pip->slot.id != SG_INVALID_ID)); |
15366 | _sg_pool_free_index(&_sg.pools.pipeline_pool, _sg_slot_index(pip->slot.id)); |
15367 | _sg_reset_slot(&pip->slot); |
15368 | } |
15369 | |
15370 | _SOKOL_PRIVATE void _sg_dealloc_pass(_sg_pass_t* pass) { |
15371 | SOKOL_ASSERT(pass && (pass->slot.state == SG_RESOURCESTATE_ALLOC) && (pass->slot.id != SG_INVALID_ID)); |
15372 | _sg_pool_free_index(&_sg.pools.pass_pool, _sg_slot_index(pass->slot.id)); |
15373 | _sg_reset_slot(&pass->slot); |
15374 | } |
15375 | |
15376 | _SOKOL_PRIVATE void _sg_init_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { |
15377 | SOKOL_ASSERT(buf && (buf->slot.state == SG_RESOURCESTATE_ALLOC)); |
15378 | SOKOL_ASSERT(desc); |
15379 | buf->slot.ctx_id = _sg.active_context.id; |
15380 | if (_sg_validate_buffer_desc(desc)) { |
15381 | buf->slot.state = _sg_create_buffer(buf, desc); |
15382 | } |
15383 | else { |
15384 | buf->slot.state = SG_RESOURCESTATE_FAILED; |
15385 | } |
15386 | SOKOL_ASSERT((buf->slot.state == SG_RESOURCESTATE_VALID)||(buf->slot.state == SG_RESOURCESTATE_FAILED)); |
15387 | } |
15388 | |
15389 | _SOKOL_PRIVATE void _sg_init_image(_sg_image_t* img, const sg_image_desc* desc) { |
15390 | SOKOL_ASSERT(img && (img->slot.state == SG_RESOURCESTATE_ALLOC)); |
15391 | SOKOL_ASSERT(desc); |
15392 | img->slot.ctx_id = _sg.active_context.id; |
15393 | if (_sg_validate_image_desc(desc)) { |
15394 | img->slot.state = _sg_create_image(img, desc); |
15395 | } |
15396 | else { |
15397 | img->slot.state = SG_RESOURCESTATE_FAILED; |
15398 | } |
15399 | SOKOL_ASSERT((img->slot.state == SG_RESOURCESTATE_VALID)||(img->slot.state == SG_RESOURCESTATE_FAILED)); |
15400 | } |
15401 | |
15402 | _SOKOL_PRIVATE void _sg_init_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { |
15403 | SOKOL_ASSERT(shd && (shd->slot.state == SG_RESOURCESTATE_ALLOC)); |
15404 | SOKOL_ASSERT(desc); |
15405 | shd->slot.ctx_id = _sg.active_context.id; |
15406 | if (_sg_validate_shader_desc(desc)) { |
15407 | shd->slot.state = _sg_create_shader(shd, desc); |
15408 | } |
15409 | else { |
15410 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
15411 | } |
15412 | SOKOL_ASSERT((shd->slot.state == SG_RESOURCESTATE_VALID)||(shd->slot.state == SG_RESOURCESTATE_FAILED)); |
15413 | } |
15414 | |
15415 | _SOKOL_PRIVATE void _sg_init_pipeline(_sg_pipeline_t* pip, const sg_pipeline_desc* desc) { |
15416 | SOKOL_ASSERT(pip && (pip->slot.state == SG_RESOURCESTATE_ALLOC)); |
15417 | SOKOL_ASSERT(desc); |
15418 | pip->slot.ctx_id = _sg.active_context.id; |
15419 | if (_sg_validate_pipeline_desc(desc)) { |
15420 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id); |
15421 | if (shd && (shd->slot.state == SG_RESOURCESTATE_VALID)) { |
15422 | pip->slot.state = _sg_create_pipeline(pip, shd, desc); |
15423 | } |
15424 | else { |
15425 | pip->slot.state = SG_RESOURCESTATE_FAILED; |
15426 | } |
15427 | } |
15428 | else { |
15429 | pip->slot.state = SG_RESOURCESTATE_FAILED; |
15430 | } |
15431 | SOKOL_ASSERT((pip->slot.state == SG_RESOURCESTATE_VALID)||(pip->slot.state == SG_RESOURCESTATE_FAILED)); |
15432 | } |
15433 | |
/* validate the desc, resolve the attachment image handles into image
   pointers, and create the backend resource for an allocated pass slot;
   leaves the slot in VALID or FAILED state
*/
_SOKOL_PRIVATE void _sg_init_pass(_sg_pass_t* pass, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC);
    SOKOL_ASSERT(desc);
    /* remember the context this resource was created on */
    pass->slot.ctx_id = _sg.active_context.id;
    if (_sg_validate_pass_desc(desc)) {
        /* lookup pass attachment image pointers */
        /* layout: [0..SG_MAX_COLOR_ATTACHMENTS-1] color, last slot depth-stencil */
        _sg_image_t* att_imgs[SG_MAX_COLOR_ATTACHMENTS + 1];
        for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            if (desc->color_attachments[i].image.id) {
                att_imgs[i] = _sg_lookup_image(&_sg.pools, desc->color_attachments[i].image.id);
                if (!(att_imgs[i] && att_imgs[i]->slot.state == SG_RESOURCESTATE_VALID)) {
                    /* referenced image is missing or not valid */
                    pass->slot.state = SG_RESOURCESTATE_FAILED;
                    return;
                }
            }
            else {
                /* unused color attachment slot */
                att_imgs[i] = 0;
            }
        }
        /* the (optional) depth-stencil attachment occupies the last array slot */
        const int ds_att_index = SG_MAX_COLOR_ATTACHMENTS;
        if (desc->depth_stencil_attachment.image.id) {
            att_imgs[ds_att_index] = _sg_lookup_image(&_sg.pools, desc->depth_stencil_attachment.image.id);
            if (!(att_imgs[ds_att_index] && att_imgs[ds_att_index]->slot.state == SG_RESOURCESTATE_VALID)) {
                /* referenced image is missing or not valid */
                pass->slot.state = SG_RESOURCESTATE_FAILED;
                return;
            }
        }
        else {
            att_imgs[ds_att_index] = 0;
        }
        pass->slot.state = _sg_create_pass(pass, att_imgs, desc);
    }
    else {
        pass->slot.state = SG_RESOURCESTATE_FAILED;
    }
    SOKOL_ASSERT((pass->slot.state == SG_RESOURCESTATE_VALID)||(pass->slot.state == SG_RESOURCESTATE_FAILED));
}
15471 | |
15472 | _SOKOL_PRIVATE void _sg_uninit_buffer(_sg_buffer_t* buf) { |
15473 | SOKOL_ASSERT(buf && ((buf->slot.state == SG_RESOURCESTATE_VALID) || (buf->slot.state == SG_RESOURCESTATE_FAILED))); |
15474 | if (buf->slot.ctx_id == _sg.active_context.id) { |
15475 | _sg_discard_buffer(buf); |
15476 | _sg_reset_buffer_to_alloc_state(buf); |
15477 | } |
15478 | else { |
15479 | SG_LOG("_sg_uninit_buffer: active context mismatch (must be same as for creation)"); |
15480 | _SG_TRACE_NOARGS(err_context_mismatch); |
15481 | } |
15482 | } |
15483 | |
15484 | _SOKOL_PRIVATE void _sg_uninit_image(_sg_image_t* img) { |
15485 | SOKOL_ASSERT(img && ((img->slot.state == SG_RESOURCESTATE_VALID) || (img->slot.state == SG_RESOURCESTATE_FAILED))); |
15486 | if (img->slot.ctx_id == _sg.active_context.id) { |
15487 | _sg_discard_image(img); |
15488 | _sg_reset_image_to_alloc_state(img); |
15489 | } |
15490 | else { |
15491 | SG_LOG("_sg_uninit_image: active context mismatch (must be same as for creation)"); |
15492 | _SG_TRACE_NOARGS(err_context_mismatch); |
15493 | } |
15494 | } |
15495 | |
15496 | _SOKOL_PRIVATE void _sg_uninit_shader(_sg_shader_t* shd) { |
15497 | SOKOL_ASSERT(shd && ((shd->slot.state == SG_RESOURCESTATE_VALID) || (shd->slot.state == SG_RESOURCESTATE_FAILED))); |
15498 | if (shd->slot.ctx_id == _sg.active_context.id) { |
15499 | _sg_discard_shader(shd); |
15500 | _sg_reset_shader_to_alloc_state(shd); |
15501 | } |
15502 | else { |
15503 | SG_LOG("_sg_uninit_shader: active context mismatch (must be same as for creation)"); |
15504 | _SG_TRACE_NOARGS(err_context_mismatch); |
15505 | } |
15506 | } |
15507 | |
15508 | _SOKOL_PRIVATE void _sg_uninit_pipeline(_sg_pipeline_t* pip) { |
15509 | SOKOL_ASSERT(pip && ((pip->slot.state == SG_RESOURCESTATE_VALID) || (pip->slot.state == SG_RESOURCESTATE_FAILED))); |
15510 | if (pip->slot.ctx_id == _sg.active_context.id) { |
15511 | _sg_discard_pipeline(pip); |
15512 | _sg_reset_pipeline_to_alloc_state(pip); |
15513 | } |
15514 | else { |
15515 | SG_LOG("_sg_uninit_pipeline: active context mismatch (must be same as for creation)"); |
15516 | _SG_TRACE_NOARGS(err_context_mismatch); |
15517 | } |
15518 | } |
15519 | |
15520 | _SOKOL_PRIVATE void _sg_uninit_pass(_sg_pass_t* pass) { |
15521 | SOKOL_ASSERT(pass && ((pass->slot.state == SG_RESOURCESTATE_VALID) || (pass->slot.state == SG_RESOURCESTATE_FAILED))); |
15522 | if (pass->slot.ctx_id == _sg.active_context.id) { |
15523 | _sg_discard_pass(pass); |
15524 | _sg_reset_pass_to_alloc_state(pass); |
15525 | } |
15526 | else { |
15527 | SG_LOG("_sg_uninit_pass: active context mismatch (must be same as for creation)"); |
15528 | _SG_TRACE_NOARGS(err_context_mismatch); |
15529 | } |
15530 | } |
15531 | |
15532 | _SOKOL_PRIVATE void _sg_setup_commit_listeners(const sg_desc* desc) { |
15533 | SOKOL_ASSERT(desc->max_commit_listeners > 0); |
15534 | SOKOL_ASSERT(0 == _sg.commit_listeners.items); |
15535 | SOKOL_ASSERT(0 == _sg.commit_listeners.num); |
15536 | SOKOL_ASSERT(0 == _sg.commit_listeners.upper); |
15537 | _sg.commit_listeners.num = desc->max_commit_listeners; |
15538 | const size_t size = (size_t)_sg.commit_listeners.num * sizeof(sg_commit_listener); |
15539 | _sg.commit_listeners.items = (sg_commit_listener*)_sg_malloc_clear(size); |
15540 | } |
15541 | |
15542 | _SOKOL_PRIVATE void _sg_discard_commit_listeners(void) { |
15543 | SOKOL_ASSERT(0 != _sg.commit_listeners.items); |
15544 | _sg_free(_sg.commit_listeners.items); |
15545 | _sg.commit_listeners.items = 0; |
15546 | } |
15547 | |
15548 | _SOKOL_PRIVATE void _sg_notify_commit_listeners(void) { |
15549 | SOKOL_ASSERT(_sg.commit_listeners.items); |
15550 | for (int i = 0; i < _sg.commit_listeners.upper; i++) { |
15551 | const sg_commit_listener* listener = &_sg.commit_listeners.items[i]; |
15552 | if (listener->func) { |
15553 | listener->func(listener->user_data); |
15554 | } |
15555 | } |
15556 | } |
15557 | |
15558 | _SOKOL_PRIVATE bool _sg_add_commit_listener(const sg_commit_listener* new_listener) { |
15559 | SOKOL_ASSERT(new_listener && new_listener->func); |
15560 | SOKOL_ASSERT(_sg.commit_listeners.items); |
15561 | // first check if the listener hadn't been added already |
15562 | for (int i = 0; i < _sg.commit_listeners.upper; i++) { |
15563 | const sg_commit_listener* slot = &_sg.commit_listeners.items[i]; |
15564 | if ((slot->func == new_listener->func) && (slot->user_data == new_listener->user_data)) { |
15565 | SG_LOG("attempting to add identical commit listener\n"); |
15566 | return false; |
15567 | } |
15568 | } |
15569 | // first try to plug a hole |
15570 | sg_commit_listener* slot = 0; |
15571 | for (int i = 0; i < _sg.commit_listeners.upper; i++) { |
15572 | if (_sg.commit_listeners.items[i].func == 0) { |
15573 | slot = &_sg.commit_listeners.items[i]; |
15574 | break; |
15575 | } |
15576 | } |
15577 | if (!slot) { |
15578 | // append to end |
15579 | if (_sg.commit_listeners.upper < _sg.commit_listeners.num) { |
15580 | slot = &_sg.commit_listeners.items[_sg.commit_listeners.upper++]; |
15581 | } |
15582 | } |
15583 | if (!slot) { |
15584 | SG_LOG("commit listener array full\n"); |
15585 | return false; |
15586 | } |
15587 | *slot = *new_listener; |
15588 | return true; |
15589 | } |
15590 | |
15591 | _SOKOL_PRIVATE bool _sg_remove_commit_listener(const sg_commit_listener* listener) { |
15592 | SOKOL_ASSERT(listener && listener->func); |
15593 | SOKOL_ASSERT(_sg.commit_listeners.items); |
15594 | for (int i = 0; i < _sg.commit_listeners.upper; i++) { |
15595 | sg_commit_listener* slot = &_sg.commit_listeners.items[i]; |
15596 | // both the function pointer and user data must match! |
15597 | if ((slot->func == listener->func) && (slot->user_data == listener->user_data)) { |
15598 | slot->func = 0; |
15599 | slot->user_data = 0; |
15600 | // NOTE: since _sg_add_commit_listener() already catches duplicates, |
15601 | // we don't need to worry about them here |
15602 | return true; |
15603 | } |
15604 | } |
15605 | return false; |
15606 | } |
15607 | |
/* return a copy of the sg_desc with zero-initialized members replaced
   by their default values
*/
_SOKOL_PRIVATE sg_desc _sg_desc_defaults(const sg_desc* desc) {
    /*
        NOTE: on WebGPU, the default color pixel format MUST be provided,
        cannot be a default compile-time constant.
    */
    sg_desc res = *desc;
    #if defined(SOKOL_WGPU)
        SOKOL_ASSERT(SG_PIXELFORMAT_NONE != res.context.color_format);
    #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11)
        res.context.color_format = _sg_def(res.context.color_format, SG_PIXELFORMAT_BGRA8);
    #else
        /* GL backends default to RGBA8 */
        res.context.color_format = _sg_def(res.context.color_format, SG_PIXELFORMAT_RGBA8);
    #endif
    res.context.depth_format = _sg_def(res.context.depth_format, SG_PIXELFORMAT_DEPTH_STENCIL);
    res.context.sample_count = _sg_def(res.context.sample_count, 1);
    /* resource pool sizes and internal buffer capacities */
    res.buffer_pool_size = _sg_def(res.buffer_pool_size, _SG_DEFAULT_BUFFER_POOL_SIZE);
    res.image_pool_size = _sg_def(res.image_pool_size, _SG_DEFAULT_IMAGE_POOL_SIZE);
    res.shader_pool_size = _sg_def(res.shader_pool_size, _SG_DEFAULT_SHADER_POOL_SIZE);
    res.pipeline_pool_size = _sg_def(res.pipeline_pool_size, _SG_DEFAULT_PIPELINE_POOL_SIZE);
    res.pass_pool_size = _sg_def(res.pass_pool_size, _SG_DEFAULT_PASS_POOL_SIZE);
    res.context_pool_size = _sg_def(res.context_pool_size, _SG_DEFAULT_CONTEXT_POOL_SIZE);
    res.uniform_buffer_size = _sg_def(res.uniform_buffer_size, _SG_DEFAULT_UB_SIZE);
    res.staging_buffer_size = _sg_def(res.staging_buffer_size, _SG_DEFAULT_STAGING_SIZE);
    res.sampler_cache_size = _sg_def(res.sampler_cache_size, _SG_DEFAULT_SAMPLER_CACHE_CAPACITY);
    res.max_commit_listeners = _sg_def(res.max_commit_listeners, _SG_DEFAULT_MAX_COMMIT_LISTENERS);
    return res;
}
15635 | |
15636 | /*== PUBLIC API FUNCTIONS ====================================================*/ |
15637 | |
SOKOL_API_IMPL void sg_setup(const sg_desc* desc) {
    // initialize sokol-gfx; must be called once before any other sg_*() function
    SOKOL_ASSERT(desc);
    // canaries catch desc structs that were not zero-initialized by the caller
    SOKOL_ASSERT((desc->_start_canary == 0) && (desc->_end_canary == 0));
    // custom allocator callbacks must be provided either both or neither
    SOKOL_ASSERT((desc->allocator.alloc && desc->allocator.free) || (!desc->allocator.alloc && !desc->allocator.free));
    _SG_CLEAR_ARC_STRUCT(_sg_state_t, _sg);
    _sg.desc = _sg_desc_defaults(desc);
    // NOTE: order matters here, pools and commit listeners must exist
    // before the backend is set up and the first context is created
    _sg_setup_pools(&_sg.pools, &_sg.desc);
    _sg_setup_commit_listeners(&_sg.desc);
    _sg.frame_index = 1;
    _sg_setup_backend(&_sg.desc);
    _sg.valid = true;
    // create and activate a default context
    sg_setup_context();
}
15651 | |
SOKOL_API_IMPL void sg_shutdown(void) {
    /* can only delete resources for the currently set context here, if multiple
       contexts are used, the app code must take care of properly releasing them
       (since only the app code can switch between 3D-API contexts)
    */
    if (_sg.active_context.id != SG_INVALID_ID) {
        _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, _sg.active_context.id);
        if (ctx) {
            // destroy all resources belonging to the active context, then the context itself
            _sg_discard_all_resources(&_sg.pools, _sg.active_context.id);
            _sg_discard_context(ctx);
        }
    }
    // teardown in reverse order of sg_setup()
    _sg_discard_backend();
    _sg_discard_commit_listeners();
    _sg_discard_pools(&_sg.pools);
    // clear the entire state struct (also sets _sg.valid = false)
    _SG_CLEAR_ARC_STRUCT(_sg_state_t, _sg);
}
15669 | |
SOKOL_API_IMPL bool sg_isvalid(void) {
    // true between sg_setup() and sg_shutdown()
    return _sg.valid;
}
15673 | |
SOKOL_API_IMPL sg_desc sg_query_desc(void) {
    // return the sg_desc passed to sg_setup() with defaults resolved
    SOKOL_ASSERT(_sg.valid);
    return _sg.desc;
}
15678 | |
SOKOL_API_IMPL sg_backend sg_query_backend(void) {
    // return the active 3D-API backend identifier
    SOKOL_ASSERT(_sg.valid);
    return _sg.backend;
}
15683 | |
SOKOL_API_IMPL sg_features sg_query_features(void) {
    // return the runtime-detected optional feature flags of the active backend
    SOKOL_ASSERT(_sg.valid);
    return _sg.features;
}
15688 | |
SOKOL_API_IMPL sg_limits sg_query_limits(void) {
    // return the runtime-detected resource limits of the active backend
    SOKOL_ASSERT(_sg.valid);
    return _sg.limits;
}
15693 | |
15694 | SOKOL_API_IMPL sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt) { |
15695 | SOKOL_ASSERT(_sg.valid); |
15696 | int fmt_index = (int) fmt; |
15697 | SOKOL_ASSERT((fmt_index > SG_PIXELFORMAT_NONE) && (fmt_index < _SG_PIXELFORMAT_NUM)); |
15698 | return _sg.formats[fmt_index]; |
15699 | } |
15700 | |
SOKOL_API_IMPL sg_context sg_setup_context(void) {
    // create a new context, make it the active context, and return its handle
    SOKOL_ASSERT(_sg.valid);
    sg_context res;
    int slot_index = _sg_pool_alloc_index(&_sg.pools.context_pool);
    if (_SG_INVALID_SLOT_INDEX != slot_index) {
        // reserve the slot and derive the public handle id from it
        res.id = _sg_slot_alloc(&_sg.pools.context_pool, &_sg.pools.contexts[slot_index].slot, slot_index);
        _sg_context_t* ctx = _sg_context_at(&_sg.pools, res.id);
        ctx->slot.state = _sg_create_context(ctx);
        SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_VALID);
        _sg_activate_context(ctx);
    }
    else {
        /* pool is exhausted */
        res.id = SG_INVALID_ID;
    }
    // NOTE: res may be SG_INVALID_ID here, callers must check the returned handle
    _sg.active_context = res;
    return res;
}
15719 | |
SOKOL_API_IMPL void sg_discard_context(sg_context ctx_id) {
    // destroy a context and all resources that were created while it was active
    SOKOL_ASSERT(_sg.valid);
    // resources must be destroyed before the context they belong to
    _sg_discard_all_resources(&_sg.pools, ctx_id.id);
    _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id);
    if (ctx) {
        _sg_discard_context(ctx);
        // return the slot to the pool (state back to INITIAL, free list updated)
        _sg_reset_context_to_alloc_state(ctx);
        _sg_reset_slot(&ctx->slot);
        _sg_pool_free_index(&_sg.pools.context_pool, _sg_slot_index(ctx_id.id));
    }
    // after discarding, no context is active anymore
    _sg.active_context.id = SG_INVALID_ID;
    _sg_activate_context(0);
}
15733 | |
15734 | SOKOL_API_IMPL void sg_activate_context(sg_context ctx_id) { |
15735 | SOKOL_ASSERT(_sg.valid); |
15736 | _sg.active_context = ctx_id; |
15737 | _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id); |
15738 | /* NOTE: ctx can be 0 here if the context is no longer valid */ |
15739 | _sg_activate_context(ctx); |
15740 | } |
15741 | |
15742 | SOKOL_API_IMPL sg_trace_hooks sg_install_trace_hooks(const sg_trace_hooks* trace_hooks) { |
15743 | SOKOL_ASSERT(_sg.valid); |
15744 | SOKOL_ASSERT(trace_hooks); |
15745 | _SOKOL_UNUSED(trace_hooks); |
15746 | #if defined(SOKOL_TRACE_HOOKS) |
15747 | sg_trace_hooks old_hooks = _sg.hooks; |
15748 | _sg.hooks = *trace_hooks; |
15749 | #else |
15750 | static sg_trace_hooks old_hooks; |
15751 | SG_LOG("sg_install_trace_hooks() called, but SG_TRACE_HOOKS is not defined!"); |
15752 | #endif |
15753 | return old_hooks; |
15754 | } |
15755 | |
15756 | SOKOL_API_IMPL sg_buffer sg_alloc_buffer(void) { |
15757 | SOKOL_ASSERT(_sg.valid); |
15758 | sg_buffer res = _sg_alloc_buffer(); |
15759 | _SG_TRACE_ARGS(alloc_buffer, res); |
15760 | return res; |
15761 | } |
15762 | |
15763 | SOKOL_API_IMPL sg_image sg_alloc_image(void) { |
15764 | SOKOL_ASSERT(_sg.valid); |
15765 | sg_image res = _sg_alloc_image(); |
15766 | _SG_TRACE_ARGS(alloc_image, res); |
15767 | return res; |
15768 | } |
15769 | |
15770 | SOKOL_API_IMPL sg_shader sg_alloc_shader(void) { |
15771 | SOKOL_ASSERT(_sg.valid); |
15772 | sg_shader res = _sg_alloc_shader(); |
15773 | _SG_TRACE_ARGS(alloc_shader, res); |
15774 | return res; |
15775 | } |
15776 | |
15777 | SOKOL_API_IMPL sg_pipeline sg_alloc_pipeline(void) { |
15778 | SOKOL_ASSERT(_sg.valid); |
15779 | sg_pipeline res = _sg_alloc_pipeline(); |
15780 | _SG_TRACE_ARGS(alloc_pipeline, res); |
15781 | return res; |
15782 | } |
15783 | |
15784 | SOKOL_API_IMPL sg_pass sg_alloc_pass(void) { |
15785 | SOKOL_ASSERT(_sg.valid); |
15786 | sg_pass res = _sg_alloc_pass(); |
15787 | _SG_TRACE_ARGS(alloc_pass, res); |
15788 | return res; |
15789 | } |
15790 | |
15791 | SOKOL_API_IMPL void sg_dealloc_buffer(sg_buffer buf_id) { |
15792 | SOKOL_ASSERT(_sg.valid); |
15793 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
15794 | if (buf) { |
15795 | if (buf->slot.state == SG_RESOURCESTATE_ALLOC) { |
15796 | _sg_dealloc_buffer(buf); |
15797 | } |
15798 | else { |
15799 | SG_LOG("sg_dealloc_buffer: buffer must be in ALLOC state\n"); |
15800 | } |
15801 | } |
15802 | _SG_TRACE_ARGS(dealloc_buffer, buf_id); |
15803 | } |
15804 | |
15805 | SOKOL_API_IMPL void sg_dealloc_image(sg_image img_id) { |
15806 | SOKOL_ASSERT(_sg.valid); |
15807 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
15808 | if (img) { |
15809 | if (img->slot.state == SG_RESOURCESTATE_ALLOC) { |
15810 | _sg_dealloc_image(img); |
15811 | } |
15812 | else { |
15813 | SG_LOG("sg_dealloc_image: image must be in ALLOC state\n"); |
15814 | } |
15815 | } |
15816 | _SG_TRACE_ARGS(dealloc_image, img_id); |
15817 | } |
15818 | |
15819 | SOKOL_API_IMPL void sg_dealloc_shader(sg_shader shd_id) { |
15820 | SOKOL_ASSERT(_sg.valid); |
15821 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
15822 | if (shd) { |
15823 | if (shd->slot.state == SG_RESOURCESTATE_ALLOC) { |
15824 | _sg_dealloc_shader(shd); |
15825 | } |
15826 | else { |
15827 | SG_LOG("sg_dealloc_shader: shader must be in ALLOC state\n"); |
15828 | } |
15829 | } |
15830 | _SG_TRACE_ARGS(dealloc_shader, shd_id); |
15831 | } |
15832 | |
15833 | SOKOL_API_IMPL void sg_dealloc_pipeline(sg_pipeline pip_id) { |
15834 | SOKOL_ASSERT(_sg.valid); |
15835 | _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
15836 | if (pip) { |
15837 | if (pip->slot.state == SG_RESOURCESTATE_ALLOC) { |
15838 | _sg_dealloc_pipeline(pip); |
15839 | } |
15840 | else { |
15841 | SG_LOG("sg_dealloc_pipeline: pipeline must be in ALLOC state\n"); |
15842 | } |
15843 | } |
15844 | _SG_TRACE_ARGS(dealloc_pipeline, pip_id); |
15845 | } |
15846 | |
15847 | SOKOL_API_IMPL void sg_dealloc_pass(sg_pass pass_id) { |
15848 | SOKOL_ASSERT(_sg.valid); |
15849 | _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
15850 | if (pass) { |
15851 | if (pass->slot.state == SG_RESOURCESTATE_ALLOC) { |
15852 | _sg_dealloc_pass(pass); |
15853 | } |
15854 | else { |
15855 | SG_LOG("sg_dealloc_pass: pass must be in ALLOC state\n"); |
15856 | } |
15857 | } |
15858 | _SG_TRACE_ARGS(dealloc_pass, pass_id); |
15859 | } |
15860 | |
15861 | SOKOL_API_IMPL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc) { |
15862 | SOKOL_ASSERT(_sg.valid); |
15863 | sg_buffer_desc desc_def = _sg_buffer_desc_defaults(desc); |
15864 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
15865 | if (buf) { |
15866 | if (buf->slot.state == SG_RESOURCESTATE_ALLOC) { |
15867 | _sg_init_buffer(buf, &desc_def); |
15868 | SOKOL_ASSERT((buf->slot.state == SG_RESOURCESTATE_VALID) || (buf->slot.state == SG_RESOURCESTATE_FAILED)); |
15869 | } |
15870 | else { |
15871 | SG_LOG("sg_init_buffer: buffer must be in alloc state\n"); |
15872 | } |
15873 | } |
15874 | _SG_TRACE_ARGS(init_buffer, buf_id, &desc_def); |
15875 | } |
15876 | |
15877 | SOKOL_API_IMPL void sg_init_image(sg_image img_id, const sg_image_desc* desc) { |
15878 | SOKOL_ASSERT(_sg.valid); |
15879 | sg_image_desc desc_def = _sg_image_desc_defaults(desc); |
15880 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
15881 | if (img) { |
15882 | if (img->slot.state == SG_RESOURCESTATE_ALLOC) { |
15883 | _sg_init_image(img, &desc_def); |
15884 | SOKOL_ASSERT((img->slot.state == SG_RESOURCESTATE_VALID) || (img->slot.state == SG_RESOURCESTATE_FAILED)); |
15885 | } |
15886 | else { |
15887 | SG_LOG("sg_init_image: image must be in alloc state\n"); |
15888 | } |
15889 | } |
15890 | _SG_TRACE_ARGS(init_image, img_id, &desc_def); |
15891 | } |
15892 | |
15893 | SOKOL_API_IMPL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc) { |
15894 | SOKOL_ASSERT(_sg.valid); |
15895 | sg_shader_desc desc_def = _sg_shader_desc_defaults(desc); |
15896 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
15897 | if (shd) { |
15898 | if (shd->slot.state == SG_RESOURCESTATE_ALLOC) { |
15899 | _sg_init_shader(shd, &desc_def); |
15900 | SOKOL_ASSERT((shd->slot.state == SG_RESOURCESTATE_VALID) || (shd->slot.state == SG_RESOURCESTATE_FAILED)); |
15901 | } |
15902 | else { |
15903 | SG_LOG("sg_init_shader: shader must be in alloc state\n"); |
15904 | } |
15905 | } |
15906 | _SG_TRACE_ARGS(init_shader, shd_id, &desc_def); |
15907 | } |
15908 | |
15909 | SOKOL_API_IMPL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc) { |
15910 | SOKOL_ASSERT(_sg.valid); |
15911 | sg_pipeline_desc desc_def = _sg_pipeline_desc_defaults(desc); |
15912 | _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
15913 | if (pip) { |
15914 | if (pip->slot.state == SG_RESOURCESTATE_ALLOC) { |
15915 | _sg_init_pipeline(pip, &desc_def); |
15916 | SOKOL_ASSERT((pip->slot.state == SG_RESOURCESTATE_VALID) || (pip->slot.state == SG_RESOURCESTATE_FAILED)); |
15917 | } |
15918 | else { |
15919 | SG_LOG("sg_init_pipeline: pipeline must be in alloc state\n"); |
15920 | } |
15921 | } |
15922 | _SG_TRACE_ARGS(init_pipeline, pip_id, &desc_def); |
15923 | } |
15924 | |
15925 | SOKOL_API_IMPL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc) { |
15926 | SOKOL_ASSERT(_sg.valid); |
15927 | sg_pass_desc desc_def = _sg_pass_desc_defaults(desc); |
15928 | _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
15929 | if (pass) { |
15930 | if (pass->slot.state == SG_RESOURCESTATE_ALLOC) { |
15931 | _sg_init_pass(pass, &desc_def); |
15932 | SOKOL_ASSERT((pass->slot.state == SG_RESOURCESTATE_VALID) || (pass->slot.state == SG_RESOURCESTATE_FAILED)); |
15933 | } |
15934 | else { |
15935 | SG_LOG("sg_init_pass: pass must be in alloc state\n"); |
15936 | } |
15937 | } |
15938 | _SG_TRACE_ARGS(init_pass, pass_id, &desc_def); |
15939 | } |
15940 | |
15941 | SOKOL_API_IMPL void sg_uninit_buffer(sg_buffer buf_id) { |
15942 | SOKOL_ASSERT(_sg.valid); |
15943 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
15944 | if (buf) { |
15945 | if ((buf->slot.state == SG_RESOURCESTATE_VALID) || (buf->slot.state == SG_RESOURCESTATE_FAILED)) { |
15946 | _sg_uninit_buffer(buf); |
15947 | SOKOL_ASSERT(buf->slot.state == SG_RESOURCESTATE_ALLOC); |
15948 | } |
15949 | else { |
15950 | SG_LOG("sg_uninit_buffer: buffer must be in VALID or FAILED state\n"); |
15951 | } |
15952 | } |
15953 | _SG_TRACE_ARGS(uninit_buffer, buf_id); |
15954 | } |
15955 | |
15956 | SOKOL_API_IMPL void sg_uninit_image(sg_image img_id) { |
15957 | SOKOL_ASSERT(_sg.valid); |
15958 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
15959 | if (img) { |
15960 | if ((img->slot.state == SG_RESOURCESTATE_VALID) || (img->slot.state == SG_RESOURCESTATE_FAILED)) { |
15961 | _sg_uninit_image(img); |
15962 | SOKOL_ASSERT(img->slot.state == SG_RESOURCESTATE_ALLOC); |
15963 | } |
15964 | else { |
15965 | SG_LOG("sg_uninit_image: image must be in VALID or FAILED state\n"); |
15966 | } |
15967 | } |
15968 | _SG_TRACE_ARGS(uninit_image, img_id); |
15969 | } |
15970 | |
15971 | SOKOL_API_IMPL void sg_uninit_shader(sg_shader shd_id) { |
15972 | SOKOL_ASSERT(_sg.valid); |
15973 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
15974 | if (shd) { |
15975 | if ((shd->slot.state == SG_RESOURCESTATE_VALID) || (shd->slot.state == SG_RESOURCESTATE_FAILED)) { |
15976 | _sg_uninit_shader(shd); |
15977 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_ALLOC); |
15978 | } |
15979 | else { |
15980 | SG_LOG("sg_uninit_shader: shader must be in VALID or FAILED state\n"); |
15981 | } |
15982 | } |
15983 | _SG_TRACE_ARGS(uninit_shader, shd_id); |
15984 | } |
15985 | |
15986 | SOKOL_API_IMPL void sg_uninit_pipeline(sg_pipeline pip_id) { |
15987 | SOKOL_ASSERT(_sg.valid); |
15988 | _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
15989 | if (pip) { |
15990 | if ((pip->slot.state == SG_RESOURCESTATE_VALID) || (pip->slot.state == SG_RESOURCESTATE_FAILED)) { |
15991 | _sg_uninit_pipeline(pip); |
15992 | SOKOL_ASSERT(pip->slot.state == SG_RESOURCESTATE_ALLOC); |
15993 | } |
15994 | else { |
15995 | SG_LOG("sg_uninit_pipeline: pipeline must be in VALID or FAILED state\n"); |
15996 | } |
15997 | } |
15998 | _SG_TRACE_ARGS(uninit_pipeline, pip_id); |
15999 | } |
16000 | |
16001 | SOKOL_API_IMPL void sg_uninit_pass(sg_pass pass_id) { |
16002 | SOKOL_ASSERT(_sg.valid); |
16003 | _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
16004 | if (pass) { |
16005 | if ((pass->slot.state == SG_RESOURCESTATE_VALID) || (pass->slot.state == SG_RESOURCESTATE_FAILED)) { |
16006 | _sg_uninit_pass(pass); |
16007 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_ALLOC); |
16008 | } |
16009 | else { |
16010 | SG_LOG("sg_uninit_pass: pass must be in VALID or FAILED state\n"); |
16011 | } |
16012 | } |
16013 | _SG_TRACE_ARGS(uninit_pass, pass_id); |
16014 | } |
16015 | |
16016 | /*-- set allocated resource to failed state ----------------------------------*/ |
16017 | SOKOL_API_IMPL void sg_fail_buffer(sg_buffer buf_id) { |
16018 | SOKOL_ASSERT(_sg.valid); |
16019 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
16020 | if (buf) { |
16021 | if (buf->slot.state == SG_RESOURCESTATE_ALLOC) { |
16022 | buf->slot.ctx_id = _sg.active_context.id; |
16023 | buf->slot.state = SG_RESOURCESTATE_FAILED; |
16024 | } |
16025 | else { |
16026 | SG_LOG("sg_fail_buffer: buffer must be in ALLOC state\n"); |
16027 | } |
16028 | } |
16029 | _SG_TRACE_ARGS(fail_buffer, buf_id); |
16030 | } |
16031 | |
16032 | SOKOL_API_IMPL void sg_fail_image(sg_image img_id) { |
16033 | SOKOL_ASSERT(_sg.valid); |
16034 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
16035 | if (img) { |
16036 | if (img->slot.state == SG_RESOURCESTATE_ALLOC) { |
16037 | img->slot.ctx_id = _sg.active_context.id; |
16038 | img->slot.state = SG_RESOURCESTATE_FAILED; |
16039 | } |
16040 | else { |
16041 | SG_LOG("sg_fail_image: image must be in ALLOC state\n"); |
16042 | } |
16043 | } |
16044 | _SG_TRACE_ARGS(fail_image, img_id); |
16045 | } |
16046 | |
16047 | SOKOL_API_IMPL void sg_fail_shader(sg_shader shd_id) { |
16048 | SOKOL_ASSERT(_sg.valid); |
16049 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
16050 | if (shd) { |
16051 | if (shd->slot.state == SG_RESOURCESTATE_ALLOC) { |
16052 | shd->slot.ctx_id = _sg.active_context.id; |
16053 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
16054 | } |
16055 | else { |
16056 | SG_LOG("sg_fail_shader: shader must be in ALLOC state\n"); |
16057 | } |
16058 | } |
16059 | _SG_TRACE_ARGS(fail_shader, shd_id); |
16060 | } |
16061 | |
16062 | SOKOL_API_IMPL void sg_fail_pipeline(sg_pipeline pip_id) { |
16063 | SOKOL_ASSERT(_sg.valid); |
16064 | _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
16065 | if (pip) { |
16066 | if (pip->slot.state == SG_RESOURCESTATE_ALLOC) { |
16067 | pip->slot.ctx_id = _sg.active_context.id; |
16068 | pip->slot.state = SG_RESOURCESTATE_FAILED; |
16069 | } |
16070 | else { |
16071 | SG_LOG("sg_fail_pipeline: pipeline must be in ALLOC state\n"); |
16072 | } |
16073 | } |
16074 | _SG_TRACE_ARGS(fail_pipeline, pip_id); |
16075 | } |
16076 | |
16077 | SOKOL_API_IMPL void sg_fail_pass(sg_pass pass_id) { |
16078 | SOKOL_ASSERT(_sg.valid); |
16079 | _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
16080 | if (pass) { |
16081 | if (pass->slot.state == SG_RESOURCESTATE_ALLOC) { |
16082 | pass->slot.ctx_id = _sg.active_context.id; |
16083 | pass->slot.state = SG_RESOURCESTATE_FAILED; |
16084 | } |
16085 | else { |
16086 | SG_LOG("sg_fail_pass: pass must be in ALLOC state\n"); |
16087 | } |
16088 | } |
16089 | _SG_TRACE_ARGS(fail_pass, pass_id); |
16090 | } |
16091 | |
16092 | /*-- get resource state */ |
16093 | SOKOL_API_IMPL sg_resource_state sg_query_buffer_state(sg_buffer buf_id) { |
16094 | SOKOL_ASSERT(_sg.valid); |
16095 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
16096 | sg_resource_state res = buf ? buf->slot.state : SG_RESOURCESTATE_INVALID; |
16097 | return res; |
16098 | } |
16099 | |
16100 | SOKOL_API_IMPL sg_resource_state sg_query_image_state(sg_image img_id) { |
16101 | SOKOL_ASSERT(_sg.valid); |
16102 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
16103 | sg_resource_state res = img ? img->slot.state : SG_RESOURCESTATE_INVALID; |
16104 | return res; |
16105 | } |
16106 | |
16107 | SOKOL_API_IMPL sg_resource_state sg_query_shader_state(sg_shader shd_id) { |
16108 | SOKOL_ASSERT(_sg.valid); |
16109 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
16110 | sg_resource_state res = shd ? shd->slot.state : SG_RESOURCESTATE_INVALID; |
16111 | return res; |
16112 | } |
16113 | |
16114 | SOKOL_API_IMPL sg_resource_state sg_query_pipeline_state(sg_pipeline pip_id) { |
16115 | SOKOL_ASSERT(_sg.valid); |
16116 | _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
16117 | sg_resource_state res = pip ? pip->slot.state : SG_RESOURCESTATE_INVALID; |
16118 | return res; |
16119 | } |
16120 | |
16121 | SOKOL_API_IMPL sg_resource_state sg_query_pass_state(sg_pass pass_id) { |
16122 | SOKOL_ASSERT(_sg.valid); |
16123 | _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
16124 | sg_resource_state res = pass ? pass->slot.state : SG_RESOURCESTATE_INVALID; |
16125 | return res; |
16126 | } |
16127 | |
16128 | /*-- allocate and initialize resource ----------------------------------------*/ |
16129 | SOKOL_API_IMPL sg_buffer sg_make_buffer(const sg_buffer_desc* desc) { |
16130 | SOKOL_ASSERT(_sg.valid); |
16131 | SOKOL_ASSERT(desc); |
16132 | sg_buffer_desc desc_def = _sg_buffer_desc_defaults(desc); |
16133 | sg_buffer buf_id = _sg_alloc_buffer(); |
16134 | if (buf_id.id != SG_INVALID_ID) { |
16135 | _sg_buffer_t* buf = _sg_buffer_at(&_sg.pools, buf_id.id); |
16136 | SOKOL_ASSERT(buf && (buf->slot.state == SG_RESOURCESTATE_ALLOC)); |
16137 | _sg_init_buffer(buf, &desc_def); |
16138 | SOKOL_ASSERT((buf->slot.state == SG_RESOURCESTATE_VALID) || (buf->slot.state == SG_RESOURCESTATE_FAILED)); |
16139 | } |
16140 | else { |
16141 | SG_LOG("buffer pool exhausted!"); |
16142 | _SG_TRACE_NOARGS(err_buffer_pool_exhausted); |
16143 | } |
16144 | _SG_TRACE_ARGS(make_buffer, &desc_def, buf_id); |
16145 | return buf_id; |
16146 | } |
16147 | |
16148 | SOKOL_API_IMPL sg_image sg_make_image(const sg_image_desc* desc) { |
16149 | SOKOL_ASSERT(_sg.valid); |
16150 | SOKOL_ASSERT(desc); |
16151 | sg_image_desc desc_def = _sg_image_desc_defaults(desc); |
16152 | sg_image img_id = _sg_alloc_image(); |
16153 | if (img_id.id != SG_INVALID_ID) { |
16154 | _sg_image_t* img = _sg_image_at(&_sg.pools, img_id.id); |
16155 | SOKOL_ASSERT(img && (img->slot.state == SG_RESOURCESTATE_ALLOC)); |
16156 | _sg_init_image(img, &desc_def); |
16157 | SOKOL_ASSERT((img->slot.state == SG_RESOURCESTATE_VALID) || (img->slot.state == SG_RESOURCESTATE_FAILED)); |
16158 | } |
16159 | else { |
16160 | SG_LOG("image pool exhausted!"); |
16161 | _SG_TRACE_NOARGS(err_image_pool_exhausted); |
16162 | } |
16163 | _SG_TRACE_ARGS(make_image, &desc_def, img_id); |
16164 | return img_id; |
16165 | } |
16166 | |
16167 | SOKOL_API_IMPL sg_shader sg_make_shader(const sg_shader_desc* desc) { |
16168 | SOKOL_ASSERT(_sg.valid); |
16169 | SOKOL_ASSERT(desc); |
16170 | sg_shader_desc desc_def = _sg_shader_desc_defaults(desc); |
16171 | sg_shader shd_id = _sg_alloc_shader(); |
16172 | if (shd_id.id != SG_INVALID_ID) { |
16173 | _sg_shader_t* shd = _sg_shader_at(&_sg.pools, shd_id.id); |
16174 | SOKOL_ASSERT(shd && (shd->slot.state == SG_RESOURCESTATE_ALLOC)); |
16175 | _sg_init_shader(shd, &desc_def); |
16176 | SOKOL_ASSERT((shd->slot.state == SG_RESOURCESTATE_VALID) || (shd->slot.state == SG_RESOURCESTATE_FAILED)); |
16177 | } |
16178 | else { |
16179 | SG_LOG("shader pool exhausted!"); |
16180 | _SG_TRACE_NOARGS(err_shader_pool_exhausted); |
16181 | } |
16182 | _SG_TRACE_ARGS(make_shader, &desc_def, shd_id); |
16183 | return shd_id; |
16184 | } |
16185 | |
16186 | SOKOL_API_IMPL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc) { |
16187 | SOKOL_ASSERT(_sg.valid); |
16188 | SOKOL_ASSERT(desc); |
16189 | sg_pipeline_desc desc_def = _sg_pipeline_desc_defaults(desc); |
16190 | sg_pipeline pip_id = _sg_alloc_pipeline(); |
16191 | if (pip_id.id != SG_INVALID_ID) { |
16192 | _sg_pipeline_t* pip = _sg_pipeline_at(&_sg.pools, pip_id.id); |
16193 | SOKOL_ASSERT(pip && (pip->slot.state == SG_RESOURCESTATE_ALLOC)); |
16194 | _sg_init_pipeline(pip, &desc_def); |
16195 | SOKOL_ASSERT((pip->slot.state == SG_RESOURCESTATE_VALID) || (pip->slot.state == SG_RESOURCESTATE_FAILED)); |
16196 | } |
16197 | else { |
16198 | SG_LOG("pipeline pool exhausted!"); |
16199 | _SG_TRACE_NOARGS(err_pipeline_pool_exhausted); |
16200 | } |
16201 | _SG_TRACE_ARGS(make_pipeline, &desc_def, pip_id); |
16202 | return pip_id; |
16203 | } |
16204 | |
16205 | SOKOL_API_IMPL sg_pass sg_make_pass(const sg_pass_desc* desc) { |
16206 | SOKOL_ASSERT(_sg.valid); |
16207 | SOKOL_ASSERT(desc); |
16208 | sg_pass_desc desc_def = _sg_pass_desc_defaults(desc); |
16209 | sg_pass pass_id = _sg_alloc_pass(); |
16210 | if (pass_id.id != SG_INVALID_ID) { |
16211 | _sg_pass_t* pass = _sg_pass_at(&_sg.pools, pass_id.id); |
16212 | SOKOL_ASSERT(pass && (pass->slot.state == SG_RESOURCESTATE_ALLOC)); |
16213 | _sg_init_pass(pass, &desc_def); |
16214 | SOKOL_ASSERT((pass->slot.state == SG_RESOURCESTATE_VALID) || (pass->slot.state == SG_RESOURCESTATE_FAILED)); |
16215 | } |
16216 | else { |
16217 | SG_LOG("pass pool exhausted!"); |
16218 | _SG_TRACE_NOARGS(err_pass_pool_exhausted); |
16219 | } |
16220 | _SG_TRACE_ARGS(make_pass, &desc_def, pass_id); |
16221 | return pass_id; |
16222 | } |
16223 | |
16224 | /*-- destroy resource --------------------------------------------------------*/ |
16225 | SOKOL_API_IMPL void sg_destroy_buffer(sg_buffer buf_id) { |
16226 | SOKOL_ASSERT(_sg.valid); |
16227 | _SG_TRACE_ARGS(destroy_buffer, buf_id); |
16228 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
16229 | if (buf) { |
16230 | if ((buf->slot.state == SG_RESOURCESTATE_VALID) || (buf->slot.state == SG_RESOURCESTATE_FAILED)) { |
16231 | _sg_uninit_buffer(buf); |
16232 | SOKOL_ASSERT(buf->slot.state == SG_RESOURCESTATE_ALLOC); |
16233 | } |
16234 | if (buf->slot.state == SG_RESOURCESTATE_ALLOC) { |
16235 | _sg_dealloc_buffer(buf); |
16236 | SOKOL_ASSERT(buf->slot.state == SG_RESOURCESTATE_INITIAL); |
16237 | } |
16238 | } |
16239 | } |
16240 | |
16241 | SOKOL_API_IMPL void sg_destroy_image(sg_image img_id) { |
16242 | SOKOL_ASSERT(_sg.valid); |
16243 | _SG_TRACE_ARGS(destroy_image, img_id); |
16244 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
16245 | if (img) { |
16246 | if ((img->slot.state == SG_RESOURCESTATE_VALID) || (img->slot.state == SG_RESOURCESTATE_FAILED)) { |
16247 | _sg_uninit_image(img); |
16248 | SOKOL_ASSERT(img->slot.state == SG_RESOURCESTATE_ALLOC); |
16249 | } |
16250 | if (img->slot.state == SG_RESOURCESTATE_ALLOC) { |
16251 | _sg_dealloc_image(img); |
16252 | SOKOL_ASSERT(img->slot.state == SG_RESOURCESTATE_INITIAL); |
16253 | } |
16254 | } |
16255 | } |
16256 | |
16257 | SOKOL_API_IMPL void sg_destroy_shader(sg_shader shd_id) { |
16258 | SOKOL_ASSERT(_sg.valid); |
16259 | _SG_TRACE_ARGS(destroy_shader, shd_id); |
16260 | _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
16261 | if (shd) { |
16262 | if ((shd->slot.state == SG_RESOURCESTATE_VALID) || (shd->slot.state == SG_RESOURCESTATE_FAILED)) { |
16263 | _sg_uninit_shader(shd); |
16264 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_ALLOC); |
16265 | } |
16266 | if (shd->slot.state == SG_RESOURCESTATE_ALLOC) { |
16267 | _sg_dealloc_shader(shd); |
16268 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_INITIAL); |
16269 | } |
16270 | } |
16271 | } |
16272 | |
16273 | SOKOL_API_IMPL void sg_destroy_pipeline(sg_pipeline pip_id) { |
16274 | SOKOL_ASSERT(_sg.valid); |
16275 | _SG_TRACE_ARGS(destroy_pipeline, pip_id); |
16276 | _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
16277 | if (pip) { |
16278 | if ((pip->slot.state == SG_RESOURCESTATE_VALID) || (pip->slot.state == SG_RESOURCESTATE_FAILED)) { |
16279 | _sg_uninit_pipeline(pip); |
16280 | SOKOL_ASSERT(pip->slot.state == SG_RESOURCESTATE_ALLOC); |
16281 | } |
16282 | if (pip->slot.state == SG_RESOURCESTATE_ALLOC) { |
16283 | _sg_dealloc_pipeline(pip); |
16284 | SOKOL_ASSERT(pip->slot.state == SG_RESOURCESTATE_INITIAL); |
16285 | } |
16286 | } |
16287 | } |
16288 | |
16289 | SOKOL_API_IMPL void sg_destroy_pass(sg_pass pass_id) { |
16290 | SOKOL_ASSERT(_sg.valid); |
16291 | _SG_TRACE_ARGS(destroy_pass, pass_id); |
16292 | _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
16293 | if (pass) { |
16294 | if ((pass->slot.state == SG_RESOURCESTATE_VALID) || (pass->slot.state == SG_RESOURCESTATE_FAILED)) { |
16295 | _sg_uninit_pass(pass); |
16296 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_ALLOC); |
16297 | } |
16298 | if (pass->slot.state == SG_RESOURCESTATE_ALLOC) { |
16299 | _sg_dealloc_pass(pass); |
16300 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_INITIAL); |
16301 | } |
16302 | } |
16303 | } |
16304 | |
16305 | SOKOL_API_IMPL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height) { |
16306 | SOKOL_ASSERT(_sg.valid); |
16307 | SOKOL_ASSERT(pass_action); |
16308 | SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0)); |
16309 | sg_pass_action pa; |
16310 | _sg_resolve_default_pass_action(pass_action, &pa); |
16311 | _sg.cur_pass.id = SG_INVALID_ID; |
16312 | _sg.pass_valid = true; |
16313 | _sg_begin_pass(0, &pa, width, height); |
16314 | _SG_TRACE_ARGS(begin_default_pass, pass_action, width, height); |
16315 | } |
16316 | |
16317 | SOKOL_API_IMPL void sg_begin_default_passf(const sg_pass_action* pass_action, float width, float height) { |
16318 | sg_begin_default_pass(pass_action, (int)width, (int)height); |
16319 | } |
16320 | |
SOKOL_API_IMPL void sg_begin_pass(sg_pass pass_id, const sg_pass_action* pass_action) {
    // begin an offscreen render pass into the pass object's attachments
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(pass_action);
    // canaries catch pass_action structs that were not zero-initialized
    SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0));
    _sg.cur_pass = pass_id;
    _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
    if (pass && _sg_validate_begin_pass(pass)) {
        _sg.pass_valid = true;
        sg_pass_action pa;
        _sg_resolve_default_pass_action(pass_action, &pa);
        // render area size is taken from the first color attachment image
        const _sg_image_t* img = _sg_pass_color_image(pass, 0);
        SOKOL_ASSERT(img);
        const int w = img->cmn.width;
        const int h = img->cmn.height;
        _sg_begin_pass(pass, &pa, w, h);
        _SG_TRACE_ARGS(begin_pass, pass_id, pass_action);
    }
    else {
        // invalid pass: mark the pass as invalid so subsequent draw calls are dropped
        _sg.pass_valid = false;
        _SG_TRACE_NOARGS(err_pass_invalid);
    }
}
16343 | |
16344 | SOKOL_API_IMPL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) { |
16345 | SOKOL_ASSERT(_sg.valid); |
16346 | if (!_sg.pass_valid) { |
16347 | _SG_TRACE_NOARGS(err_pass_invalid); |
16348 | return; |
16349 | } |
16350 | _sg_apply_viewport(x, y, width, height, origin_top_left); |
16351 | _SG_TRACE_ARGS(apply_viewport, x, y, width, height, origin_top_left); |
16352 | } |
16353 | |
16354 | SOKOL_API_IMPL void sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left) { |
16355 | sg_apply_viewport((int)x, (int)y, (int)width, (int)height, origin_top_left); |
16356 | } |
16357 | |
16358 | SOKOL_API_IMPL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) { |
16359 | SOKOL_ASSERT(_sg.valid); |
16360 | if (!_sg.pass_valid) { |
16361 | _SG_TRACE_NOARGS(err_pass_invalid); |
16362 | return; |
16363 | } |
16364 | _sg_apply_scissor_rect(x, y, width, height, origin_top_left); |
16365 | _SG_TRACE_ARGS(apply_scissor_rect, x, y, width, height, origin_top_left); |
16366 | } |
16367 | |
16368 | SOKOL_API_IMPL void sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left) { |
16369 | sg_apply_scissor_rect((int)x, (int)y, (int)width, (int)height, origin_top_left); |
16370 | } |
16371 | |
SOKOL_API_IMPL void sg_apply_pipeline(sg_pipeline pip_id) {
    // make pip_id the current pipeline for subsequent sg_apply_bindings()/sg_draw()
    SOKOL_ASSERT(_sg.valid);
    // a new pipeline invalidates previously applied bindings
    _sg.bindings_valid = false;
    if (!_sg_validate_apply_pipeline(pip_id)) {
        // validation failed: drop subsequent draw calls
        _sg.next_draw_valid = false;
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    if (!_sg.pass_valid) {
        _SG_TRACE_NOARGS(err_pass_invalid);
        return;
    }
    _sg.cur_pipeline = pip_id;
    _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
    SOKOL_ASSERT(pip);
    // draws are only valid if the pipeline object itself is in VALID state
    _sg.next_draw_valid = (SG_RESOURCESTATE_VALID == pip->slot.state);
    SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->cmn.shader_id.id));
    _sg_apply_pipeline(pip);
    _SG_TRACE_ARGS(apply_pipeline, pip_id);
}
16392 | |
/* sg_apply_bindings(): resolve the resource ids in an sg_bindings struct
   into backend resource objects and hand them to the backend. Each lookup
   folds the resource's VALID state (and buffer overflow status) into
   _sg.next_draw_valid; the actual backend call only happens when all
   referenced resources are usable. */
SOKOL_API_IMPL void sg_apply_bindings(const sg_bindings* bindings) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(bindings);
    SOKOL_ASSERT((bindings->_start_canary == 0) && (bindings->_end_canary==0));
    if (!_sg_validate_apply_bindings(bindings)) {
        _sg.next_draw_valid = false;
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    _sg.bindings_valid = true;

    _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id);
    SOKOL_ASSERT(pip);

    /* collect vertex buffers; bindings must be packed front-to-back, the
       first zero id terminates the list (num_vbs is incremented in the
       for-increment, so a 'break' leaves it at the count of bound buffers) */
    _sg_buffer_t* vbs[SG_MAX_SHADERSTAGE_BUFFERS] = { 0 };
    int num_vbs = 0;
    for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++, num_vbs++) {
        if (bindings->vertex_buffers[i].id) {
            vbs[i] = _sg_lookup_buffer(&_sg.pools, bindings->vertex_buffers[i].id);
            SOKOL_ASSERT(vbs[i]);
            _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vbs[i]->slot.state);
            _sg.next_draw_valid &= !vbs[i]->cmn.append_overflow;
        }
        else {
            break;
        }
    }

    /* optional index buffer (zero id means non-indexed rendering) */
    _sg_buffer_t* ib = 0;
    if (bindings->index_buffer.id) {
        ib = _sg_lookup_buffer(&_sg.pools, bindings->index_buffer.id);
        SOKOL_ASSERT(ib);
        _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == ib->slot.state);
        _sg.next_draw_valid &= !ib->cmn.append_overflow;
    }

    /* vertex-stage images, same packed-list convention as vertex buffers */
    _sg_image_t* vs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 };
    int num_vs_imgs = 0;
    for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_vs_imgs++) {
        if (bindings->vs_images[i].id) {
            vs_imgs[i] = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id);
            SOKOL_ASSERT(vs_imgs[i]);
            _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vs_imgs[i]->slot.state);
        }
        else {
            break;
        }
    }

    /* fragment-stage images */
    _sg_image_t* fs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 };
    int num_fs_imgs = 0;
    for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_fs_imgs++) {
        if (bindings->fs_images[i].id) {
            fs_imgs[i] = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id);
            SOKOL_ASSERT(fs_imgs[i]);
            _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == fs_imgs[i]->slot.state);
        }
        else {
            break;
        }
    }
    if (_sg.next_draw_valid) {
        const int* vb_offsets = bindings->vertex_buffer_offsets;
        int ib_offset = bindings->index_buffer_offset;
        _sg_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
        _SG_TRACE_ARGS(apply_bindings, bindings);
    }
    else {
        _SG_TRACE_NOARGS(err_draw_invalid);
    }
}
16464 | |
/* sg_apply_uniforms(): upload uniform data for one uniform block of one
   shader stage. NOTE the guard ordering: a validation failure must clear
   next_draw_valid even when no pass is active, so validation runs first. */
SOKOL_API_IMPL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT((stage == SG_SHADERSTAGE_VS) || (stage == SG_SHADERSTAGE_FS));
    SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
    SOKOL_ASSERT(data && data->ptr && (data->size > 0));
    if (!_sg_validate_apply_uniforms(stage, ub_index, data)) {
        _sg.next_draw_valid = false;
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    if (!_sg.pass_valid) {
        _SG_TRACE_NOARGS(err_pass_invalid);
        return;
    }
    if (!_sg.next_draw_valid) {
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    _sg_apply_uniforms(stage, ub_index, data);
    _SG_TRACE_ARGS(apply_uniforms, stage, ub_index, data);
}
16486 | |
/* sg_draw(): issue a draw call with the currently applied pipeline,
   bindings and uniforms; silently skipped when the current pass, pipeline
   or bindings are invalid, or when there is nothing to draw */
SOKOL_API_IMPL void sg_draw(int base_element, int num_elements, int num_instances) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(base_element >= 0);
    SOKOL_ASSERT(num_elements >= 0);
    SOKOL_ASSERT(num_instances >= 0);
    /* debug-build-only hint: drawing without a preceding sg_apply_bindings()
       is almost always a usage error */
    #if defined(SOKOL_DEBUG)
    if (!_sg.bindings_valid) {
        SG_LOG("attempting to draw without resource bindings");
    }
    #endif
    if (!_sg.pass_valid) {
        _SG_TRACE_NOARGS(err_pass_invalid);
        return;
    }
    if (!_sg.next_draw_valid) {
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    if (!_sg.bindings_valid) {
        _SG_TRACE_NOARGS(err_bindings_invalid);
        return;
    }
    /* attempting to draw with zero elements or instances is not technically an
       error, but might be handled as an error in the backend API (e.g. on Metal)
    */
    if ((0 == num_elements) || (0 == num_instances)) {
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    _sg_draw(base_element, num_elements, num_instances);
    _SG_TRACE_ARGS(draw, base_element, num_elements, num_instances);
}
16519 | |
16520 | SOKOL_API_IMPL void sg_end_pass(void) { |
16521 | SOKOL_ASSERT(_sg.valid); |
16522 | if (!_sg.pass_valid) { |
16523 | _SG_TRACE_NOARGS(err_pass_invalid); |
16524 | return; |
16525 | } |
16526 | _sg_end_pass(); |
16527 | _sg.cur_pass.id = SG_INVALID_ID; |
16528 | _sg.cur_pipeline.id = SG_INVALID_ID; |
16529 | _sg.pass_valid = false; |
16530 | _SG_TRACE_NOARGS(end_pass); |
16531 | } |
16532 | |
/* sg_commit(): finish the current frame; flushes the backend, notifies
   registered commit listeners, and advances the frame index (which drives
   the per-frame update/append bookkeeping of buffers and images) */
SOKOL_API_IMPL void sg_commit(void) {
    SOKOL_ASSERT(_sg.valid);
    _sg_commit();
    _sg_notify_commit_listeners();
    _SG_TRACE_NOARGS(commit);
    _sg.frame_index++;
}
16540 | |
/* sg_reset_state_cache(): discard the backend's cached 3D-API state;
   call this after issuing your own native 3D-API calls */
SOKOL_API_IMPL void sg_reset_state_cache(void) {
    SOKOL_ASSERT(_sg.valid);
    _sg_reset_state_cache();
    _SG_TRACE_NOARGS(reset_state_cache);
}
16546 | |
/* sg_update_buffer(): overwrite the content of a dynamic/stream buffer;
   silently ignored when the buffer id is invalid or the buffer is not in
   VALID state (the trace event is recorded either way) */
SOKOL_API_IMPL void sg_update_buffer(sg_buffer buf_id, const sg_range* data) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(data && data->ptr && (data->size > 0));
    _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
    /* the size check is repeated here because SOKOL_ASSERT may be compiled out */
    if ((data->size > 0) && buf && (buf->slot.state == SG_RESOURCESTATE_VALID)) {
        if (_sg_validate_update_buffer(buf, data)) {
            SOKOL_ASSERT(data->size <= (size_t)buf->cmn.size);
            /* only one update allowed per buffer and frame */
            SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index);
            /* update and append on same buffer in same frame not allowed */
            SOKOL_ASSERT(buf->cmn.append_frame_index != _sg.frame_index);
            _sg_update_buffer(buf, data);
            buf->cmn.update_frame_index = _sg.frame_index;
        }
    }
    _SG_TRACE_ARGS(update_buffer, buf_id, data);
}
16564 | |
/* sg_append_buffer(): append data to a dynamic/stream buffer, multiple
   appends per frame are allowed; returns the byte offset where the data was
   (or would have been) written, to be used as vertex/index buffer offset in
   sg_bindings. On overflow the append is dropped and the buffer's overflow
   flag is set (queryable via sg_query_buffer_overflow()). */
SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const sg_range* data) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(data && data->ptr);
    _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
    int result;
    if (buf) {
        /* rewind append cursor in a new frame */
        if (buf->cmn.append_frame_index != _sg.frame_index) {
            buf->cmn.append_pos = 0;
            buf->cmn.append_overflow = false;
        }
        /* append sizes are rounded up to a multiple of 4 bytes */
        if ((buf->cmn.append_pos + _sg_roundup((int)data->size, 4)) > buf->cmn.size) {
            buf->cmn.append_overflow = true;
        }
        const int start_pos = buf->cmn.append_pos;
        if (buf->slot.state == SG_RESOURCESTATE_VALID) {
            if (_sg_validate_append_buffer(buf, data)) {
                if (!buf->cmn.append_overflow && (data->size > 0)) {
                    /* update and append on same buffer in same frame not allowed */
                    SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index);
                    /* the third arg tells the backend whether this is the first
                       append of the frame (may need to cycle to a fresh slot) */
                    int copied_num_bytes = _sg_append_buffer(buf, data, buf->cmn.append_frame_index != _sg.frame_index);
                    buf->cmn.append_pos += copied_num_bytes;
                    buf->cmn.append_frame_index = _sg.frame_index;
                }
            }
        }
        result = start_pos;
    }
    else {
        /* FIXME: should we return -1 here? */
        result = 0;
    }
    _SG_TRACE_ARGS(append_buffer, buf_id, data, result);
    return result;
}
16600 | |
16601 | SOKOL_API_IMPL bool sg_query_buffer_overflow(sg_buffer buf_id) { |
16602 | SOKOL_ASSERT(_sg.valid); |
16603 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
16604 | bool result = buf ? buf->cmn.append_overflow : false; |
16605 | return result; |
16606 | } |
16607 | |
16608 | SOKOL_API_IMPL bool sg_query_buffer_will_overflow(sg_buffer buf_id, size_t size) { |
16609 | SOKOL_ASSERT(_sg.valid); |
16610 | _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
16611 | bool result = false; |
16612 | if (buf) { |
16613 | int append_pos = buf->cmn.append_pos; |
16614 | /* rewind append cursor in a new frame */ |
16615 | if (buf->cmn.append_frame_index != _sg.frame_index) { |
16616 | append_pos = 0; |
16617 | } |
16618 | if ((append_pos + _sg_roundup((int)size, 4)) > buf->cmn.size) { |
16619 | result = true; |
16620 | } |
16621 | } |
16622 | return result; |
16623 | } |
16624 | |
16625 | SOKOL_API_IMPL void sg_update_image(sg_image img_id, const sg_image_data* data) { |
16626 | SOKOL_ASSERT(_sg.valid); |
16627 | _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
16628 | if (img && img->slot.state == SG_RESOURCESTATE_VALID) { |
16629 | if (_sg_validate_update_image(img, data)) { |
16630 | SOKOL_ASSERT(img->cmn.upd_frame_index != _sg.frame_index); |
16631 | _sg_update_image(img, data); |
16632 | img->cmn.upd_frame_index = _sg.frame_index; |
16633 | } |
16634 | } |
16635 | _SG_TRACE_ARGS(update_image, img_id, data); |
16636 | } |
16637 | |
/* sg_push_debug_group(): open a named debug group; currently only forwarded
   to trace hooks, 'name' is otherwise unused */
SOKOL_API_IMPL void sg_push_debug_group(const char* name) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(name);
    _SOKOL_UNUSED(name);
    _SG_TRACE_ARGS(push_debug_group, name);
}
16644 | |
/* sg_pop_debug_group(): close the current debug group; currently only
   forwarded to trace hooks */
SOKOL_API_IMPL void sg_pop_debug_group(void) {
    SOKOL_ASSERT(_sg.valid);
    _SG_TRACE_NOARGS(pop_debug_group);
}
16649 | |
/* sg_add_commit_listener(): register a callback to be invoked from inside
   sg_commit(); returns false when registration failed (see
   _sg_add_commit_listener for the failure conditions) */
SOKOL_API_IMPL bool sg_add_commit_listener(sg_commit_listener listener) {
    SOKOL_ASSERT(_sg.valid);
    return _sg_add_commit_listener(&listener);
}
16654 | |
/* sg_remove_commit_listener(): unregister a commit listener previously added
   with sg_add_commit_listener(); returns false when it was not found */
SOKOL_API_IMPL bool sg_remove_commit_listener(sg_commit_listener listener) {
    SOKOL_ASSERT(_sg.valid);
    return _sg_remove_commit_listener(&listener);
}
16659 | |
16660 | SOKOL_API_IMPL sg_buffer_info sg_query_buffer_info(sg_buffer buf_id) { |
16661 | SOKOL_ASSERT(_sg.valid); |
16662 | sg_buffer_info info; |
16663 | _sg_clear(&info, sizeof(info)); |
16664 | const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
16665 | if (buf) { |
16666 | info.slot.state = buf->slot.state; |
16667 | info.slot.res_id = buf->slot.id; |
16668 | info.slot.ctx_id = buf->slot.ctx_id; |
16669 | info.update_frame_index = buf->cmn.update_frame_index; |
16670 | info.append_frame_index = buf->cmn.append_frame_index; |
16671 | info.append_pos = buf->cmn.append_pos; |
16672 | info.append_overflow = buf->cmn.append_overflow; |
16673 | #if defined(SOKOL_D3D11) |
16674 | info.num_slots = 1; |
16675 | info.active_slot = 0; |
16676 | #else |
16677 | info.num_slots = buf->cmn.num_slots; |
16678 | info.active_slot = buf->cmn.active_slot; |
16679 | #endif |
16680 | } |
16681 | return info; |
16682 | } |
16683 | |
16684 | SOKOL_API_IMPL sg_image_info sg_query_image_info(sg_image img_id) { |
16685 | SOKOL_ASSERT(_sg.valid); |
16686 | sg_image_info info; |
16687 | _sg_clear(&info, sizeof(info)); |
16688 | const _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); |
16689 | if (img) { |
16690 | info.slot.state = img->slot.state; |
16691 | info.slot.res_id = img->slot.id; |
16692 | info.slot.ctx_id = img->slot.ctx_id; |
16693 | info.upd_frame_index = img->cmn.upd_frame_index; |
16694 | #if defined(SOKOL_D3D11) |
16695 | info.num_slots = 1; |
16696 | info.active_slot = 0; |
16697 | #else |
16698 | info.num_slots = img->cmn.num_slots; |
16699 | info.active_slot = img->cmn.active_slot; |
16700 | #endif |
16701 | info.width = img->cmn.width; |
16702 | info.height = img->cmn.height; |
16703 | } |
16704 | return info; |
16705 | } |
16706 | |
16707 | SOKOL_API_IMPL sg_shader_info sg_query_shader_info(sg_shader shd_id) { |
16708 | SOKOL_ASSERT(_sg.valid); |
16709 | sg_shader_info info; |
16710 | _sg_clear(&info, sizeof(info)); |
16711 | const _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
16712 | if (shd) { |
16713 | info.slot.state = shd->slot.state; |
16714 | info.slot.res_id = shd->slot.id; |
16715 | info.slot.ctx_id = shd->slot.ctx_id; |
16716 | } |
16717 | return info; |
16718 | } |
16719 | |
16720 | SOKOL_API_IMPL sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip_id) { |
16721 | SOKOL_ASSERT(_sg.valid); |
16722 | sg_pipeline_info info; |
16723 | _sg_clear(&info, sizeof(info)); |
16724 | const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
16725 | if (pip) { |
16726 | info.slot.state = pip->slot.state; |
16727 | info.slot.res_id = pip->slot.id; |
16728 | info.slot.ctx_id = pip->slot.ctx_id; |
16729 | } |
16730 | return info; |
16731 | } |
16732 | |
16733 | SOKOL_API_IMPL sg_pass_info sg_query_pass_info(sg_pass pass_id) { |
16734 | SOKOL_ASSERT(_sg.valid); |
16735 | sg_pass_info info; |
16736 | _sg_clear(&info, sizeof(info)); |
16737 | const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
16738 | if (pass) { |
16739 | info.slot.state = pass->slot.state; |
16740 | info.slot.res_id = pass->slot.id; |
16741 | info.slot.ctx_id = pass->slot.ctx_id; |
16742 | } |
16743 | return info; |
16744 | } |
16745 | |
/* sg_query_buffer_defaults(): return a copy of 'desc' with all zero-initialized
   members replaced by their default values */
SOKOL_API_IMPL sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_buffer_desc_defaults(desc);
}
16750 | |
/* sg_query_image_defaults(): return a copy of 'desc' with all zero-initialized
   members replaced by their default values */
SOKOL_API_IMPL sg_image_desc sg_query_image_defaults(const sg_image_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_image_desc_defaults(desc);
}
16755 | |
/* sg_query_shader_defaults(): return a copy of 'desc' with all zero-initialized
   members replaced by their default values */
SOKOL_API_IMPL sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_shader_desc_defaults(desc);
}
16760 | |
/* sg_query_pipeline_defaults(): return a copy of 'desc' with all zero-initialized
   members replaced by their default values */
SOKOL_API_IMPL sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_pipeline_desc_defaults(desc);
}
16765 | |
/* sg_query_pass_defaults(): return a copy of 'desc' with all zero-initialized
   members replaced by their default values */
SOKOL_API_IMPL sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_pass_desc_defaults(desc);
}
16770 | |
/* sg_d3d11_device(): return the ID3D11Device pointer used by the D3D11
   backend (cast to const void*); returns 0 on all other backends */
SOKOL_API_IMPL const void* sg_d3d11_device(void) {
#if defined(SOKOL_D3D11)
    return (const void*) _sg.d3d11.dev;
#else
    return 0;
#endif
}
16778 | |
/* sg_mtl_device(): return the MTLDevice used by the Metal backend as an
   unretained bridged const void* (0 when not set, or on other backends) */
SOKOL_API_IMPL const void* sg_mtl_device(void) {
#if defined(SOKOL_METAL)
    if (nil != _sg.mtl.device) {
        /* __bridge: hand out the pointer without transferring ARC ownership */
        return (__bridge const void*) _sg.mtl.device;
    }
    else {
        return 0;
    }
#else
    return 0;
#endif
}
16791 | |
/* sg_mtl_render_command_encoder(): return the currently active
   MTLRenderCommandEncoder as an unretained bridged const void*
   (0 outside a pass, or on non-Metal backends) */
SOKOL_API_IMPL const void* sg_mtl_render_command_encoder(void) {
#if defined(SOKOL_METAL)
    if (nil != _sg.mtl.cmd_encoder) {
        /* __bridge: hand out the pointer without transferring ARC ownership */
        return (__bridge const void*) _sg.mtl.cmd_encoder;
    }
    else {
        return 0;
    }
#else
    return 0;
#endif
}
16804 | |
16805 | #ifdef _MSC_VER |
16806 | #pragma warning(pop) |
16807 | #endif |
16808 | |
16809 | #endif /* SOKOL_GFX_IMPL */ |