forked from KhronosGroup/Vulkan-Loader
-
Notifications
You must be signed in to change notification settings - Fork 1
/
loader.c
7167 lines (6361 loc) · 331 KB
/
loader.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
*
* Copyright (c) 2014-2023 The Khronos Group Inc.
* Copyright (c) 2014-2023 Valve Corporation
* Copyright (c) 2014-2023 LunarG, Inc.
* Copyright (C) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Jon Ashburn <[email protected]>
* Author: Courtney Goeltzenleuchter <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Lenny Komow <[email protected]>
* Author: Charles Giessen <[email protected]>
*
*/
#include "loader.h"
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>
#include <stddef.h>
#if defined(__APPLE__)
#include <CoreFoundation/CoreFoundation.h>
#include <sys/param.h>
#endif
#include <sys/types.h>
#if defined(_WIN32)
#include "dirent_on_windows.h"
#elif COMMON_UNIX_PLATFORMS
#include <dirent.h>
#else
#warning dirent.h not available on this platform
#endif // _WIN32
#include "allocation.h"
#include "cJSON.h"
#include "debug_utils.h"
#include "loader_environment.h"
#include "gpa_helper.h"
#include "log.h"
#include "unknown_function_handling.h"
#include "vk_loader_platform.h"
#include "wsi.h"
#if defined(WIN32)
#include "loader_windows.h"
#endif
#if defined(LOADER_ENABLE_LINUX_SORT)
// This header is currently only used when sorting Linux devices, so don't include it otherwise.
#include "loader_linux.h"
#endif // LOADER_ENABLE_LINUX_SORT
// Generated file containing all the extension data
#include "vk_loader_extensions.c"
// Global loader state shared across all instances; zero-initialized at load time.
struct loader_struct loader = {0};

// Per-activated-layer bookkeeping captured at instance creation time
// (used for logging which layers are active and how they were enabled).
struct activated_layer_info {
    char *name;         // layer name from its manifest
    char *manifest;     // path to the manifest file the layer came from
    char *library;      // path to the layer's shared library
    bool is_implicit;   // true when the layer was enabled implicitly
    char *disable_env;  // env var that disables the layer (implicit layers only)
};

// thread safety lock for accessing global data structures such as "loader"
// all entrypoints on the instance chain need to be locked except GPA
// additionally CreateDevice and DestroyDevice needs to be locked
loader_platform_thread_mutex loader_lock;
// Guards the preloaded ICD list (scanned_icds below).
loader_platform_thread_mutex loader_preload_icd_lock;
// NOTE(review): presumably guards the loader's global instance list — confirm at use sites.
loader_platform_thread_mutex loader_global_instance_list_lock;

// A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
// other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
// functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up
// significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
// vkCreateInstance.
struct loader_icd_tramp_list scanned_icds;

// One-time-initialization token for the loader's global setup.
LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
// Build a loader_api_version from a packed Vulkan version number, keeping only
// the major and minor fields; the patch field is always forced to 0.
loader_api_version loader_make_version(uint32_t version) {
    loader_api_version result;
    result.major = VK_API_VERSION_MAJOR(version);
    result.minor = VK_API_VERSION_MINOR(version);
    result.patch = 0;
    return result;
}
// Build a loader_api_version from a packed Vulkan version number, filling in
// all three of the major, minor, and patch fields.
loader_api_version loader_make_full_version(uint32_t version) {
    loader_api_version result;
    result.major = VK_API_VERSION_MAJOR(version);
    result.minor = VK_API_VERSION_MINOR(version);
    result.patch = VK_API_VERSION_PATCH(version);
    return result;
}
// Pack separate major/minor/patch values into a loader_api_version struct.
// Values are narrowed to 16 bits, matching the struct's field width.
loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) {
    loader_api_version combined;
    combined.major = (uint16_t)major;
    combined.minor = (uint16_t)minor;
    combined.patch = (uint16_t)patch;
    return combined;
}
// Returns true when `version` is >= `required`, comparing major, then minor,
// then patch (standard lexicographic version comparison).
bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) {
    if (version.major != required.major) {
        return version.major > required.major;
    }
    if (version.minor != required.minor) {
        return version.minor > required.minor;
    }
    return version.patch >= required.patch;
}
// Wrapper around opendir so that the dirent_on_windows gets the instance it needs
// while linux opendir & readdir does not
DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
#if defined(_WIN32)
    // The Windows dirent shim takes allocator callbacks; pass NULL to use
    // default allocation when no instance is available.
    return opendir(instance ? &instance->alloc_callbacks : NULL, name);
#elif COMMON_UNIX_PLATFORMS
    return opendir(name);
#else
#warning dirent.h - opendir not available on this platform
#endif  // _WIN32
}
// Counterpart to loader_opendir: closes a directory handle, routing through
// the Windows dirent shim (which needs the instance's allocator) when needed.
int loader_closedir(const struct loader_instance *instance, DIR *dir) {
#if defined(_WIN32)
    return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
#elif COMMON_UNIX_PLATFORMS
    return closedir(dir);
#else
#warning dirent.h - closedir not available on this platform
#endif  // _WIN32
}
// Returns true when the first five characters of `path` are ".json".
// `len` is the number of valid characters available at `path`; anything
// shorter than the 5-character suffix cannot match.
bool is_json(const char *path, size_t len) {
    if (len < 5) {
        return false;
    }
    return strncmp(path, ".json", 5) == 0;
}
// Handle an error reported while loading a library (ICD or layer).
// Classifies the platform error string so lib_status (if non-NULL) reflects
// wrong-architecture or out-of-memory failures, then logs the message.
void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename,
                                      enum loader_layer_library_status *lib_status) {
    const char *error_message = loader_platform_open_library_error(filename);
    // If the error is due to incompatible architecture (eg 32 bit vs 64 bit), report it with INFO level
    // Discussed in Github issue 262 & 644
    // "wrong ELF class" is a linux error, " with error 193" is a windows error
    VkFlags err_flag = VULKAN_LOADER_ERROR_BIT;
    if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) {
        err_flag = VULKAN_LOADER_INFO_BIT;
        if (NULL != lib_status) {
            *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE;
        }
    }
    // Check if the error is due to lack of memory
    // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY
    // Linux doesn't have such a nice error message - only if there are reported issues should this be called
    else if (strstr(error_message, " with error 8") != NULL) {
        if (NULL != lib_status) {
            *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY;
        }
    } else if (NULL != lib_status) {
        *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD;
    }
    // Route the platform message through "%s" so any '%' characters in it are
    // not interpreted as printf format specifiers by loader_log.
    loader_log(inst, err_flag, 0, "%s", error_message);
}
// Install the loader's instance dispatch table onto a dispatchable object.
// Fails if the given VkInstance does not map to a known loader instance.
VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
    struct loader_instance *inst = loader_get_instance(instance);
    if (NULL == inst) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table.");
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    loader_set_dispatch(object, inst->disp);
    return VK_SUCCESS;
}
// Install the loader's device dispatch table onto a dispatchable object.
// Fails if the VkDevice cannot be resolved to a loader device/ICD pair.
VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
    struct loader_device *dev = NULL;
    struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
    if (icd_term == NULL || dev == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    loader_set_dispatch(object, &dev->loader_dispatch);
    return VK_SUCCESS;
}
// Free every heap allocation owned by a loader_layer_properties struct and
// zero the struct so the slot can be safely reused inside a layer list.
void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
    loader_instance_heap_free(inst, layer_properties->manifest_file_name);
    loader_instance_heap_free(inst, layer_properties->lib_name);
    loader_instance_heap_free(inst, layer_properties->functions.str_gipa);
    loader_instance_heap_free(inst, layer_properties->functions.str_gdpa);
    loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface);
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
    // Each device extension entry owns a string list of entrypoints; those must
    // be freed before the extension list itself is destroyed.
    if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
        for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
            free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints);
        }
    }
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.value);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.value);
    free_string_list(inst, &layer_properties->component_layer_names);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version);
    free_string_list(inst, &layer_properties->override_paths);
    free_string_list(inst, &layer_properties->blacklist_layer_names);
    free_string_list(inst, &layer_properties->app_key_paths);
    // Make sure to clear out the removed layer, in case new layers are added in the previous location
    memset(layer_properties, 0, sizeof(struct loader_layer_properties));
}
// Allocate a zeroed array of library handles, one slot per layer in
// instance_layers. No allocation happens when the layer list is empty.
VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) {
    if (0 == instance_layers->count) {
        return VK_SUCCESS;
    }
    *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
    return (NULL == *libs) ? VK_ERROR_OUT_OF_HOST_MEMORY : VK_SUCCESS;
}
// Duplicate source_str into a freshly allocated, NUL-terminated string stored
// in *dest_str. Returns VK_ERROR_OUT_OF_HOST_MEMORY (with *dest_str == NULL)
// when the allocation fails.
VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) {
    assert(source_str && dest_str);
    size_t source_len = strlen(source_str);
    *dest_str = loader_instance_heap_calloc(inst, source_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == *dest_str) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    // Length is already known, so a plain memcpy suffices; the terminator byte
    // is written explicitly below.
    memcpy(*dest_str, source_str, source_len);
    (*dest_str)[source_len] = '\0';
    return VK_SUCCESS;
}
// Allocate backing storage for a string list with room for allocated_count
// entries; the list starts out empty (count == 0).
VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) {
    assert(string_list);
    string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (string_list->list == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    string_list->count = 0;
    string_list->allocated_count = allocated_count;
    return VK_SUCCESS;
}
// Append str to the string list, taking ownership of it. Ownership is NOT
// taken when this function fails - the caller must free str in that case.
// Grows the backing array (doubling) when full.
VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) {
    assert(string_list && str);
    if (string_list->allocated_count == 0) {
        string_list->allocated_count = 32;
        string_list->list =
            loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == string_list->list) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
    } else if (string_list->count + 1 > string_list->allocated_count) {
        uint32_t new_allocated_count = string_list->allocated_count * 2;
        // Keep the old pointer until the realloc is known to have succeeded,
        // otherwise the original allocation would leak on failure.
        void *new_ptr = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count,
                                                     sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        string_list->list = new_ptr;
        // Null out the new space. The size must be in bytes: the previous code
        // cleared only allocated_count bytes, leaving most of the newly added
        // pointer slots uninitialized.
        memset(string_list->list + string_list->allocated_count, 0, string_list->allocated_count * sizeof(char *));
        string_list->allocated_count *= 2;
    }
    string_list->list[string_list->count++] = str;
    return VK_SUCCESS;
}
// Copy str_len characters of str into a newly allocated, NUL-terminated
// string and append it to the string list (the append takes ownership of the
// copy on success; on failure the copy is freed here).
VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str,
                                 size_t str_len) {
    assert(string_list && str);
    // Allocate str_len + 1 chars. The previous size expression,
    // sizeof(char *) * str_len + 1, over-allocated by roughly 8x.
    char *new_str = loader_instance_heap_calloc(inst, str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == new_str) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    // strncpy is intentional: it stops at an earlier NUL in str if str is
    // shorter than str_len; the terminator is always written explicitly.
    strncpy(new_str, str, str_len);
    new_str[str_len] = '\0';
    VkResult res = append_str_to_string_list(inst, string_list, new_str);
    if (res != VK_SUCCESS) {
        // Cleanup new_str if the append failed - as append_str_to_string_list takes ownership but not if the function fails
        loader_instance_heap_free(inst, new_str);
    }
    return res;
}
// Free every string owned by the list, then the list's backing array, and
// reset all bookkeeping fields to the empty state.
void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) {
    assert(string_list);
    if (NULL != string_list->list) {
        for (uint32_t idx = 0; idx < string_list->count; idx++) {
            loader_instance_heap_free(inst, string_list->list[idx]);
            string_list->list[idx] = NULL;
        }
        loader_instance_heap_free(inst, string_list->list);
        string_list->list = NULL;
    }
    string_list->allocated_count = 0;
    string_list->count = 0;
}
// Combine path elements, separating each element with the platform-specific
// directory separator, and save the combined string to a destination buffer,
// not exceeding the given length. Path elements are given as variable args,
// with a NULL element terminating the list.
//
// \returns the total length of the combined string, not including an ASCII
// NUL termination character. This length may exceed the available storage:
// in this case, the written string will be truncated to avoid a buffer
// overrun, and the return value will greater than or equal to the storage
// size. A NULL argument may be provided as the destination buffer in order
// to determine the required string length without actually writing a string.
size_t loader_platform_combine_path(char *dest, size_t len, ...) {
    size_t required_len = 0;
    va_list ap;
    const char *component;
    va_start(ap, len);
    component = va_arg(ap, const char *);
    while (component) {
        if (required_len > 0) {
            // This path element is not the first non-empty element; prepend
            // a directory separator if space allows
            if (dest && required_len + 1 < len) {
                (void)snprintf(dest + required_len, len - required_len, "%c", DIRECTORY_SYMBOL);
            }
            // Separator counts toward the required length even when not written.
            required_len++;
        }
        if (dest && required_len < len) {
            // Bounded copy; a missing NUL on truncation is repaired after the loop.
            strncpy(dest + required_len, component, len - required_len);
        }
        required_len += strlen(component);
        component = va_arg(ap, const char *);
    }
    va_end(ap);
    // strncpy(3) won't add a NUL terminating byte in the event of truncation.
    if (dest && required_len >= len) {
        dest[len - 1] = '\0';
    }
    return required_len;
}
// Given string of three part form "maj.min.pat" convert to a vulkan version number.
// Also can understand four part form "variant.major.minor.patch" if provided.
// NOTE: strtok mutates vers_str (it writes NULs between tokens) and uses
// global tokenizer state, so the caller must pass a writable scratch copy.
uint32_t loader_parse_version_string(char *vers_str) {
    if (NULL == vers_str) {
        return 0;
    }
    // Collect up to four numeric fields; delimiters match the original parser
    // (dots plus quote/newline characters that appear in JSON manifests).
    uint32_t fields[4] = {0, 0, 0, 0};
    uint32_t field_count = 0;
    char *tok = strtok(vers_str, ".\"\n\r");
    while (NULL != tok) {
        fields[field_count++] = (uint16_t)atoi(tok);
        if (4 == field_count) {
            break;
        }
        tok = strtok(NULL, ".\"\n\r");
    }
    uint32_t variant = 0, major = 0, minor = 0, patch = 0;
    if (4 == field_count) {
        // Four-part string: the leading field is the variant.
        variant = fields[0];
        major = fields[1];
        minor = fields[2];
        patch = fields[3];
    } else {
        // Three or fewer parts: fields map to major.minor.patch, missing ones stay 0.
        major = fields[0];
        minor = fields[1];
        patch = fields[2];
    }
    return VK_MAKE_API_VERSION(variant, major, minor, patch);
}
// Two extension properties match when their extensionName strings are equal.
bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
    return 0 == strcmp(op1->extensionName, op2->extensionName);
}
// Search the given raw extension array for an entry matching vk_ext_prop.
bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
                                     const VkExtensionProperties *ext_array) {
    uint32_t idx = 0;
    while (idx < count) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[idx])) {
            return true;
        }
        ++idx;
    }
    return false;
}
// Search the given extension list for an entry matching vk_ext_prop.
bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
    uint32_t idx = 0;
    while (idx < ext_list->count) {
        if (compare_vk_extension_properties(&ext_list->list[idx], vk_ext_prop)) {
            return true;
        }
        ++idx;
    }
    return false;
}
// Search the given device extension list for an entry matching ext_prop.
bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
    uint32_t idx = 0;
    while (idx < ext_list->count) {
        if (compare_vk_extension_properties(&ext_list->list[idx].props, ext_prop)) {
            return true;
        }
        ++idx;
    }
    return false;
}
// Append a layer property to the layer list, doubling the list's capacity
// when needed. The list takes ownership of every heap allocation inside
// layer_property: on success the source struct is zeroed, on failure its
// allocations are freed here. Either way the caller must not reuse the
// contents of layer_property afterwards.
VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                      struct loader_layer_properties *layer_property) {
    VkResult res = VK_SUCCESS;
    if (layer_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }
    // Ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        layer_list->list = new_ptr;
        // Zero the newly added second half of the buffer (capacity is a byte count).
        memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity);
        layer_list->capacity *= 2;
    }
    memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties));
    layer_list->count++;
    // The list now owns the contents; clear the source so its allocations
    // cannot be double-freed by the caller.
    memset(layer_property, 0, sizeof(struct loader_layer_properties));
out:
    if (res != VK_SUCCESS) {
        loader_free_layer_properties(inst, layer_property);
    }
    return res;
}
// Search the given layer list for a layer property matching the given layer name
struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) {
for (uint32_t i = 0; i < layer_list->count; i++) {
const VkLayerProperties *item = &layer_list->list[i].info;
if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
}
return NULL;
}
struct loader_layer_properties *loader_find_pointer_layer_property(const char *name,
const struct loader_pointer_layer_list *layer_list) {
for (uint32_t i = 0; i < layer_list->count; i++) {
const VkLayerProperties *item = &layer_list->list[i]->info;
if (strcmp(name, item->layerName) == 0) return layer_list->list[i];
}
return NULL;
}
// Returns true when a layer with the given name is present in the (possibly
// NULL) pointer layer list.
bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) {
    return NULL != layer_list && NULL != loader_find_pointer_layer_property(name, layer_list);
}
// Search the given meta-layer's component list for a layer matching the given
// layer name, recursing into component layers that are themselves meta-layers.
bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name,
                                          struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
    for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) {
        if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) {
            return true;
        }
        struct loader_layer_properties *comp_layer_props =
            loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list);
        // A component layer may not be present in layer_list at all; skip it
        // rather than dereferencing a NULL pointer.
        if (NULL == comp_layer_props) {
            continue;
        }
        if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
            return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props);
        }
    }
    return false;
}
// Search the override layer's blacklist for a layer matching the given layer
// name. inst and layer_list are unused but kept for signature symmetry with
// the other layer-search helpers.
bool loader_find_layer_name_in_blacklist(const struct loader_instance *inst, const char *layer_name,
                                         struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
    (void)inst;
    (void)layer_list;
    uint32_t idx = 0;
    while (idx < meta_layer_props->blacklist_layer_names.count) {
        if (0 == strcmp(layer_name, meta_layer_props->blacklist_layer_names.list[idx])) {
            return true;
        }
        ++idx;
    }
    return false;
}
// Remove all layer properties entries from the list, closing any layer
// libraries that are still loaded, then release the list storage itself.
void loader_delete_layer_list_and_properties(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    uint32_t i;
    if (!layer_list) return;
    for (i = 0; i < layer_list->count; i++) {
        if (layer_list->list[i].lib_handle) {
            loader_platform_close_library(layer_list->list[i].lib_handle);
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s",
                       layer_list->list[i].lib_name);
            layer_list->list[i].lib_handle = NULL;
        }
        loader_free_layer_properties(inst, &(layer_list->list[i]));
    }
    layer_list->count = 0;
    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_instance_heap_free(inst, layer_list->list);
        // Null out the freed pointer so a stale list can never be dereferenced
        // or double-freed if the struct is reused.
        layer_list->list = NULL;
    }
}
// Remove the layer at index layer_to_remove, freeing its properties and
// compacting the remaining entries down over the hole. Pointers into the
// list past the removed index are invalidated by the memmove, so callers
// iterating the list must re-check the current index after calling this.
void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                 uint32_t layer_to_remove) {
    if (layer_list == NULL || layer_to_remove >= layer_list->count) {
        return;
    }
    loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove]));
    // Remove the current invalid meta-layer from the layer list. Use memmove since we are
    // overlapping the source and destination addresses.
    memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
            sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));
    // Decrement the count (because we now have one less) and decrement the loop index since we need to
    // re-check this index.
    layer_list->count--;
}
// Remove all layers in the layer list that are blacklisted by the override layer.
// NOTE: This should only be called if an override layer is found and not expired.
void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
    if (NULL == override_prop) {
        return;
    }
    for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
        // Copy the entry by value: loader_remove_layer_in_list memmoves the
        // array, so a pointer (or name pointer) into the list would dangle
        // after a removal.
        struct loader_layer_properties cur_layer_prop = layer_list->list[j];
        const char *cur_layer_name = &cur_layer_prop.info.layerName[0];
        // Skip the override layer itself.
        if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
            continue;
        }
        // If found in the override layer's blacklist, remove it
        if (loader_find_layer_name_in_blacklist(inst, cur_layer_name, layer_list, override_prop)) {
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
                       "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
                       "Removing that layer from current layer list.",
                       cur_layer_name);
            loader_remove_layer_in_list(inst, layer_list, j);
            j--;
            // Re-do the query for the override layer
            override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
        }
    }
}
// Remove all layers in the layer list that are not found inside any implicit meta-layers.
// Pass 1 clears every layer's keep flag; pass 2 marks non-explicit layers and
// any layer referenced (directly or recursively) by a meta-layer; pass 3
// deletes everything still unmarked.
void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    int32_t i;
    int32_t j;
    int32_t layer_count = (int32_t)(layer_list->count);
    for (i = 0; i < layer_count; i++) {
        layer_list->list[i].keep = false;
    }
    for (i = 0; i < layer_count; i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
        // Non-explicit (implicit) layers are always kept.
        if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
            cur_layer_prop->keep = true;
            continue;
        }
        for (j = 0; j < layer_count; j++) {
            struct loader_layer_properties *layer_to_check = &layer_list->list[j];
            if (i == j) {
                continue;
            }
            if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
                // For all layers found in this meta layer, we want to keep them as well.
                if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
                    cur_layer_prop->keep = true;
                }
            }
        }
    }
    // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
    // dynamically updated if we delete a layer property in the list).
    for (i = 0; i < (int32_t)(layer_list->count); i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
        if (!cur_layer_prop->keep) {
            loader_log(
                inst, VULKAN_LOADER_DEBUG_BIT, 0,
                "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not list "
                "inside of any. So removing layer from current layer list.",
                cur_layer_prop->info.layerName);
            loader_remove_layer_in_list(inst, layer_list, i);
            i--;
        }
    }
}
// Query instance extensions through fp_get_props (an ICD's or layer's
// vkEnumerateInstanceExtensionProperties) and append the non-duplicate,
// loader-supported ones to ext_list. lib_name is used only in log messages.
// A NULL fp_get_props is treated as "no extensions" and returns VK_SUCCESS.
VkResult loader_add_instance_extensions(const struct loader_instance *inst,
                                        const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
                                        struct loader_extension_list *ext_list) {
    uint32_t i, count = 0;
    VkExtensionProperties *ext_props;
    VkResult res = VK_SUCCESS;
    if (!fp_get_props) {
        // No EnumerateInstanceExtensionProperties defined
        goto out;
    }
    // Make sure we never call ourself by accident, this should never happen outside of error paths
    if (fp_get_props == vkEnumerateInstanceExtensionProperties) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would "
                   "lead to infinite recursion.",
                   lib_name);
        goto out;
    }
    // Two-call idiom: first get the count, then fetch that many properties.
    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name);
        goto out;
    }
    if (count == 0) {
        // No ExtensionProperties to report
        goto out;
    }
    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
    if (NULL == ext_props) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s",
                   lib_name);
        goto out;
    }
    for (i = 0; i < count; i++) {
        // Filter out WSI instance extensions the loader build does not support.
        bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
        if (!ext_unsupported) {
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS) {
                goto out;
            }
        }
    }
out:
    return res;
}
// Query the device extensions of physical_device through the given
// vkEnumerateDeviceExtensionProperties entrypoint and append the
// non-duplicate ones to ext_list. lib_name is used only in log messages.
VkResult loader_add_device_extensions(const struct loader_instance *inst,
                                      PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
                                      VkPhysicalDevice physical_device, const char *lib_name,
                                      struct loader_extension_list *ext_list) {
    uint32_t i = 0, count = 0;
    VkResult res = VK_SUCCESS;
    VkExtensionProperties *ext_props = NULL;
    // Two-call idiom: count first, then fetch the properties.
    res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name);
        return res;
    }
    if (count > 0) {
        // Stack allocation - freed automatically when the function returns.
        ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
        if (!ext_props) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.",
                       lib_name);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
        if (res != VK_SUCCESS) {
            return res;
        }
        for (i = 0; i < count; i++) {
            // loader_add_to_ext_list skips duplicates internally.
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS) {
                return res;
            }
        }
    }
    return VK_SUCCESS;
}
// Initialize a generic growable list with room for 32 elements of
// element_size bytes each; the storage is zeroed and count starts at 0.
VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
    const size_t initial_capacity = 32 * element_size;
    list_info->count = 0;
    list_info->capacity = 0;
    list_info->list = loader_instance_heap_calloc(inst, initial_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == list_info->list) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    list_info->capacity = initial_capacity;
    return VK_SUCCESS;
}
// Free a generic list's backing storage and reset it to the empty state so it
// can be safely re-initialized (or destroyed again) later.
void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
    loader_instance_heap_free(inst, list->list);
    list->list = NULL;
    list->count = 0;
    list->capacity = 0;
}
// Append the non-duplicate entries of props (prop_list_count elements) to
// ext_list, lazily initializing the list on first use and doubling its
// capacity as needed.
// Return - VK_SUCCESS on success, VK_ERROR_OUT_OF_HOST_MEMORY on allocation failure.
VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
                                uint32_t prop_list_count, const VkExtensionProperties *props) {
    // Lazily create the backing storage on first use.
    if (NULL == ext_list->list || 0 == ext_list->capacity) {
        VkResult init_res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
        if (init_res != VK_SUCCESS) {
            return init_res;
        }
    }
    for (uint32_t prop_idx = 0; prop_idx < prop_list_count; prop_idx++) {
        // Skip extensions already present in the list.
        if (has_vk_extension_property(&props[prop_idx], ext_list)) {
            continue;
        }
        // Grow the list (doubling the byte capacity) when there is no room
        // for one more element. capacity is tracked in bytes.
        if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
            void *larger_list = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (NULL == larger_list) {
                loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                           "loader_add_to_ext_list: Failed to reallocate space for extension list");
                return VK_ERROR_OUT_OF_HOST_MEMORY;
            }
            ext_list->list = larger_list;
            ext_list->capacity *= 2;
        }
        // Append by struct assignment (equivalent to the memcpy of one element).
        ext_list->list[ext_list->count] = props[prop_idx];
        ext_list->count++;
    }
    return VK_SUCCESS;
}
// Append one extension property defined in props with entrypoints defined in entrys to the given
// ext_list. Do not append if a duplicate.
// Ownership: this function takes ownership of 'entrys' (if it is not NULL). When the new element is
// stored, the string list is moved into it; on a duplicate or on any failure, 'entrys' is freed here
// instead. Return - VK_SUCCESS on success.
VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
                                    const VkExtensionProperties *props, struct loader_string_list *entrys) {
    VkResult res = VK_SUCCESS;
    // Remains true on every path where 'entrys' was NOT moved into the list,
    // so the cleanup at 'out' knows whether to free it.
    bool should_free_entrys = true;
    // Lazily create the backing storage on first use.
    if (ext_list->list == NULL || ext_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }
    // look for duplicates; a duplicate is not an error (res stays VK_SUCCESS)
    if (has_vk_dev_ext_property(props, ext_list)) {
        goto out;
    }
    uint32_t idx = ext_list->count;
    // add to list at end
    // check for enough capacity (capacity is tracked in bytes; double when full)
    if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        ext_list->list = new_ptr;
        // double capacity
        ext_list->capacity *= 2;
    }
    memcpy(&ext_list->list[idx].props, props, sizeof(*props));
    if (entrys) {
        // Move the entrypoint list into the new element; ownership transfers.
        ext_list->list[idx].entrypoints = *entrys;
        should_free_entrys = false;
    }
    ext_list->count++;
out:
    if (NULL != entrys && should_free_entrys) {
        free_string_list(inst, entrys);
    }
    return res;
}
// Create storage for pointers to loader_layer_properties.
// Allocates room for 32 pointers; returns false on allocation failure.
bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) {
    // Capacity is in bytes; the list holds pointers, not the structs themselves.
    list->capacity = 32 * sizeof(void *);
    list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == list->list) {
        return false;
    }
    list->count = 0;
    return true;
}
// Search the given array of activated-layer entries for one whose name matches
// the given VkLayerProperties. Returns true on the first match.
bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count,
                                           struct activated_layer_info *layer_info) {
    for (uint32_t info_idx = 0; info_idx < layer_info_count; info_idx++) {
        if (0 == strcmp(layer_info[info_idx].name, vk_layer_prop->layerName)) {
            return true;
        }
    }
    return false;
}
// Free the pointer array of a layer-pointer list and reset it to empty.
// NOTE: only the array of pointers is freed, not the layer properties they
// point at — those are owned elsewhere.
void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) {
    loader_instance_heap_free(inst, layer_list->list);
    layer_list->list = NULL;
    layer_list->count = 0;
    layer_list->capacity = 0;
}
// Append the given layer-properties pointer to the list, growing the backing
// array if necessary.
// The list stores *pointers* to loader_layer_properties (see
// loader_init_pointer_layer_list, which allocates 32 * sizeof(void *)), so
// capacity accounting must use the pointer size. The previous check used
// sizeof(struct loader_layer_properties) — the full struct — which over-counted
// the bytes required and forced a reallocation on nearly every early insert.
// Returns VK_SUCCESS, or VK_ERROR_OUT_OF_HOST_MEMORY on allocation failure.
VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list,
                                             struct loader_layer_properties *props) {
    // Lazily create the backing storage on first use.
    if (list->list == NULL || list->capacity == 0) {
        if (!loader_init_pointer_layer_list(inst, list)) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
    }
    // Check for enough capacity for one more pointer-sized element.
    if (((list->count + 1) * sizeof(struct loader_layer_properties *)) >= list->capacity) {
        size_t new_capacity = list->capacity * 2;
        void *new_ptr =
            loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_layer_properties_to_list: Realloc failed for when attempting to add new layer");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        list->list = new_ptr;
        list->capacity = new_capacity;
    }
    list->list[list->count++] = props;
    return VK_SUCCESS;
}
// Determine if the provided layer should be available by querying the
// appropriate environmental-variable filters. The enable filter takes
// precedence over the disable filter; a warning is logged when a layer ends
// up force-disabled by name/type/global filters.
bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
                               const struct loader_envvar_disable_layers_filter *disable_filter,
                               const struct loader_layer_properties *prop) {
    bool available = true;
    if (disable_filter != NULL) {
        // A layer with no explicit-layer type flag is treated as implicit.
        bool is_implicit = ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0);
        bool disabled_by_type = is_implicit ? disable_filter->disable_all_implicit : disable_filter->disable_all_explicit;
        // Short-circuit: the per-name filter check only runs if the broader
        // disables did not already match.
        if (disable_filter->disable_all || disabled_by_type ||
            check_name_matches_filter_environment_var(inst, prop->info.layerName, &disable_filter->additional_filters)) {
            available = false;
        }
    }
    if (enable_filter != NULL && check_name_matches_filter_environment_var(inst, prop->info.layerName, enable_filter)) {
        // The enable filter overrides any disable.
        available = true;
    } else if (!available) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
                   VK_LAYERS_DISABLE_ENV_VAR);
    }
    return available;
}
// Search the given source_list for each of the 'name_count' layer names and add the
// matching layers to both output_list and expanded_output_list (meta-layers are
// expanded into their components in the latter).
// A missing layer records VK_ERROR_LAYER_NOT_PRESENT but processing continues with
// the remaining names; only VK_ERROR_OUT_OF_HOST_MEMORY aborts immediately.
// Returns the last error encountered, or VK_SUCCESS.
VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
                                        const struct loader_envvar_disable_layers_filter *disable_filter,
                                        struct loader_pointer_layer_list *output_list,
                                        struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count,
                                        const char *const *names, const struct loader_layer_list *source_list) {
    VkResult err = VK_SUCCESS;
    for (uint32_t i = 0; i < name_count; i++) {
        const char *source_name = names[i];
        struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list);
        if (NULL == layer_prop) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name);
            // Remember the failure but keep processing the remaining names.
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }
        // Make sure the layer isn't already in the output_list, skip adding it if it is.
        if (loader_find_layer_name_in_list(source_name, output_list)) {
            continue;
        }
        // Skip layers disabled through the environment-variable filters.
        if (!loader_layer_is_available(inst, enable_filter, disable_filter, layer_prop)) {
            continue;
        }
        // If not a meta-layer, simply add it.
        if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            err = loader_add_layer_properties_to_list(inst, output_list, layer_prop);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
            err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
        } else {
            // Meta-layers add themselves to output_list and their component
            // layers to expanded_output_list.
            err = loader_add_meta_layer(inst, enable_filter, disable_filter, layer_prop, output_list, expanded_output_list,
                                        source_list, NULL);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
        }
    }
    return err;
}
// Determine if the provided implicit layer should be enabled by querying the appropriate environmental variables.
// For an implicit layer, at least a disable environment variable is required.
bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
const struct loader_envvar_disable_layers_filter *disable_filter,
const struct loader_layer_properties *prop) {
bool enable = false;
bool forced_disabled = false;
bool forced_enabled = false;
if ((NULL != disable_filter &&
(disable_filter->disable_all || disable_filter->disable_all_implicit ||
check_name_matches_filter_environment_var(inst, prop->info.layerName, &disable_filter->additional_filters)))) {
forced_disabled = true;
}
if (NULL != enable_filter && check_name_matches_filter_environment_var(inst, prop->info.layerName, enable_filter)) {
forced_enabled = true;
}
// If no enable_environment variable is specified, this implicit layer is always be enabled by default.
if (NULL == prop->enable_env_var.name) {