-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdxgkrnl.h
More file actions
1045 lines (977 loc) · 36.6 KB
/
dxgkrnl.h
File metadata and controls
1045 lines (977 loc) · 36.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022, Microsoft Corporation.
*
* Author:
* Iouri Tarassov <iourit@linux.microsoft.com>
*
* Dxgkrnl Graphics Driver
* Headers for internal objects
*
*/
#ifndef _DXGKRNL_H
#define _DXGKRNL_H
#include <linux/atomic.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/refcount.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uuid.h>
#include <linux/version.h>
#include "hyperv.h"
#include "d3dkmthk.h"
#include "misc.h"
#include "hmgr.h"
struct dxgprocess;
struct dxgadapter;
struct dxgdevice;
struct dxgcontext;
struct dxgallocation;
struct dxgresource;
struct dxgsharedresource;
struct dxgsyncobject;
struct dxgsharedsyncobject;
struct dxghwqueue;
/*
* Driver private data.
* A single /dev/dxg device is created per virtual machine.
*/
struct dxgdriver {
/* Global driver state; see dxggbl() */
struct dxgglobal *dxgglobal;
/* Driver device — presumably used for dev_* logging via DXGDEV; confirm at call sites */
struct device *dxgdev;
struct pci_driver pci_drv;
struct hv_driver vmbus_drv;
};
extern struct dxgdriver dxgdrv;
/* Shorthand for the driver's struct device */
#define DXGDEV dxgdrv.dxgdev
/*
* Device type flags of an adapter.
* NOTE(review): individual bit semantics appear to mirror the Windows DXGK
* device type bits — confirm against the host-side definition.
*/
struct dxgk_device_types {
u32 post_device:1;
u32 post_device_certain:1;
u32 software_device:1;
u32 soft_gpu_device:1;
u32 warp_device:1;
u32 bdd_device:1;
u32 support_miracast:1;
u32 mismatched_lda:1;
u32 indirect_display_device:1;
u32 xbox_one_device:1;
u32 child_id_support_dwm_clone:1;
u32 child_id_support_dwm_clone2:1;
u32 has_internal_panel:1;
u32 rfx_vgpu_device:1;
u32 virtual_render_device:1;
u32 support_preserve_boot_display:1;
u32 is_uefi_frame_buffer:1;
u32 removable_device:1;
u32 virtual_monitor_device:1;
};
/* Reason codes used when the device scheduler is flushed (see dxgvmb_send_flush_device). */
enum dxgdevice_flushschedulerreason {
DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE = 4,
};
/* Life-cycle states of driver objects (device, context, resource, ...). */
enum dxgobjectstate {
DXGOBJECTSTATE_CREATED,
DXGOBJECTSTATE_ACTIVE,
DXGOBJECTSTATE_STOPPED,
DXGOBJECTSTATE_DESTROYED,
};
/*
* State of a VM bus channel to the host: the global channel
* (dxgglobal::channel) or a per-adapter channel (dxgadapter::channel).
*/
struct dxgvmbuschannel {
struct vmbus_channel *channel;
struct hv_device *hdev;
/* Owning adapter — presumably NULL for the global channel; confirm */
struct dxgadapter *adapter;
/* Protects packet_list_head. Note: a spinlock despite the _mutex name. */
spinlock_t packet_list_mutex;
/* List of packets sent to the host */
struct list_head packet_list_head;
/* Cache used to allocate packet tracking structures */
struct kmem_cache *packet_cache;
/* Generator of unique ids for VM bus packets */
atomic64_t packet_request_id;
};
int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev);
void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch);
/* Callback which processes messages received from the host — confirm context */
void dxgvmbuschannel_receive(void *ctx);
/* Represents a paging queue, created for a device. */
struct dxgpagingqueue {
/* Device, the paging queue belongs to */
struct dxgdevice *device;
struct dxgprocess *process;
/* Entry in dxgdevice::pqueue_list_head */
struct list_head pqueue_list_entry;
/* Handle of the owning device */
struct d3dkmthandle device_handle;
/* Handle of the paging queue object */
struct d3dkmthandle handle;
struct d3dkmthandle syncobj_handle;
/* CPU address of the mapped data — presumably the paging fence; confirm */
void *mapped_address;
};
/*
* The structure describes an event, which will be signaled by
* a message from host.
*/
enum dxghosteventtype {
/* Signals a CPU-side event in the guest (see dxghosteventcpu) */
dxghostevent_cpu_event = 1,
dxghostevent_dma_fence = 2,
};
/* Common header of host events, tracked in dxgglobal::host_event_list_head. */
struct dxghostevent {
struct list_head host_event_list_entry;
/* Unique id, used to find the event when the host signals it */
u64 event_id;
enum dxghosteventtype event_type;
};
/* Host event, which signals an eventfd and/or a completion in the guest. */
struct dxghosteventcpu {
struct dxghostevent hdr;
struct dxgprocess *process;
/* Eventfd to signal — presumably optional when completion_event is used; confirm */
struct eventfd_ctx *cpu_event;
struct completion *completion_event;
/* Free the object after it is signaled */
bool destroy_after_signal;
/* Remove from the global event list when signaled */
bool remove_from_list;
};
struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device);
void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue);
void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue);
/*
* This is GPU synchronization object, which is used to synchronize execution
* between GPU contexts/hardware queues or for tracking GPU execution progress.
* A dxgsyncobject is created when somebody creates a syncobject or opens a
* shared syncobject.
* A syncobject belongs to an adapter, unless it is a cross-adapter object.
* Cross adapter syncobjects are currently not implemented.
*
* D3DDDI_MONITORED_FENCE and D3DDDI_PERIODIC_MONITORED_FENCE are called
* "device" syncobject, because they belong to a device (dxgdevice).
* Device syncobjects are inserted to a list in dxgdevice.
*
* A syncobject can be "shared", meaning that it could be opened by many
* processes.
*
* Shared syncobjects are inserted to a list in its owner
* (dxgsharedsyncobject).
* A syncobject can be shared by using a global handle or by using
* "NT security handle".
* When global handle sharing is used, the handle is created during object
* creation.
* When "NT security" is used, the handle for sharing is created by calling
* dxgk_share_objects. On Linux "NT handle" is represented by a file
* descriptor. FD points to dxgsharedsyncobject.
*/
struct dxgsyncobject {
struct kref syncobj_kref;
enum d3dddi_synchronizationobject_type type;
/*
* List entry in dxgdevice for device sync objects.
* List entry in dxgadapter for other objects
*/
struct list_head syncobj_list_entry;
/* List entry in the dxgsharedsyncobject object for shared syncobjects */
struct list_head shared_syncobj_list_entry;
/* Adapter, the syncobject belongs to. NULL for stopped sync objects. */
struct dxgadapter *adapter;
/*
* Pointer to the device, which was used to create the object.
* This is NULL for non-device syncobjects
*/
struct dxgdevice *device;
struct dxgprocess *process;
/* Used by D3DDDI_CPU_NOTIFICATION objects */
struct dxghosteventcpu *host_event;
/* Owner object for shared syncobjects */
struct dxgsharedsyncobject *shared_owner;
/* CPU virtual address of the fence value for "device" syncobjects */
void *mapped_address;
/* Handle in the process handle table */
struct d3dkmthandle handle;
/* Cached handle of the device. Used to avoid device dereference. */
struct d3dkmthandle device_handle;
union {
struct {
/* Must be the first bit */
u32 destroyed:1;
/* Must be the second bit */
u32 stopped:1;
/* device syncobject */
u32 monitored_fence:1;
u32 cpu_event:1;
u32 shared:1;
u32 reserved:27;
};
/* Overlays the bits above — presumably for atomic bit operations; confirm */
long flags;
};
};
/*
* The structure defines an offered vGPU vm bus channel.
*/
struct dxgvgpuchannel {
/* Entry in dxgglobal::vgpu_ch_list_head */
struct list_head vgpu_ch_list_entry;
/* LUID of the adapter, the channel belongs to */
struct winluid adapter_luid;
struct hv_device *hdev;
};
/*
* The object is used as parent of all sync objects, created for a shared
* syncobject. When a shared syncobject is created without NT security, the
* handle in the global handle table will point to this object.
*/
struct dxgsharedsyncobject {
struct kref ssyncobj_kref;
/* Referenced by file descriptors */
int host_shared_handle_nt_reference;
/* Corresponding handle in the host global handle table */
struct d3dkmthandle host_shared_handle;
/*
* When the sync object is shared by NT handle, this is the
* corresponding handle in the host
*/
struct d3dkmthandle host_shared_handle_nt;
/* Protects access to host_shared_handle_nt */
struct mutex fd_mutex;
/* Protects shared_syncobj_list_head */
struct rw_semaphore syncobj_list_lock;
/* List of dxgsyncobject objects, opened for this shared object */
struct list_head shared_syncobj_list_head;
/* Entry in dxgadapter::adapter_shared_syncobj_list_head */
struct list_head adapter_shared_syncobj_list_entry;
struct dxgadapter *adapter;
enum d3dddi_synchronizationobject_type type;
u32 monitored_fence:1;
};
/* dxgsharedsyncobject and dxgsyncobject API */
struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
struct dxgsyncobject
*syncobj);
void dxgsharedsyncobj_release(struct kref *refcount);
void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
struct dxgsyncobject *syncobj);
void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
struct dxgsyncobject *syncobj);
int dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj,
struct dxgprocess *process,
struct d3dkmthandle objecthandle);
void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj);
struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
struct dxgdevice *device,
struct dxgadapter *adapter,
enum
d3dddi_synchronizationobject_type
type,
struct
d3dddi_synchronizationobject_flags
flags);
void dxgsyncobject_destroy(struct dxgprocess *process,
struct dxgsyncobject *syncobj);
void dxgsyncobject_stop(struct dxgsyncobject *syncobj);
void dxgsyncobject_release(struct kref *refcount);
/*
* Global driver state.
*
* device_state_counter - incremented every time the execution state of
* a DXGDEVICE is changed in the host. Used to optimize access to the
* device execution state.
*/
struct dxgglobal {
struct dxgdriver *drvdata;
/* The global VM bus channel to the host */
struct dxgvmbuschannel channel;
struct hv_device *hdev;
u32 num_adapters;
u32 vmbus_ver; /* Interface version */
atomic_t device_state_counter;
/* MMIO space resource — presumably the IO space region; confirm */
struct resource *mem;
u64 mmiospace_base;
u64 mmiospace_size;
/* The /dev/dxg character device */
struct miscdevice dxgdevice;
struct mutex device_mutex;
/* list of created processes */
struct list_head plisthead;
struct mutex plistmutex;
/* list of created adapters */
struct list_head adapter_list_head;
struct rw_semaphore adapter_list_lock;
/*
* List of the vGPU VM bus channels (dxgvgpuchannel)
* Protected by device_mutex
*/
struct list_head vgpu_ch_list_head;
/* protects access to the global VM bus channel */
struct rw_semaphore channel_lock;
/* protects the dxgprocess_adapter lists */
struct mutex process_adapter_mutex;
/* list of events, waiting to be signaled by the host */
struct list_head host_event_list_head;
/* Protects host_event_list_head. Note: a spinlock despite the _mutex name. */
spinlock_t host_event_list_mutex;
/* Generator of unique host event ids */
atomic64_t host_event_id;
bool global_channel_initialized;
bool async_msg_enabled;
bool misc_registered;
bool pci_registered;
bool vmbus_registered;
bool map_guest_pages_enabled;
};
/* Returns the pointer to the global driver state. */
static inline struct dxgglobal *dxggbl(void)
{
return dxgdrv.dxgglobal;
}
/* dxgglobal API */
int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid,
struct winluid host_vgpu_luid);
void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state);
void dxgglobal_release_adapter_list_lock(enum dxglockstate state);
int dxgglobal_init_global_channel(void);
void dxgglobal_destroy_global_channel(void);
struct vmbus_channel *dxgglobal_get_vmbus(void);
struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void);
void dxgglobal_acquire_process_adapter_lock(void);
void dxgglobal_release_process_adapter_lock(void);
/* Host event registry: add/remove/look up events by their unique ids */
void dxgglobal_add_host_event(struct dxghostevent *hostevent);
void dxgglobal_remove_host_event(struct dxghostevent *hostevent);
u64 dxgglobal_new_host_event_id(void);
void dxgglobal_signal_host_event(u64 event_id);
struct dxghostevent *dxgglobal_get_host_event(u64 event_id);
int dxgglobal_acquire_channel_lock(void);
void dxgglobal_release_channel_lock(void);
/*
* Describes adapter information for each process
* (the per-process, per-adapter state).
*/
struct dxgprocess_adapter {
/* Entry in dxgadapter::adapter_process_list_head */
struct list_head adapter_process_list_entry;
/* Entry in dxgprocess::process_adapter_list_head */
struct list_head process_adapter_list_entry;
/* List of all dxgdevice objects created for the process on adapter */
struct list_head device_list_head;
struct mutex device_list_mutex;
struct dxgadapter *adapter;
struct dxgprocess *process;
/* Plain int refcount — presumably how many times the adapter was opened; confirm locking */
int refcount;
};
struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process,
struct dxgadapter
*adapter);
void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter);
int dxgprocess_adapter_add_device(struct dxgprocess *process,
struct dxgadapter *adapter,
struct dxgdevice *device);
void dxgprocess_adapter_remove_device(struct dxgdevice *device);
void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info);
void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info);
/*
* The structure represents a process, which opened the /dev/dxg device.
* A corresponding object is created on the host.
*/
struct dxgprocess {
/*
* Process list entry in dxgglobal.
* Protected by the dxgglobal->plistmutex.
*/
struct list_head plistentry;
pid_t pid;
pid_t tgid;
pid_t vpid; /* pid from the current namespace */
struct pid_namespace *nspid; /* namespace id */
/* how many times the process was opened */
struct kref process_kref;
/* protects the object memory */
struct kref process_mem_kref;
/*
* This handle table is used for all objects except dxgadapter
* The handle table lock order is higher than the local_handle_table
* lock
*/
struct hmgrtable handle_table;
/*
* This handle table is used for dxgadapter objects.
* The handle table lock order is lowest.
*/
struct hmgrtable local_handle_table;
/* Handle of the corresponding object on the host */
struct d3dkmthandle host_handle;
/*
* List of opened adapters (dxgprocess_adapter).
* Protected by process_adapter_mutex.
*/
struct list_head process_adapter_list_head;
};
/* dxgprocess API */
struct dxgprocess *dxgprocess_create(void);
void dxgprocess_destroy(struct dxgprocess *process);
void dxgprocess_release(struct kref *refcount);
void dxgprocess_mem_release(struct kref *refcount);
int dxgprocess_open_adapter(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle *handle);
int dxgprocess_close_adapter(struct dxgprocess *process,
struct d3dkmthandle handle);
struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process,
struct d3dkmthandle handle);
struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process,
struct d3dkmthandle handle);
struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process,
struct d3dkmthandle handle);
struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process,
enum hmgrentry_type t,
struct d3dkmthandle h);
void dxgprocess_ht_lock_shared_down(struct dxgprocess *process);
void dxgprocess_ht_lock_shared_up(struct dxgprocess *process);
void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process);
void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process);
struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess
*process,
struct dxgadapter
*adapter);
/* Execution state of an adapter. */
enum dxgadapter_state {
DXGADAPTER_STATE_ACTIVE = 0,
DXGADAPTER_STATE_STOPPED = 1,
/* Waiting for the adapter's VM bus channel — TODO confirm */
DXGADAPTER_STATE_WAITING_VMBUS = 2,
};
/*
* This object represents the graphics adapter.
* Objects, which take reference on the adapter:
* - dxgglobal
* - dxgdevice
* - adapter handle (struct d3dkmthandle)
*/
struct dxgadapter {
/* Protects the adapter state; acquired shared or exclusive via helpers */
struct rw_semaphore core_lock;
struct kref adapter_kref;
/* Protects creation and destruction of dxgdevice objects */
struct mutex device_creation_lock;
/* Entry in the list of adapters in dxgglobal */
struct list_head adapter_list_entry;
/* The list of dxgprocess_adapter entries */
struct list_head adapter_process_list_head;
/* List of all dxgsharedresource objects */
struct list_head shared_resource_list_head;
/* List of all dxgsharedsyncobject objects */
struct list_head adapter_shared_syncobj_list_head;
/* List of all non-device dxgsyncobject objects */
struct list_head syncobj_list_head;
/* This lock protects shared resource and syncobject lists */
struct rw_semaphore shared_resource_list_lock;
struct pci_dev *pci_dev;
struct hv_device *hv_dev;
/* Per-adapter VM bus channel to the host */
struct dxgvmbuschannel channel;
/* Handle of the adapter in the host */
struct d3dkmthandle host_handle;
enum dxgadapter_state adapter_state;
struct winluid host_adapter_luid;
struct winluid host_vgpu_luid;
struct winluid luid; /* VM bus channel luid */
/* UTF-16 strings — presumably reported by the host; confirm encoding */
u16 device_description[80];
u16 device_instance_id[WIN_MAX_PATH];
bool compute_only;
bool stopping_adapter;
};
/* dxgadapter API */
int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev);
bool dxgadapter_is_active(struct dxgadapter *adapter);
void dxgadapter_start(struct dxgadapter *adapter);
void dxgadapter_stop(struct dxgadapter *adapter);
void dxgadapter_release(struct kref *refcount);
int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter);
void dxgadapter_release_lock_shared(struct dxgadapter *adapter);
int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter);
void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter);
void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter);
void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
struct dxgsharedsyncobject *so);
void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
struct dxgsharedsyncobject *so);
void dxgadapter_add_syncobj(struct dxgadapter *adapter,
struct dxgsyncobject *so);
void dxgadapter_remove_syncobj(struct dxgsyncobject *so);
void dxgadapter_add_process(struct dxgadapter *adapter,
struct dxgprocess_adapter *process_info);
void dxgadapter_remove_process(struct dxgprocess_adapter *process_info);
void dxgadapter_remove_shared_resource(struct dxgadapter *adapter,
struct dxgsharedresource *object);
/*
* The object represents the device object.
* The following objects take reference on the device
* - dxgcontext
* - device handle (struct d3dkmthandle)
*/
struct dxgdevice {
enum dxgobjectstate object_state;
/* Device takes reference on the adapter */
struct dxgadapter *adapter;
struct dxgprocess_adapter *adapter_info;
struct dxgprocess *process;
/* Entry in the DXGPROCESS_ADAPTER device list */
struct list_head device_list_entry;
struct kref device_kref;
/* Protects destruction of the device object */
struct rw_semaphore device_lock;
struct rw_semaphore context_list_lock;
struct list_head context_list_head;
/* List of device allocations */
struct rw_semaphore alloc_list_lock;
struct list_head alloc_list_head;
struct list_head resource_list_head;
/* List of paging queues. Protected by process handle table lock. */
struct list_head pqueue_list_head;
/* List of "device" syncobjects (see dxgsyncobject) */
struct list_head syncobj_list_head;
/* Handle of the device in the process handle table */
struct d3dkmthandle handle;
enum d3dkmt_deviceexecution_state execution_state;
int execution_state_counter;
u32 handle_valid;
};
/* dxgdevice API */
struct dxgdevice *dxgdevice_create(struct dxgadapter *a, struct dxgprocess *p);
void dxgdevice_destroy(struct dxgdevice *device);
void dxgdevice_stop(struct dxgdevice *device);
void dxgdevice_mark_destroyed(struct dxgdevice *device);
int dxgdevice_acquire_lock_shared(struct dxgdevice *dev);
void dxgdevice_release_lock_shared(struct dxgdevice *dev);
void dxgdevice_release(struct kref *refcount);
void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx);
void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx);
void dxgdevice_add_alloc(struct dxgdevice *dev, struct dxgallocation *a);
void dxgdevice_remove_alloc(struct dxgdevice *dev, struct dxgallocation *a);
void dxgdevice_remove_alloc_safe(struct dxgdevice *dev,
struct dxgallocation *a);
void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res);
void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res);
void dxgdevice_add_paging_queue(struct dxgdevice *dev,
struct dxgpagingqueue *pqueue);
void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue);
void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so);
void dxgdevice_remove_syncobj(struct dxgsyncobject *so);
bool dxgdevice_is_active(struct dxgdevice *dev);
void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev);
void dxgdevice_release_context_list_lock(struct dxgdevice *dev);
void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *dev);
void dxgdevice_release_alloc_list_lock(struct dxgdevice *dev);
void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *dev);
void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *dev);
/*
* The object represents the execution context of a device.
*/
struct dxgcontext {
enum dxgobjectstate object_state;
struct dxgdevice *device;
struct dxgprocess *process;
/* entry in the device context list */
struct list_head context_list_entry;
/* List of hardware queues, created for the context */
struct list_head hwqueue_list_head;
struct rw_semaphore hwqueue_list_lock;
struct kref context_kref;
/* Handle of the context in the process handle table */
struct d3dkmthandle handle;
/* Handle of the device, which owns the context */
struct d3dkmthandle device_handle;
};
struct dxgcontext *dxgcontext_create(struct dxgdevice *dev);
void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx);
void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx);
void dxgcontext_release(struct kref *refcount);
int dxgcontext_add_hwqueue(struct dxgcontext *ctx,
struct dxghwqueue *hq);
void dxgcontext_remove_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq);
void dxgcontext_remove_hwqueue_safe(struct dxgcontext *ctx,
struct dxghwqueue *hq);
bool dxgcontext_is_active(struct dxgcontext *ctx);
/*
* The object represents the execution hardware queue of a device.
*/
struct dxghwqueue {
/* entry in the context hw queue list */
struct list_head hwqueue_list_entry;
struct kref hwqueue_kref;
struct dxgcontext *context;
struct dxgprocess *process;
/* Handle of the progress fence sync object of the queue */
struct d3dkmthandle progress_fence_sync_object;
/* Handle of the hardware queue in the process handle table */
struct d3dkmthandle handle;
struct d3dkmthandle device_handle;
/* CPU address of the mapped progress fence value */
void *progress_fence_mapped_address;
};
struct dxghwqueue *dxghwqueue_create(struct dxgcontext *ctx);
void dxghwqueue_destroy(struct dxgprocess *pr, struct dxghwqueue *hq);
void dxghwqueue_release(struct kref *refcount);
/*
* When a shared resource is created this structure provides information
* about every allocation in the resource. It is used when someone opens the
* resource and locks its allocation.
*/
struct dxgsharedallocdata {
u32 private_data_size; /* Size of private data */
u32 num_pages; /* Allocation size in pages */
bool cached; /* True if the allocation memory is cached */
};
/*
* A shared resource object is created to track the list of dxgresource objects,
* which are opened for the same underlying shared resource.
* Objects are shared by using a file descriptor handle.
* FD is created by calling dxgk_share_objects and providing a handle to
* dxgsharedresource. The FD points to a dxgresource object, which is created
* by calling dxgk_open_resource_nt. dxgresource object is referenced by the
* FD.
*
* The object is referenced by every dxgresource in its list.
*
*/
struct dxgsharedresource {
/* Every dxgresource object in the resource list takes a reference */
struct kref sresource_kref;
struct dxgadapter *adapter;
/* List of dxgresource objects, opened for the shared resource. */
/* Protected by dxgadapter::shared_resource_list_lock */
struct list_head resource_list_head;
/* Entry in the list of dxgsharedresource in dxgadapter */
/* Protected by dxgadapter::shared_resource_list_lock */
struct list_head shared_resource_list_entry;
/* Protects access to host_shared_handle_nt (cf. dxgsharedsyncobject::fd_mutex) */
struct mutex fd_mutex;
/* Referenced by file descriptors */
int host_shared_handle_nt_reference;
/* Corresponding global handle in the host */
struct d3dkmthandle host_shared_handle;
/*
* When the sync object is shared by NT handle, this is the
* corresponding handle in the host
*/
struct d3dkmthandle host_shared_handle_nt;
/* Values below are computed when the resource is sealed */
u32 runtime_private_data_size;
u32 alloc_private_data_size;
u32 resource_private_data_size;
u32 allocation_count;
union {
struct {
/* Cannot add new allocations */
u32 sealed:1;
u32 reserved:31;
};
long flags;
};
/* Array of per-allocation info — presumably allocation_count entries; confirm */
struct dxgsharedallocdata *alloc_info;
u8 *alloc_private_data;
u8 *runtime_private_data;
u8 *resource_private_data;
};
struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter);
void dxgsharedresource_destroy(struct kref *refcount);
void dxgsharedresource_add_resource(struct dxgsharedresource *sres,
struct dxgresource *res);
void dxgsharedresource_remove_resource(struct dxgsharedresource *sres,
struct dxgresource *res);
/*
* Represents a resource — a container of dxgallocation objects,
* created on a device.
*/
struct dxgresource {
struct kref resource_kref;
enum dxgobjectstate object_state;
/* Handle of the resource in the process handle table */
struct d3dkmthandle handle;
/* List of dxgallocation objects, which belong to the resource */
struct list_head alloc_list_head;
/* Entry in dxgdevice::resource_list_head */
struct list_head resource_list_entry;
/* Entry in dxgsharedresource::resource_list_head */
struct list_head shared_resource_list_entry;
struct dxgdevice *device;
struct dxgprocess *process;
/* Protects adding allocations to resource and resource destruction */
struct mutex resource_mutex;
u64 private_runtime_handle;
union {
struct {
u32 destroyed:1; /* Must be the first */
u32 handle_valid:1;
u32 reserved:30;
};
long flags;
};
/* Owner of the shared resource */
struct dxgsharedresource *shared_owner;
};
struct dxgresource *dxgresource_create(struct dxgdevice *dev);
void dxgresource_destroy(struct dxgresource *res);
void dxgresource_free_handle(struct dxgresource *res);
void dxgresource_release(struct kref *refcount);
int dxgresource_add_alloc(struct dxgresource *res,
struct dxgallocation *a);
void dxgresource_remove_alloc(struct dxgresource *res, struct dxgallocation *a);
void dxgresource_remove_alloc_safe(struct dxgresource *res,
struct dxgallocation *a);
bool dxgresource_is_active(struct dxgresource *res);
/*
* Size-prefixed blob of driver private data.
* NOTE(review): data[1] is the legacy trailing-array idiom; callers
* presumably allocate sizeof(struct privdata) plus the payload — a C99
* flexible array member (u8 data[];) would be cleaner, but it changes
* sizeof(struct privdata), so existing size computations must be audited
* before converting.
*/
struct privdata {
u32 data_size;
u8 data[1];
};
/* Tracks a single GPU allocation, owned by a device or by a resource. */
struct dxgallocation {
/* Entry in the device list or resource list (when resource exists) */
struct list_head alloc_list_entry;
/* Allocation owner */
union {
struct dxgdevice *device;
struct dxgresource *resource;
} owner;
struct dxgprocess *process;
/* Pointer to private driver data desc. Used for shared resources */
struct privdata *priv_drv_data;
struct d3dkmthandle alloc_handle;
/* Set to 1 when allocation belongs to resource. */
u32 resource_owner:1;
/* Set to 1 when 'cpu_address' is mapped to the IO space. */
u32 cpu_address_mapped:1;
/* Set to 1 when the allocation is mapped as cached */
u32 cached:1;
u32 handle_valid:1;
/* GPADL address list for existing sysmem allocations */
struct vmbus_gpadl gpadl;
/* Number of pages in the 'pages' array */
u32 num_pages;
/*
* How many times dxgk_lock2 is called to allocation, which is mapped
* to IO space.
*/
u32 cpu_address_refcount;
/*
* CPU address from the existing sysmem allocation, or
* mapped to the CPU visible backing store in the IO space
*/
void *cpu_address;
/* Describes pages for the existing sysmem allocation */
struct page **pages;
};
struct dxgallocation *dxgallocation_create(struct dxgprocess *process);
void dxgallocation_stop(struct dxgallocation *a);
void dxgallocation_destroy(struct dxgallocation *a);
void dxgallocation_free_handle(struct dxgallocation *a);
/* ioctl entry points of the /dev/dxg device */
long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2);
long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2);
int dxg_unmap_iospace(void *va, u32 size);
/*
 * The convention is that the VMBus instance id is a GUID, but the host sets
 * the lower part of the value to the host adapter LUID. The function
 * provides the necessary conversion.
 *
 * @guid: VMBus instance id to convert (only its low bytes are read)
 * @luid: receives the extracted adapter LUID
 */
static inline void guid_to_luid(guid_t *guid, struct winluid *luid)
{
	/*
	 * Copy with memcpy instead of the previous pointer cast:
	 * guid->b is a byte array with no alignment guarantee for
	 * struct winluid, and the cast also violated strict-aliasing
	 * rules. memcpy is safe on all architectures and compiles to
	 * the same load/store for this small, fixed size.
	 */
	memcpy(luid, &guid->b[0], sizeof(*luid));
}
/*
* VM bus interface
*
*/
/*
* The interface version is used to ensure that the host and the guest use the
* same VM bus protocol. It needs to be incremented every time the VM bus
* interface changes. DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION is
* incremented each time the earlier versions of the interface are no longer
* compatible with the current version.
*/
#define DXGK_VMBUS_INTERFACE_VERSION_OLD 27
#define DXGK_VMBUS_INTERFACE_VERSION 40
#define DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION 16
/*
* Functions, which send the corresponding messages to the host over the
* VM bus channel.
*/
void dxgvmb_initialize(void);
int dxgvmb_send_set_iospace_region(u64 start, u64 len);
int dxgvmb_send_create_process(struct dxgprocess *process);
int dxgvmb_send_destroy_process(struct d3dkmthandle process);
int dxgvmb_send_open_adapter(struct dxgadapter *adapter);
int dxgvmb_send_close_adapter(struct dxgadapter *adapter);
int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter);
struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter,
struct dxgprocess *process,
struct d3dkmt_createdevice *args);
int dxgvmb_send_destroy_device(struct dxgadapter *adapter,
struct dxgprocess *process,
struct d3dkmthandle h);
int dxgvmb_send_flush_device(struct dxgdevice *device,
enum dxgdevice_flushschedulerreason reason);
struct d3dkmthandle
dxgvmb_send_create_context(struct dxgadapter *adapter,
struct dxgprocess *process,
struct d3dkmt_createcontextvirtual
*args);
int dxgvmb_send_destroy_context(struct dxgadapter *adapter,
struct dxgprocess *process,
struct d3dkmthandle h);
int dxgvmb_send_create_paging_queue(struct dxgprocess *pr,
struct dxgdevice *dev,
struct d3dkmt_createpagingqueue *args,
struct dxgpagingqueue *pq);
int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle h);
int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev,
struct d3dkmt_createallocation *args,
struct d3dkmt_createallocation *__user inargs,
struct dxgresource *res,
struct dxgallocation **allocs,
struct d3dddi_allocationinfo2 *alloc_info,
struct d3dkmt_createstandardallocation *stda);
int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev,
struct d3dkmt_destroyallocation2 *args,
struct d3dkmthandle *alloc_handles);
int dxgvmb_send_make_resident(struct dxgprocess *pr, struct dxgadapter *adapter,
struct d3dddi_makeresident *args);
int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter,
struct d3dkmt_evict *args);
int dxgvmb_send_submit_command(struct dxgprocess *pr,
struct dxgadapter *adapter,
struct d3dkmt_submitcommand *args);
int dxgvmb_send_map_gpu_va(struct dxgprocess *pr, struct d3dkmthandle h,
struct dxgadapter *adapter,
struct d3dddi_mapgpuvirtualaddress *args);
int dxgvmb_send_reserve_gpu_va(struct dxgprocess *pr,
struct dxgadapter *adapter,
struct d3dddi_reservegpuvirtualaddress *args);
int dxgvmb_send_free_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter,
struct d3dkmt_freegpuvirtualaddress *args);
int dxgvmb_send_update_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter,
struct d3dkmt_updategpuvirtualaddress *args);
int dxgvmb_send_create_sync_object(struct dxgprocess *pr,
struct dxgadapter *adapter,
struct d3dkmt_createsynchronizationobject2
*args, struct dxgsyncobject *so);
int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr,
struct d3dkmthandle h);
int dxgvmb_send_signal_sync_object(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dddicb_signalflags flags,
u64 legacy_fence_value,
struct d3dkmthandle context,
u32 object_count,
struct d3dkmthandle *object,
u32 context_count,
struct d3dkmthandle *contexts,
u32 fence_count, u64 *fences,
struct eventfd_ctx *cpu_event,
struct d3dkmthandle device);
int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle context,
u32 object_count,
struct d3dkmthandle *objects,
u64 *fences,
bool legacy_fence);
int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process,
struct dxgadapter *adapter,
struct
d3dkmt_waitforsynchronizationobjectfromcpu
*args,
bool user_address,
u64 cpu_event);
int dxgvmb_send_lock2(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_lock2 *args,
struct d3dkmt_lock2 *__user outargs);
int dxgvmb_send_unlock2(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_unlock2 *args);
int dxgvmb_send_update_alloc_property(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dddi_updateallocproperty *args,
struct d3dddi_updateallocproperty *__user
inargs);
int dxgvmb_send_mark_device_as_error(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_markdeviceaserror *args);
int dxgvmb_send_set_allocation_priority(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_setallocationpriority *a);
int dxgvmb_send_get_allocation_priority(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_getallocationpriority *a);
int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle context,
int priority, bool in_process);
int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle context,
int *priority,
bool in_process);
int dxgvmb_send_offer_allocations(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_offerallocations *args);
int dxgvmb_send_reclaim_allocations(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle device,
struct d3dkmt_reclaimallocations2 *args,
u64 __user *paging_fence_value);
int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle other_process,
struct
d3dkmt_changevideomemoryreservation
*args);
int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_createhwqueue *args,
struct d3dkmt_createhwqueue *__user inargs,
struct dxghwqueue *hq);
int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle handle);
int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_queryadapterinfo *args);
int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_submitcommandtohwqueue *a);
int dxgvmb_send_query_clock_calibration(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_queryclockcalibration *a,
struct d3dkmt_queryclockcalibration
*__user inargs);
int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_flushheaptransitions *arg);
int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
struct dxgvmbuschannel *channel,
struct d3dkmt_opensyncobjectfromnthandle2
*args,
struct dxgsyncobject *syncobj);
int dxgvmb_send_open_sync_object(struct dxgprocess *process,
struct d3dkmthandle device,
struct d3dkmthandle host_shared_syncobj,
struct d3dkmthandle *syncobj);
int dxgvmb_send_query_alloc_residency(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_queryallocationresidency
*args);
int dxgvmb_send_escape(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_escape *args,
bool user_mode);
int dxgvmb_send_query_vidmem_info(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_queryvideomemoryinfo *args,
struct d3dkmt_queryvideomemoryinfo
*__user iargs);
int dxgvmb_send_get_device_state(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_getdevicestate *args,
struct d3dkmt_getdevicestate *__user inargs);
int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
struct d3dkmthandle object,
struct d3dkmthandle *shared_handle);
int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle);
int dxgvmb_send_open_resource(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmthandle device,
struct d3dkmthandle global_share,
u32 allocation_count,
u32 total_priv_drv_data_size,
struct d3dkmthandle *resource_handle,
struct d3dkmthandle *alloc_handles);
int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device,
enum d3dkmdt_standardallocationtype t,
struct d3dkmdt_gdisurfacedata *data,
u32 physical_adapter_index,
u32 *alloc_priv_driver_size,
void *prive_alloc_data,
u32 *res_priv_data_size,
void *priv_res_data);
int dxgvmb_send_query_statistics(struct d3dkmthandle host_process_handle,
struct dxgadapter *adapter,
struct d3dkmt_querystatistics *args);