Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_priv.h')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  69
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d8c8b5ff449a..daa9d47514c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 
 #include "amd_shared.h"
+#include "amdgpu.h"
 
 #define KFD_MAX_RING_ENTRY_SIZE 8
 
@@ -321,6 +322,9 @@ struct kfd_dev {
 	unsigned int max_doorbell_slices;
 
 	int noretry;
+
+	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
+	struct dev_pagemap pgmap;
 };
 
 enum kfd_mempool {
@@ -649,12 +653,6 @@ enum kfd_pdd_bound {
 
 /* Data that is per-process-per device. */
 struct kfd_process_device {
-	/*
-	 * List of all per-device data for a process.
-	 * Starts from kfd_process.per_device_data.
-	 */
-	struct list_head per_device_list;
-
 	/* The device that owns this data. */
 	struct kfd_dev *dev;
 
@@ -674,7 +672,7 @@ struct kfd_process_device {
 
 	/* VM context for GPUVM allocations */
 	struct file *drm_file;
-	void *vm;
+	void *drm_priv;
 
 	/* GPUVM allocations storage */
 	struct idr alloc_idr;
@@ -736,6 +734,17 @@ struct kfd_process_device {
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
 
+struct svm_range_list {
+	struct mutex			lock;
+	struct rb_root_cached		objects;
+	struct list_head		list;
+	struct work_struct		deferred_list_work;
+	struct list_head		deferred_range_list;
+	spinlock_t			deferred_list_lock;
+	atomic_t			evicted_ranges;
+	struct delayed_work		restore_work;
+};
+
 /* Process data */
 struct kfd_process {
 	/*
@@ -771,10 +780,11 @@ struct kfd_process {
 	u32 pasid;
 
 	/*
-	 * List of kfd_process_device structures,
+	 * Array of kfd_process_device pointers,
 	 * one for each device the process is using.
 	 */
-	struct list_head per_device_data;
+	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
+	uint32_t n_pdds;
 
 	struct process_queue_manager pqm;
 
@@ -813,6 +823,12 @@ struct kfd_process {
 	struct kobject *kobj;
 	struct kobject *kobj_queues;
 	struct attribute attr_pasid;
+
+	/* shared virtual memory registered by this process */
+	struct svm_range_list svms;
+	bool svm_disabled;
+
+	bool xnack_enabled;
 };
 
 #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -846,6 +862,20 @@ struct kfd_process *kfd_create_process(struct file *filep);
 struct kfd_process *kfd_get_process(const struct task_struct *);
 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
+
+int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
+int kfd_process_gpuid_from_kgd(struct kfd_process *p,
+			       struct amdgpu_device *adev, uint32_t *gpuid,
+			       uint32_t *gpuidx);
+static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
+				uint32_t gpuidx, uint32_t *gpuid) {
+	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
+}
+static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
+				struct kfd_process *p, uint32_t gpuidx) {
+	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
+}
+
 void kfd_unref_process(struct kfd_process *p);
 int kfd_process_evict_queues(struct kfd_process *p);
 int kfd_process_restore_queues(struct kfd_process *p);
@@ -861,6 +891,8 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
 struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 							struct kfd_process *p);
 
+bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);
+
 int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 			  struct vm_area_struct *vma);
 
@@ -872,14 +904,6 @@ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
 					int handle);
 
-/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(
-						struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(
-						struct kfd_process *p,
-						struct kfd_process_device *pdd);
-bool kfd_has_process_device_data(struct kfd_process *p);
-
 /* PASIDs */
 int kfd_pasid_init(void);
 void kfd_pasid_exit(void);
@@ -1012,8 +1036,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
 		       u32 *ctl_stack_used_size,
 		       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-			      unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+			      uint64_t fence_value,
 			      unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1049,7 +1073,7 @@ struct packet_manager_funcs {
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine);
 	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address, uint32_t fence_value);
+			uint64_t fence_address, uint64_t fence_value);
 	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
 	/* Packet sizes */
@@ -1064,6 +1088,7 @@ struct packet_manager_funcs {
 
 extern const struct packet_manager_funcs kfd_vi_pm_funcs;
 extern const struct packet_manager_funcs kfd_v9_pm_funcs;
+extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;
 
 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
 void pm_uninit(struct packet_manager *pm, bool hanging);
@@ -1071,7 +1096,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 			struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-				uint32_t fence_value);
+				uint64_t fence_value);
 int pm_send_unmap_queue(struct packet_manager *pm,
 			enum kfd_queue_type type,
 			enum kfd_unmap_queues_filter mode,
@@ -1119,6 +1144,8 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
 
 void kfd_signal_reset_event(struct kfd_dev *dev);
 
+void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
+
 void kfd_flush_tlb(struct kfd_process_device *pdd);
 
 int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
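
Note on the per_device_list removal above: per-process device data is now kept in the fixed pdds[] array and walked by index up to n_pdds, which is why the kfd_get_first/next_process_device_data() iterator declarations are dropped. Below is a minimal sketch of that access pattern, assuming a caller already holds a valid struct kfd_process *p; the function name and loop body are illustrative only and not part of this patch.

/* Illustrative sketch: iterate per-device data via the new pdds[] array. */
static void example_for_each_pdd(struct kfd_process *p)
{
	uint32_t i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		/* per-device work would go here, e.g. look at pdd->dev or pdd->drm_priv */
	}
}

/*
 * For single index lookups, the inline helper added by this patch returns
 * NULL when the index is out of range:
 *
 *	struct kfd_process_device *pdd = kfd_process_device_from_gpuidx(p, gpuidx);
 *	if (!pdd)
 *		return -EINVAL;
 */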
