// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021-2022 Xilinx, Inc.
 * Authors:
 *	Sonal Santan <sonal.santan@xilinx.com>
 */

#ifndef _SCHED_TEST_COMMON_H_
#define _SCHED_TEST_COMMON_H_

#include <linux/platform_device.h>
#include <linux/spinlock_types.h>
#include <linux/idr.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/gpu_scheduler.h>

#include "uapi/sched_test.h"

struct sched_test_queue_state {
	struct drm_gpu_scheduler sched;
	u64 fence_context;
	u64 emit_seqno;
};

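/*
 * The fence context is typically reserved once per queue when the scheduler
 * side is brought up. A minimal sketch, assuming this happens inside
 * sched_test_sched_init() (illustration only, not taken from this driver):
 *
 *	sdev->queue[i].fence_context = dma_fence_context_alloc(1);
 *	sdev->queue[i].emit_seqno = 0;
 */
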
/* Helper struct for the HW emulation thread */
struct sched_test_hwemu {
	struct sched_test_device *dev;
	/* Kernel thread emulating HW and processing jobs submitted by the DRM scheduler */
	struct task_struct *hwemu_thread;
	/* Queue of jobs waiting to be processed by the HW emulation thread */
	struct list_head events_list;
	/* Protects the events_list job queue */
	spinlock_t events_lock;
	/* Used for irq_fence locking between the scheduler and the HW emulation thread */
	spinlock_t job_lock;
	/* Count of jobs processed so far */
	unsigned long count;
	/* Wait queue the emulation thread sleeps on until new jobs arrive */
	wait_queue_head_t wq;
	/* Which emulated HW queue this thread services */
	enum sched_test_queue qu;
};

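/*
 * A minimal sketch (not part of this header) of how the emulation thread
 * could drain events_list and signal the IRQ fences. The list linkage member
 * ('node') on the job and the function name are assumptions made purely for
 * illustration.
 *
 *	static int sched_test_hwemu_thread(void *data)
 *	{
 *		struct sched_test_hwemu *hwemu = data;
 *
 *		while (!kthread_should_stop()) {
 *			struct sched_test_job *job = NULL;
 *			unsigned long flags;
 *
 *			wait_event_interruptible(hwemu->wq,
 *						 !list_empty(&hwemu->events_list) ||
 *						 kthread_should_stop());
 *
 *			// pop the next job under the queue lock
 *			spin_lock_irqsave(&hwemu->events_lock, flags);
 *			if (!list_empty(&hwemu->events_list)) {
 *				job = list_first_entry(&hwemu->events_list,
 *						       struct sched_test_job, node);
 *				list_del(&job->node);
 *			}
 *			spin_unlock_irqrestore(&hwemu->events_lock, flags);
 *
 *			// "complete" the job by signalling its IRQ fence
 *			if (job) {
 *				dma_fence_signal(job->irq_fence);
 *				hwemu->count++;
 *			}
 *		}
 *		return 0;
 *	}
 */
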
struct sched_test_device {
	struct drm_device drm;
	struct platform_device *platform;
	struct sched_test_queue_state queue[SCHED_TSTQ_MAX];
	/* Abstraction for the emulated HW queues */
	struct sched_test_hwemu *hwemu[SCHED_TSTQ_MAX];
};

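/*
 * Because sched_test_device embeds the drm_device, it would normally be
 * allocated with the managed DRM helper during platform probe. A sketch,
 * where 'sched_test_driver' (the struct drm_driver) and the probe context
 * are assumptions:
 *
 *	sdev = devm_drm_dev_alloc(&pdev->dev, &sched_test_driver,
 *				  struct sched_test_device, drm);
 *	if (IS_ERR(sdev))
 *		return PTR_ERR(sdev);
 *	sdev->platform = pdev;
 */
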
/* File private data structure */
struct sched_test_file_priv {
	struct sched_test_device *sdev;
	struct drm_sched_entity entity[SCHED_TSTQ_MAX];
	/* Job objects submitted by an application are tracked by this container */
	struct idr job_idr;
};

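/*
 * A hedged sketch of how job_idr could hand a job handle back to userspace
 * and look it up again later. The ioctl argument layout is an illustrative
 * assumption (not this driver's actual uAPI) and locking around the IDR is
 * omitted:
 *
 *	int handle = idr_alloc(&priv->job_idr, job, 1, 0, GFP_KERNEL);
 *	if (handle < 0)
 *		return handle;
 *	args->out_handle = handle;	// returned to the application
 *
 *	// later, on a wait/abort ioctl:
 *	job = idr_find(&priv->job_idr, handle);
 *	...
 *	idr_remove(&priv->job_idr, handle);
 */
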
struct sched_test_job {
	struct drm_sched_job base;
	/* Reference count for this job object */
	struct kref refcount;
	struct sched_test_device *sdev;
	/* The 'done' fence (if any) of another job that this job depends on */
	struct dma_fence *in_fence;
	/* Reference to the 'finished' fence owned by the drm_sched_job */
	struct dma_fence *done_fence;
	/* Fence created by the driver and used between the DRM scheduler and the emulated HW thread */
	struct dma_fence *irq_fence;
	/* Queue this job was submitted to */
	enum sched_test_queue qu;
	/* Callback that frees the job once its refcount drops to zero */
	void (*free)(struct kref *ref);
};

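/*
 * Illustrative sketch (an assumption, not taken from this driver) of the
 * refcount life cycle: the release callback drops the fence references and
 * frees the job, and callers drop their reference with kref_put():
 *
 *	static void sched_test_job_release(struct kref *ref)
 *	{
 *		struct sched_test_job *job =
 *			container_of(ref, struct sched_test_job, refcount);
 *
 *		dma_fence_put(job->in_fence);
 *		dma_fence_put(job->done_fence);
 *		dma_fence_put(job->irq_fence);
 *		kfree(job);
 *	}
 *
 *	// drop a reference once the caller is done with the job
 *	kref_put(&job->refcount, job->free);
 */
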
/* Models the IRQ fence */
struct sched_test_fence {
	struct dma_fence base;
	struct sched_test_device *sdev;
	u64 seqno;
	enum sched_test_queue qu;
};

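/*
 * Sketch of how such a fence might be initialised from the per-queue state,
 * using job_lock (see sched_test_hwemu) as the fence lock. The allocation
 * and 'sched_test_fence_ops' are assumptions for illustration:
 *
 *	struct sched_test_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	struct sched_test_queue_state *q = &sdev->queue[qu];
 *
 *	if (!f)
 *		return NULL;
 *	f->sdev = sdev;
 *	f->qu = qu;
 *	f->seqno = ++q->emit_seqno;
 *	dma_fence_init(&f->base, &sched_test_fence_ops,
 *		       &sdev->hwemu[qu]->job_lock,
 *		       q->fence_context, f->seqno);
 */
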
static inline struct sched_test_job *to_sched_test_job(struct drm_sched_job *job)
{
	return container_of(job, struct sched_test_job, base);
}

static inline struct sched_test_device *to_sched_test_dev(struct drm_device *dev)
{
	return container_of(dev, struct sched_test_device, drm);
}

static inline struct sched_test_fence *to_sched_test_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sched_test_fence, base);
}

int sched_test_sched_init(struct sched_test_device *sdev);
void sched_test_sched_fini(struct sched_test_device *sdev);
int sched_test_job_init(struct sched_test_job *job, struct sched_test_file_priv *priv);
void sched_test_job_fini(struct sched_test_job *job);
int sched_test_hwemu_threads_start(struct sched_test_device *sdev);
int sched_test_hwemu_threads_stop(struct sched_test_device *sdev);
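
/*
 * The usual pairing of these entry points, sketched from the declarations
 * above; the surrounding probe/remove plumbing and error labels are assumed:
 *
 *	// probe: bring up the schedulers, then the HW emulation threads
 *	ret = sched_test_sched_init(sdev);
 *	if (ret)
 *		goto err;
 *	ret = sched_test_hwemu_threads_start(sdev);
 *	if (ret)
 *		goto err_sched;
 *
 *	// remove: tear down in the reverse order
 *	sched_test_hwemu_threads_stop(sdev);
 *	sched_test_sched_fini(sdev);
 */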

#endif /* _SCHED_TEST_COMMON_H_ */