/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_RING_TYPES_H
#define INTEL_RING_TYPES_H

#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/types.h>

/*
 * Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

struct i915_vma;

struct intel_ring {
	struct kref ref;
	struct i915_vma *vma;
	void *vaddr;

	/*
	 * As we have two types of rings, one global to the engine used
	 * by ringbuffer submission and those that are exclusive to a
	 * context used by execlists, we have to play safe and allow
	 * atomic updates to the pin_count. However, the actual pinning
	 * of the context is either done during initialisation for
	 * ringbuffer submission or serialised as part of the context
	 * pinning for execlists, and so we do not need a mutex ourselves
	 * to serialise intel_ring_pin/intel_ring_unpin.
	 */
	atomic_t pin_count;

	u32 head; /* updated during retire, loosely tracks RING_HEAD */
	u32 tail; /* updated on submission, used for RING_TAIL */
	u32 emit; /* updated during request construction */

	u32 space;
	u32 size;
	u32 wrap;
	u32 effective_size;
};

#endif /* INTEL_RING_TYPES_H */
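
/*
 * Illustrative sketch, not part of the original header: how the atomic
 * pin_count above can be used without a dedicated mutex, per the comment
 * in struct intel_ring.  The helper names and bodies below are assumptions
 * for illustration only; the real intel_ring_pin()/intel_ring_unpin()
 * helpers live elsewhere in the driver and also handle mapping the backing
 * i915_vma.
 *
 *	static int example_ring_pin(struct intel_ring *ring)
 *	{
 *		// Concurrent pinners only need the counter to be atomic;
 *		// the first pin is already serialised by ring init or by
 *		// context pinning, so no mutex is required here.
 *		if (atomic_fetch_inc(&ring->pin_count))
 *			return 0;	// already pinned, just count the user
 *
 *		// ... first pin: map ring->vma and set ring->vaddr ...
 *		return 0;
 *	}
 *
 *	static void example_ring_unpin(struct intel_ring *ring)
 *	{
 *		if (!atomic_dec_and_test(&ring->pin_count))
 *			return;		// other users still hold a pin
 *
 *		// ... last unpin: unmap ring->vma and clear ring->vaddr ...
 *	}
 */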