#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_range.h"
#include "util/u_transfer.h"
#include "util/list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

/* DIRTY: buffer was (or will be after the next flush) written to by the GPU
 * and resource->data has not been updated to reflect the modified VRAM
 * contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 *
 * USER_PTR: bo is backed by user memory mapped into the GPU's VM
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY (1 << 2)
#define NOUVEAU_BUFFER_STATUS_USER_PTR (1 << 6)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

/* Status bits whose presence means the buffer's backing storage must be
 * reallocated rather than reused (currently only USER_MEMORY). */
#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
27
/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base; /* gallium resource; first member, so pointer
                               * casts to/from pipe_resource are valid */
   const struct u_resource_vtbl *vtbl;

   uint64_t address; /* virtual address (nv50+) */

   uint8_t *data; /* resource's contents, if domain == 0, or cached */
   struct nouveau_bo *bo; /* GPU buffer object backing this resource */
   uint32_t offset; /* offset into the data/bo */

   uint8_t status; /* bitmask of NOUVEAU_BUFFER_STATUS_* flags */
   uint8_t domain; /* memory domain; 0 means not mapped into the GPU's
                    * address space (contents live in *data) */

   uint16_t cb_bindings[6]; /* per-shader per-slot bindings */

   /* NOTE(review): presumably fence tracks the last GPU access and fence_wr
    * the last GPU write — confirm against nouveau_fence users. */
   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm; /* sub-allocation record, if the bo is
                                      * suballocated — verify in buffer code */

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
};
57
/* Release the resource's GPU-side backing storage (bo / mm allocation). */
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

/* Copy `size` bytes from src at byte offset src_pos to dst at dst_pos. */
void
nouveau_copy_buffer(struct nouveau_context *,
                    struct nv04_resource *dst, unsigned dst_pos,
                    struct nv04_resource *src, unsigned src_pos, unsigned size);

/* Move the buffer's contents into the given memory domain.
 * Returns false on failure. */
bool
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);

/* Map the resource for CPU access and return a pointer at byte `offset`.
 * `flags` are PIPE_MAP_* usage flags; NULL is returned on failure. */
void *
nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
                            uint32_t offset, uint32_t flags);
73
/* Counterpart to nouveau_resource_map_offset(); mappings need no explicit
 * teardown in this driver, so this is intentionally a no-op. */
static inline void
nouveau_resource_unmap(struct nv04_resource *res)
{
   (void)res; /* no-op; cast silences -Wunused-parameter */
}
79
/* Downcast a gallium pipe_resource to the driver's nv04_resource wrapper.
 * Valid because struct nv04_resource embeds pipe_resource as its first
 * member, so both share the same address. */
static inline struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   struct nv04_resource *res = (struct nv04_resource *)resource;
   return res;
}
85
86 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
87 static inline bool
nouveau_resource_mapped_by_gpu(struct pipe_resource * resource)88 nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
89 {
90 return nv04_resource(resource)->domain != 0;
91 }
92
/* Create a buffer resource from the given template. */
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

/* Create a buffer whose bo is backed by client memory mapped into the GPU's
 * VM (the USER_PTR path described above). */
struct pipe_resource *
nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
                                const struct pipe_resource *templ,
                                void *user_ptr);

/* Wrap `bytes` bytes of client memory at `ptr` in a resource without
 * copying (the USER_MEMORY path described above). */
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

/* Upload `size` bytes starting at `base` from the user memory backing the
 * resource into GPU-accessible storage. Returns false on failure. */
bool
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
                           unsigned base, unsigned size);

/* pipe_context::invalidate_resource implementation for buffers. */
void
nouveau_buffer_invalidate(struct pipe_context *pipe,
                          struct pipe_resource *resource);
113
/* Copy data to a scratch buffer and return the address & bo the data
 * resides in.
 * Returns 0 on failure.
 */
uint64_t
nouveau_scratch_data(struct nouveau_context *,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **);
121
#endif