/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"

#include <subdev/fb.h>

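/* Allocate a contiguous run of 'n' compression tags from the tag heap;
 * *pnode is cleared when allocation fails.
 */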
int
nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
{
	int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
	if (ret)
		*pnode = NULL;
	return ret;
}

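/* Return a previously allocated tag range to the tag heap. */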
void
nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
{
	nvkm_mm_free(&ltc->tags, pnode);
}

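/* Clear the tags in [first, first + count - 1] through the chipset's
 * cbc_clear()/cbc_wait() hooks, serialised by the subdev mutex.
 */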
void
nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
{
	const u32 limit = first + count - 1;

	BUG_ON((first > limit) || (limit >= ltc->num_tags));

	mutex_lock(&ltc->subdev.mutex);
	ltc->func->cbc_clear(ltc, first, limit);
	ltc->func->cbc_wait(ltc);
	mutex_unlock(&ltc->subdev.mutex);
}

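/* Record the ZBC clear colour for table entry 'index', program it via the
 * chipset hook, and hand the index back to the caller.
 */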
int
nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
{
	memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
	ltc->func->zbc_clear_color(ltc, index, color);
	return index;
}

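/* As above, but for a ZBC depth clear value. */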
int
nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
{
	ltc->zbc_depth[index] = depth;
	ltc->func->zbc_clear_depth(ltc, index, depth);
	return index;
}

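/* Invalidate the cache, where the chipset provides an invalidate hook. */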
void
nvkm_ltc_invalidate(struct nvkm_ltc *ltc)
{
	if (ltc->func->invalidate)
		ltc->func->invalidate(ltc);
}

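/* Flush the cache, where the chipset provides a flush hook. */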
void
nvkm_ltc_flush(struct nvkm_ltc *ltc)
{
	if (ltc->func->flush)
		ltc->func->flush(ltc);
}

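/* nvkm_subdev method wrappers, forwarding to the chipset-specific hooks. */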
static void
nvkm_ltc_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
	ltc->func->intr(ltc);
}

static int
nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
	return ltc->func->oneinit(ltc);
}

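/* Reprogram any ZBC clear values held in software before running the
 * chipset-specific init.
 */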
static int
nvkm_ltc_init(struct nvkm_subdev *subdev)
{
	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
	int i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
		ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
	}

	ltc->func->init(ltc);
	return 0;
}

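/* Tear down the tag heap and release the VRAM backing the tags, if any. */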
static void *
nvkm_ltc_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
	nvkm_mm_fini(&ltc->tags);
	if (ram)
		nvkm_mm_free(&ram->vram, &ltc->tag_ram);
	return ltc;
}

static const struct nvkm_subdev_func
nvkm_ltc = {
	.dtor = nvkm_ltc_dtor,
	.oneinit = nvkm_ltc_oneinit,
	.init = nvkm_ltc_init,
	.intr = nvkm_ltc_intr,
};

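/* Common constructor used by the chipset-specific implementations; ZBC
 * entry 0 is reserved to mean "disabled".
 */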
int
nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
	      int index, struct nvkm_ltc **pltc)
{
	struct nvkm_ltc *ltc;

	if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
	ltc->func = func;
	ltc->zbc_min = 1; /* reserve 0 for disabled */
	ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
	return 0;
}