/*
 * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "../i915_drv.h"

#include <drm/drm_legacy.h> /* for agp */

/* MCH IFP BARs */
#define I915_IFPADDR	0x60
#define I965_IFPADDR	0x70
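
/*
 * Bit 0 of each IFP BAR doubles as an enable/valid flag: the alloc
 * routines below check it before trusting the address and set it when
 * programming a freshly allocated flush page.
 */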

extern struct cfdriver inteldrm_cd;

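/*
 * Store fence for write-combined mappings: GTT PTE writes may be
 * posted through a WC mapping, so they must be made globally visible
 * before we trigger a chipset flush.  amd64 always has sfence; on
 * i386 a locked read-modify-write of the stack serves as the fence,
 * since sfence requires SSE.
 */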
#ifdef __amd64__
#define membar_producer_wc()	__asm volatile("sfence":::"memory")
#else
#define membar_producer_wc()	__asm volatile(\
				"lock; addl $0,0(%%esp)":::"memory")
#endif

/*
 * We're an Intel IGD, so bus 0, device 0, function 0 should be the
 * GMCH and therefore have Intel as its vendor.
 */
int
inteldrm_gmch_match(struct pci_attach_args *pa)
{
	if (pa->pa_bus == 0 && pa->pa_device == 0 && pa->pa_function == 0 &&
	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
		return (1);
	return (0);
}

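/*
 * Set up the GEN3 chipset flush page.  If the firmware already
 * programmed an IFP address (bit 0 set), map that page; otherwise
 * allocate a page from the PCI memory extent, map it, and program the
 * BAR with the enable bit set.  On failure the handle is zeroed and
 * chipset flushes become no-ops.
 */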
void
i915_alloc_ifp(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
{
	bus_addr_t	addr;
	u_int32_t	reg;

	dev_priv->ifp.i9xx.bst = bpa->pa_memt;

	reg = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I915_IFPADDR);
	if (reg & 0x1) {
		addr = (bus_addr_t)reg;
		addr &= ~0x1;
		/* XXX extents ... need data on whether BIOSes alloc or not. */
		if (bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
		    &dev_priv->ifp.i9xx.bsh) != 0)
			goto nope;
		return;
	} else if (bpa->pa_memex == NULL ||
	    extent_alloc_subregion(bpa->pa_memex, 0x100000, 0xffffffff,
	    PAGE_SIZE, PAGE_SIZE, 0, 0, 0, &addr) ||
	    bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
	    &dev_priv->ifp.i9xx.bsh))
		goto nope;

	pci_conf_write(bpa->pa_pc, bpa->pa_tag, I915_IFPADDR, addr | 0x1);

	return;

nope:
	dev_priv->ifp.i9xx.bsh = 0;
	printf("%s: no ifp\n", dev_priv->sc_dev.dv_xname);
}

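/*
 * Same dance for GEN4+ and G33, where the IFP BAR is 64 bits wide and
 * split across two config dwords.
 */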
void
i965_alloc_ifp(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
{
	bus_addr_t	addr;
	u_int32_t	lo, hi;

	dev_priv->ifp.i9xx.bst = bpa->pa_memt;

	hi = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR + 4);
	lo = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR);
	if (lo & 0x1) {
		addr = (((u_int64_t)hi << 32) | lo);
		addr &= ~0x1;
		/* XXX extents ... need data on whether BIOSes alloc or not. */
		if (bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
		    &dev_priv->ifp.i9xx.bsh) != 0)
			goto nope;
		return;
	} else if (bpa->pa_memex == NULL ||
	    extent_alloc_subregion(bpa->pa_memex, 0x100000, 0xffffffff,
	    PAGE_SIZE, PAGE_SIZE, 0, 0, 0, &addr) ||
	    bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
	    &dev_priv->ifp.i9xx.bsh))
		goto nope;

	pci_conf_write(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR + 4,
	    upper_32_bits(addr));
	pci_conf_write(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR,
	    (addr & 0xffffffff) | 0x1);

	return;

nope:
	dev_priv->ifp.i9xx.bsh = 0;
	printf("%s: no ifp\n", dev_priv->sc_dev.dv_xname);
}

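/*
 * Locate the GMCH and set up the flush mechanism for this generation:
 * GEN6+ needs none, GEN3 and GEN4-era chipsets get a real flush page
 * via the IFP BARs, and GEN2 gets a DMA-able scratch page instead.
 */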
void
intel_gtt_chipset_setup(struct drm_device *dev)
{
	struct inteldrm_softc *dev_priv = dev->dev_private;
	struct pci_attach_args bpa;

	if (GRAPHICS_VER(dev_priv) >= 6)
		return;

	if (pci_find_device(&bpa, inteldrm_gmch_match) == 0) {
		printf("%s: can't find GMCH\n",
		    dev_priv->sc_dev.dv_xname);
		return;
	}

	/* Set up the IFP for chipset flushing */
	if (GRAPHICS_VER(dev_priv) >= 4 || IS_G33(dev_priv)) {
		i965_alloc_ifp(dev_priv, &bpa);
	} else if (GRAPHICS_VER(dev_priv) == 3) {
		i915_alloc_ifp(dev_priv, &bpa);
	} else {
		int nsegs;
		/*
		 * I8XX has no flush-page mechanism; we fake it by writing
		 * until the cache is empty.  Allocate a page to scribble on.
		 */
		dev_priv->ifp.i8xx.kva = NULL;
		if (bus_dmamem_alloc(dev_priv->dmat, PAGE_SIZE, 0, 0,
		    &dev_priv->ifp.i8xx.seg, 1, &nsegs, BUS_DMA_WAITOK) == 0) {
			if (bus_dmamem_map(dev_priv->dmat, &dev_priv->ifp.i8xx.seg,
			    1, PAGE_SIZE, &dev_priv->ifp.i8xx.kva, 0) != 0) {
				bus_dmamem_free(dev_priv->dmat,
				    &dev_priv->ifp.i8xx.seg, nsegs);
				dev_priv->ifp.i8xx.kva = NULL;
			}
		}
	}
}

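/*
 * The functions below implement the Linux intel-gtt entry points that
 * the i915 driver expects, backed by OpenBSD's agp(4) machinery.
 */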
int
intel_gmch_enable_gtt(void)
{
	struct inteldrm_softc *dev_priv = (void *)inteldrm_cd.cd_devs[0];

	intel_gtt_chipset_setup(&dev_priv->drm);
	return 1;
}

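/* Stub: the bridge device is managed by agp(4), so just report success. */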
int
intel_gmch_probe(struct pci_dev *bridge_dev, struct pci_dev *gpu_pdev,
    void *bridge)
{
	return 1;
}

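/*
 * Report the GTT geometry from the agp(4) aperture info; the total GTT
 * size and the CPU-mappable window cover the same range here.
 */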
void
intel_gmch_gtt_get(u64 *gtt_total,
    phys_addr_t *mappable_base, resource_size_t *mappable_end)
{
	struct inteldrm_softc *dev_priv = (void *)inteldrm_cd.cd_devs[0];
	struct agp_info *ai = &dev_priv->drm.agp->info;

	*gtt_total = ai->ai_aperture_size;
	*mappable_base = ai->ai_aperture_base;
	*mappable_end = ai->ai_aperture_size;
}

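/*
 * Flush the chipset write cache.  GEN3+ pokes the flush page set up
 * above; GEN2 has no flush page, so we flush the CPU caches, set the
 * flush bit (bit 31) in the HIC register, and poll until the hardware
 * clears it.
 */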
void
intel_gmch_gtt_flush(void)
{
	struct inteldrm_softc *dev_priv = (void *)inteldrm_cd.cd_devs[0];

	/*
	 * A write to the flush page flushes the chipset write cache;
	 * the write returns once the flush is done.
	 */
	if (GRAPHICS_VER(dev_priv) >= 3) {
		if (dev_priv->ifp.i9xx.bsh != 0)
			bus_space_write_4(dev_priv->ifp.i9xx.bst,
			    dev_priv->ifp.i9xx.bsh, 0, 1);
	} else {
		int i;
#define I830_HIC        0x70
		i915_reg_t hic = _MMIO(I830_HIC);

		wbinvd_on_all_cpus();

		/* Set the flush bit and wait for the hardware to clear it. */
		intel_uncore_write(&dev_priv->uncore, hic,
		    (intel_uncore_read(&dev_priv->uncore, hic) | (1<<31)));
		for (i = 1000; i; i--) {
			if (!(intel_uncore_read(&dev_priv->uncore, hic) & (1<<31)))
				break;
			delay(100);
		}
	}
}

void
intel_gmch_remove(void)
{
}

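/*
 * Bind every page of an sg list into consecutive GTT entries starting
 * at pg_start, then fence the PTE writes and flush the chipset so the
 * GPU sees them.  A hypothetical caller binding an object's pages at
 * GTT page 'start' would do roughly:
 *
 *	intel_gmch_gtt_insert_sg_entries(obj_pages, start, flags);
 */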
void
intel_gmch_gtt_insert_sg_entries(struct sg_table *pages, unsigned int pg_start,
    unsigned int flags)
{
	struct inteldrm_softc *dev_priv = (void *)inteldrm_cd.cd_devs[0];
	struct agp_softc *sc = dev_priv->drm.agp->agpdev;
	bus_addr_t addr = sc->sc_apaddr + pg_start * PAGE_SIZE;
	struct sg_page_iter sg_iter;

	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		sc->sc_methods->bind_page(sc->sc_chipc, addr,
		    sg_page_iter_dma_address(&sg_iter), flags);
		addr += PAGE_SIZE;
	}
	membar_producer_wc();
	intel_gmch_gtt_flush();
}

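/*
 * Single-page variant of the above: bind one DMA address at GTT page
 * 'pg' and flush immediately.
 */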
void
intel_gmch_gtt_insert_page(dma_addr_t addr, unsigned int pg,
    unsigned int flags)
{
	struct inteldrm_softc *dev_priv = (void *)inteldrm_cd.cd_devs[0];
	struct agp_softc *sc = dev_priv->drm.agp->agpdev;
	bus_addr_t apaddr = sc->sc_apaddr + (pg * PAGE_SIZE);

	sc->sc_methods->bind_page(sc->sc_chipc, apaddr, addr, flags);
	intel_gmch_gtt_flush();
}

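/*
 * Unbind num_entries GTT entries starting at first_entry.  Only a
 * store fence here; any chipset flush is left to the caller.
 */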
void
intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	struct inteldrm_softc *dev_priv = (void *)inteldrm_cd.cd_devs[0];
	struct agp_softc *sc = dev_priv->drm.agp->agpdev;
	bus_addr_t addr = sc->sc_apaddr + first_entry * PAGE_SIZE;
	int i;

	for (i = 0; i < num_entries; i++) {
		sc->sc_methods->unbind_page(sc->sc_chipc, addr);
		addr += PAGE_SIZE;
	}
	membar_producer_wc();
}