/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/misc/sgi-gru/

Lines Matching refs:gru

32 #include "gru.h"
39 .name = "gru"
50 * Select a gru fault map to be used by the current cpu. Note that
96 static int gru_wrap_asid(struct gru_state *gru)
98 gru_dbg(grudev, "gid %d\n", gru->gs_gid);
100 gru->gs_asid_gen++;
105 static int gru_reset_asid_limit(struct gru_state *gru, int asid)
109 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
113 asid = gru_wrap_asid(gru);
114 gru_flush_all_tlb(gru);
115 gid = gru->gs_gid;
118 if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
120 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
122 gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
133 asid = gru_wrap_asid(gru);
141 gru->gs_asid_limit = limit;
142 gru->gs_asid = asid;
143 gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
149 static int gru_assign_asid(struct gru_state *gru)
153 gru->gs_asid += ASID_INC;
154 asid = gru->gs_asid;
155 if (asid >= gru->gs_asid_limit)
156 asid = gru_reset_asid_limit(gru, asid);
158 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
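
The matches above cover the ASID allocator: gru_assign_asid() advances gs_asid by ASID_INC, and once gs_asid_limit is reached, gru_reset_asid_limit() recomputes the limit while gru_wrap_asid() bumps gs_asid_gen and restarts the space (alongside a full TLB flush). A minimal standalone sketch of that increment-and-wrap pattern follows; the toy_* names and all constant values are illustrative assumptions, not the driver's definitions.

#include <stdio.h>

#define ASID_INC 8          /* assumed stride between handed-out ASIDs */
#define MIN_ASID 8          /* assumed first usable ASID after a wrap  */
#define MAX_ASID 0xfffff0   /* assumed top of the ASID space           */

struct toy_gru {
	int asid;           /* next ASID to hand out             */
	int asid_limit;     /* reuse boundary; wrap when reached */
	int asid_gen;       /* generation, bumped on every wrap  */
};

static int toy_wrap_asid(struct toy_gru *g)
{
	g->asid_gen++;      /* invalidates all previously issued ASIDs */
	return MIN_ASID;
}

static int toy_assign_asid(struct toy_gru *g)
{
	g->asid += ASID_INC;
	if (g->asid >= g->asid_limit) {
		/* the real driver rescans live contexts to pick a new
		 * limit and flushes the TLB; here we just wrap */
		g->asid = toy_wrap_asid(g);
		g->asid_limit = MAX_ASID;
	}
	return g->asid;
}

int main(void)
{
	struct toy_gru g = { .asid = MIN_ASID, .asid_limit = 24 };

	for (int i = 0; i < 5; i++)
		printf("asid 0x%x, gen %d\n", toy_assign_asid(&g), g.asid_gen);
	return 0;
}
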
184 unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
187 return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
191 unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
194 return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
198 static void reserve_gru_resources(struct gru_state *gru,
201 gru->gs_active_contexts++;
203 gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
206 gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
209 static void free_gru_resources(struct gru_state *gru,
212 gru->gs_active_contexts--;
213 gru->gs_cbr_map |= gts->ts_cbr_map;
214 gru->gs_dsr_map |= gts->ts_dsr_map;
224 static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
227 return hweight64(gru->gs_cbr_map) >= cbr_au_count
228 && hweight64(gru->gs_dsr_map) >= dsr_au_count
229 && gru->gs_active_contexts < max_active_contexts;
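
gru_reserve_cb_resources()/gru_reserve_ds_resources() carve control-block and data-segment allocation units out of per-GRU bitmaps (gs_cbr_map, gs_dsr_map), free_gru_resources() ORs them back, and check_gru_resources() tests availability by popcount (hweight64). A self-contained sketch of that set-bit-means-free bitmap scheme, with __builtin_popcountll standing in for the kernel's hweight64() and toy_* names as assumptions:

#include <stdint.h>
#include <stdio.h>

/* A set bit marks a free allocation unit, matching the way
 * free_gru_resources() ORs a context's bits back into the maps. */

static uint64_t toy_reserve(uint64_t *avail, int count)
{
	uint64_t taken = 0;

	for (int bit = 0; bit < 64 && count; bit++) {
		uint64_t m = 1ull << bit;

		if (*avail & m) {
			*avail &= ~m;   /* claim the unit */
			taken |= m;
			count--;
		}
	}
	return taken;           /* caller keeps this mask, cf. ts_cbr_map */
}

static void toy_free(uint64_t *avail, uint64_t taken)
{
	*avail |= taken;        /* hand the units back */
}

static int toy_check(uint64_t avail, int need)
{
	return __builtin_popcountll(avail) >= need;
}

int main(void)
{
	uint64_t avail = 0xff;  /* 8 units free */
	uint64_t mine = toy_reserve(&avail, 3);

	printf("took 0x%llx, %d left, fits(6)=%d\n",
	       (unsigned long long)mine,
	       __builtin_popcountll(avail), toy_check(avail, 6));
	toy_free(&avail, mine);
	return 0;
}
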
236 static int gru_load_mm_tracker(struct gru_state *gru,
240 struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
247 spin_lock(&gru->gs_asid_lock);
249 gru->gs_asid_gen)) {
250 asid = gru_assign_asid(gru);
252 asids->mt_asid_gen = gru->gs_asid_gen;
257 spin_unlock(&gru->gs_asid_lock);
261 if (!test_bit(gru->gs_gid, gms->ms_asidmap))
262 __set_bit(gru->gs_gid, gms->ms_asidmap);
267 gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
272 static void gru_unload_mm_tracker(struct gru_state *gru,
279 asids = &gms->ms_asids[gru->gs_gid];
282 spin_lock(&gru->gs_asid_lock);
286 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
287 spin_unlock(&gru->gs_asid_lock);
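
gru_load_mm_tracker() reuses a cached ASID only if its recorded generation still matches the GRU's gs_asid_gen; after a wrap (which flushed the TLB), every older ASID is stale and gets reassigned. A sketch of that generation test, again under assumption-flagged toy_* naming:

#include <stdio.h>

struct toy_gru { int asid_gen; int next_asid; };
struct toy_tracker { int mt_asid; int mt_asid_gen; };

static int toy_load_tracker(struct toy_gru *g, struct toy_tracker *t)
{
	/* stale if never assigned, or minted before the last wrap */
	if (!t->mt_asid || t->mt_asid_gen != g->asid_gen) {
		t->mt_asid = g->next_asid++;
		t->mt_asid_gen = g->asid_gen;
	}
	return t->mt_asid;
}

int main(void)
{
	struct toy_gru g = { .asid_gen = 1, .next_asid = 8 };
	struct toy_tracker t = { 0, 0 };

	printf("asid %d\n", toy_load_tracker(&g, &t)); /* assigns 8 */
	g.asid_gen++;                                  /* simulate a wrap */
	printf("asid %d\n", toy_load_tracker(&g, &t)); /* stale: reassigns */
	return 0;
}
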
438 struct gru_state *gru;
440 gru = gts->ts_gru;
441 gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
443 spin_lock(&gru->gs_lock);
444 gru->gs_gts[gts->ts_ctxnum] = NULL;
445 free_gru_resources(gru, gts);
446 BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
447 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
451 spin_unlock(&gru->gs_lock);
556 struct gru_state *gru = gts->ts_gru;
562 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
571 gru_unload_mm_tracker(gru, gts);
573 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
592 struct gru_state *gru = gts->ts_gru;
596 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
625 asid = gru_load_mm_tracker(gru, gts);
640 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
660 struct gru_state *gru = gts->ts_gru;
663 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
667 if (gru->gs_gts[gts->ts_ctxnum] != gts)
711 static int gru_check_chiplet_assignment(struct gru_state *gru,
722 return gru->gs_blade_id == blade_id &&
723 (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
727 * Unload the gru context if it is not assigned to the correct blade or
733 struct gru_state *gru;
740 gru = gts->ts_gru;
741 if (!gru || gts->ts_tgid_owner != current->tgid)
744 if (!gru_check_chiplet_assignment(gru, gts)) {
785 struct gru_state *gru, *gru0;
800 gru = blade->bs_lru_gru;
802 gru = next_gru(blade, gru);
803 blade->bs_lru_gru = gru;
806 gru0 = gru;
808 if (gru_check_chiplet_assignment(gru, gts)) {
809 if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
811 spin_lock(&gru->gs_lock);
813 if (flag && gru == gru0 && ctxnum == ctxnum0)
815 ngts = gru->gs_gts[ctxnum];
826 spin_unlock(&gru->gs_lock);
827 if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
830 if (flag && gru == gru0)
834 gru = next_gru(blade, gru);
849 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
850 hweight64(gru->gs_dsr_map));
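
gru_steal_context() round-robins over the blade's GRUs starting at bs_lru_gru, advancing with next_gru() and stopping after one full lap (gru == gru0). A heavily simplified sketch of that scan order follows; the real function can also evict an in-use context under gs_lock, which this sketch omits, and GRUS_PER_BLADE here is an assumed placeholder:

#include <stdio.h>

#define GRUS_PER_BLADE 2        /* assumed; the real count is hardware-defined */

struct toy_blade {
	int lru_gru;                  /* index of the next victim candidate */
	int free_ctx[GRUS_PER_BLADE]; /* spare context slots per GRU        */
};

static int toy_next_gru(int g)
{
	return (g + 1) % GRUS_PER_BLADE;
}

/* return the first GRU with a spare slot, or -1 after one full lap */
static int toy_pick_victim(struct toy_blade *b)
{
	int start = b->lru_gru, g = start;

	do {
		if (b->free_ctx[g] > 0)
			return g;
		g = toy_next_gru(g);
	} while (g != start);
	return -1;
}

int main(void)
{
	struct toy_blade b = { .lru_gru = 1, .free_ctx = { 3, 0 } };

	printf("victim gru %d\n", toy_pick_victim(&b));
	return 0;
}
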
854 * Assign a gru context.
856 static int gru_assign_context_number(struct gru_state *gru)
860 ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
861 __set_bit(ctxnum, &gru->gs_context_map);
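
gru_assign_context_number() claims the first free context slot with find_first_zero_bit() plus __set_bit() on gs_context_map (set bit means in use, the opposite polarity of the resource maps above). A portable sketch of the same first-fit claim, with TOY_NUM_CCH as an assumed stand-in for GRU_NUM_CCH:

#include <stdint.h>
#include <stdio.h>

#define TOY_NUM_CCH 16  /* assumed slot count; the driver uses GRU_NUM_CCH */

static int toy_assign_ctxnum(uint64_t *map)
{
	for (int bit = 0; bit < TOY_NUM_CCH; bit++) {
		if (!(*map & (1ull << bit))) {
			*map |= 1ull << bit;  /* claim the slot */
			return bit;
		}
	}
	return -1;  /* callers have already verified a slot exists */
}

int main(void)
{
	uint64_t map = 0x7;     /* contexts 0-2 busy */

	printf("ctxnum %d\n", toy_assign_ctxnum(&map));
	return 0;
}
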
870 struct gru_state *gru, *grux;
877 gru = NULL;
885 gru = grux;
892 if (gru) {
893 spin_lock(&gru->gs_lock);
894 if (!check_gru_resources(gru, gts->ts_cbr_au_count,
896 spin_unlock(&gru->gs_lock);
899 reserve_gru_resources(gru, gts);
900 gts->ts_gru = gru;
901 gts->ts_blade = gru->gs_blade_id;
902 gts->ts_ctxnum = gru_assign_context_number(gru);
904 gru->gs_gts[gts->ts_ctxnum] = gts;
905 spin_unlock(&gru->gs_lock);
918 return gru;
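
Note the locking shape in gru_assign_gru_context(): candidates are scanned optimistically, then check_gru_resources() is re-run after taking gs_lock before the context is committed. A userspace sketch of that check/recheck-under-lock pattern, with a pthread mutex standing in for the kernel spinlock and all names assumed:

#include <pthread.h>
#include <stdio.h>

struct toy_gru {
	pthread_mutex_t lock;   /* stands in for the kernel spin_lock */
	int free_units;
};

static int toy_try_assign(struct toy_gru *g, int need)
{
	if (g->free_units < need)       /* unlocked fast-path check */
		return -1;
	pthread_mutex_lock(&g->lock);
	if (g->free_units < need) {     /* recheck: a racer may have won */
		pthread_mutex_unlock(&g->lock);
		return -1;
	}
	g->free_units -= need;          /* commit while still locked */
	pthread_mutex_unlock(&g->lock);
	return 0;
}

int main(void)
{
	struct toy_gru g = { PTHREAD_MUTEX_INITIALIZER, 4 };

	printf("first=%d second=%d\n",
	       toy_try_assign(&g, 3), toy_try_assign(&g, 3));
	return 0;
}
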
926 * Note: gru segments always mmapped on GRU_GSEG_PAGESIZE boundaries.