Lines matching defs:gts in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/misc/sgi-gru/

121 		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
199 struct gru_thread_state *gts)
202 gts->ts_cbr_map =
203 gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
204 gts->ts_cbr_idx);
205 gts->ts_dsr_map =
206 gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
210 struct gru_thread_state *gts)
213 gru->gs_cbr_map |= gts->ts_cbr_map;
214 gru->gs_dsr_map |= gts->ts_dsr_map;
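
These matches appear to come from grumain.c, the GRU driver's context-management file. Lines 202-206 reserve control-block (CBR) and data-segment (DSR) allocation units for a thread state, recording the claim as bitmaps in ts_cbr_map/ts_dsr_map; lines 213-214 are the matching release, which simply ORs those claim masks back into the GRU's free maps (the same pairing reappears in gru_free_gru_context further down). A minimal userspace sketch of that claim/release pattern, with hypothetical names throughout:

#include <assert.h>
#include <stdio.h>

/* Hypothetical sketch: free resources are the set bits of a map.
 * Reserving n units moves bits from the free map into a per-context
 * claim mask; releasing ORs the claim back, mirroring the
 * "gs_cbr_map |= ts_cbr_map" lines at 213-214. */
static unsigned long reserve_units(unsigned long *free_map, int n)
{
	unsigned long claim = 0;

	for (int bit = 0; n > 0 && bit < 8 * (int)sizeof(*free_map); bit++) {
		if (*free_map & (1UL << bit)) {
			*free_map &= ~(1UL << bit);
			claim |= 1UL << bit;
			n--;
		}
	}
	assert(n == 0);		/* caller verified availability first */
	return claim;
}

static void release_units(unsigned long *free_map, unsigned long claim)
{
	assert((*free_map & claim) == 0);	/* catch a double release */
	*free_map |= claim;
}

int main(void)
{
	unsigned long cbr_free = 0xffUL;
	unsigned long mine = reserve_units(&cbr_free, 3);

	printf("claimed 0x%lx, free now 0x%lx\n", mine, cbr_free);
	release_units(&cbr_free, mine);
	return 0;
}
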
237 struct gru_thread_state *gts)
239 struct gru_mm_struct *gms = gts->ts_gms;
241 unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
266 "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
267 gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
273 struct gru_thread_state *gts)
275 struct gru_mm_struct *gms = gts->ts_gms;
280 ctxbitmap = (1 << gts->ts_ctxnum);
285 gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
286 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
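
Lines 237-286 are the two halves of the per-address-space tracker: on load, the context's bit (1 << ts_ctxnum) is set in the gru_mm_struct's context bitmap and an ASID is picked up; on unload the bit is cleared again. TLB-shootdown code can then walk the set bits to find which hardware contexts share the mm. A hedged userspace sketch of that registration idea (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical per-mm tracker: a bitmap of hardware context numbers
 * currently using this address space, updated under a lock so a
 * concurrent TLB flush sees a consistent view. */
struct mm_tracker {
	pthread_mutex_t lock;
	unsigned short ctxbitmap;	/* one bit per loaded context */
};

static void tracker_load(struct mm_tracker *t, int ctxnum)
{
	pthread_mutex_lock(&t->lock);
	t->ctxbitmap |= (unsigned short)(1 << ctxnum);
	pthread_mutex_unlock(&t->lock);
}

static void tracker_unload(struct mm_tracker *t, int ctxnum)
{
	pthread_mutex_lock(&t->lock);
	t->ctxbitmap &= (unsigned short)~(1 << ctxnum);
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct mm_tracker t = { PTHREAD_MUTEX_INITIALIZER, 0 };

	tracker_load(&t, 3);
	printf("ctxbitmap 0x%x\n", t.ctxbitmap);	/* prints 0x8 */
	tracker_unload(&t, 3);
	return 0;
}
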
295 void gts_drop(struct gru_thread_state *gts)
297 if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
298 if (gts->ts_gms)
299 gru_drop_mmu_notifier(gts->ts_gms);
300 kfree(gts);
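
Lines 295-300 give gts_drop nearly whole: atomically decrement ts_refcnt and let whichever caller takes the count to zero drop the mmu-notifier reference and kfree the state. A self-contained C11 sketch of the same drop-and-free idiom:

#include <stdatomic.h>
#include <stdlib.h>

/* Minimal sketch of the gts_drop pattern; names are illustrative.
 * atomic_fetch_sub returning 1 here corresponds to the listing's
 * atomic_dec_return(...) == 0: that caller owns the free. */
struct obj {
	atomic_int refcnt;
	/* ... payload ... */
};

static void obj_drop(struct obj *o)
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 2);	/* two owners */
	obj_drop(o);			/* 2 -> 1: no free */
	obj_drop(o);			/* 1 -> 0: freed here */
	return 0;
}
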
311 struct gru_thread_state *gts;
313 list_for_each_entry(gts, &vdata->vd_head, ts_next)
314 if (gts->ts_tsid == tsid)
315 return gts;
326 struct gru_thread_state *gts;
332 gts = kmalloc(bytes, GFP_KERNEL);
333 if (!gts)
337 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
338 atomic_set(&gts->ts_refcnt, 1);
339 mutex_init(&gts->ts_ctxlock);
340 gts->ts_cbr_au_count = cbr_au_count;
341 gts->ts_dsr_au_count = dsr_au_count;
342 gts->ts_tlb_preload_count = tlb_preload_count;
343 gts->ts_user_options = options;
344 gts->ts_user_blade_id = -1;
345 gts->ts_user_chiplet_id = -1;
346 gts->ts_tsid = tsid;
347 gts->ts_ctxnum = NULLCTX;
348 gts->ts_tlb_int_select = -1;
349 gts->ts_cch_req_slice = -1;
350 gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
352 gts->ts_mm = current->mm;
353 gts->ts_vma = vma;
357 gts->ts_gms = gms;
360 gru_dbg(grudev, "alloc gts %p\n", gts);
361 return gts;
364 gts_drop(gts);
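
gru_alloc_gts (lines 326-364) sizes one allocation as a fixed header plus a variable save area for the context's CBs and DSRs, but the memset at line 337 deliberately zeroes only the header; the save area holds garbage until real context state is saved into it. The defaults at lines 347-350 (ts_ctxnum = NULLCTX and the -1 sentinels) mean "no hardware context and no placement preference yet". A sketch of the allocate-big, zero-header pattern, with hypothetical types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch: one allocation = fixed header + variable save
 * area; only the header is zeroed, as in the "zero out header" comment
 * at line 337. The save area is filled lazily on first context save. */
struct ctx_hdr {
	int refcnt;
	int ctxnum;	/* -1 plays the role of NULLCTX */
};

static struct ctx_hdr *alloc_ctx(size_t save_bytes)
{
	struct ctx_hdr *h = malloc(sizeof(*h) + save_bytes);

	if (!h)
		return NULL;
	memset(h, 0, sizeof(*h));	/* save area stays uninitialized */
	h->refcnt = 1;
	h->ctxnum = -1;			/* no hardware context assigned yet */
	return h;
}

int main(void)
{
	struct ctx_hdr *h = alloc_ctx(4096);

	if (h)
		printf("ctxnum %d, refcnt %d\n", h->ctxnum, h->refcnt);
	free(h);
	return 0;
}
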
393 struct gru_thread_state *gts;
396 gts = gru_find_current_gts_nolock(vdata, tsid);
398 gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
399 return gts;
404 * another thread to race to create a gts.
410 struct gru_thread_state *gts, *ngts;
412 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
416 if (IS_ERR(gts))
417 return gts;
422 gts_drop(gts);
423 gts = ngts;
426 list_add(&gts->ts_next, &vdata->vd_head);
429 gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
430 return gts;
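
The comment fragment at line 404 names the pattern used in lines 410-430: allocation can block, so gru_alloc_gts runs without the list lock, and only then does the caller re-run the lookup from lines 311-315 under the lock. If another thread created a gts in the meantime (ngts), the fresh copy is dropped and the winner's is used; otherwise the new one is published on vd_head. A pthread sketch of that allocate-then-recheck idiom (names hypothetical):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical sketch of the race-to-create pattern in
 * gru_alloc_thread_state: build the object outside the lock, then
 * under the lock either publish it or discard it if a racer won. */
struct tstate {
	struct tstate *next;
	int tsid;
};

static struct tstate *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct tstate *find_nolock(int tsid)
{
	for (struct tstate *t = head; t; t = t->next)
		if (t->tsid == tsid)
			return t;
	return NULL;
}

static struct tstate *get_or_create(int tsid)
{
	struct tstate *ts = malloc(sizeof(*ts));	/* may block: do it unlocked */

	if (!ts)
		return NULL;
	ts->tsid = tsid;

	pthread_mutex_lock(&list_lock);
	struct tstate *winner = find_nolock(tsid);
	if (winner) {
		free(ts);		/* lost the race; use the existing one */
		ts = winner;
	} else {
		ts->next = head;	/* won: publish on the list */
		head = ts;
	}
	pthread_mutex_unlock(&list_lock);
	return ts;
}

int main(void)
{
	struct tstate *a = get_or_create(7);
	struct tstate *b = get_or_create(7);

	return !(a && a == b);	/* second call finds the first's object */
}
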
436 static void gru_free_gru_context(struct gru_thread_state *gts)
440 gru = gts->ts_gru;
441 gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
444 gru->gs_gts[gts->ts_ctxnum] = NULL;
445 free_gru_resources(gru, gts);
446 BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
447 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
448 gts->ts_ctxnum = NULLCTX;
449 gts->ts_gru = NULL;
450 gts->ts_blade = -1;
453 gts_drop(gts);
554 void gru_unload_context(struct gru_thread_state *gts, int savestate)
556 struct gru_state *gru = gts->ts_gru;
558 int ctxnum = gts->ts_ctxnum;
560 if (!is_kernel_context(gts))
561 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
564 gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
565 gts, gts->ts_cbr_map, gts->ts_dsr_map);
570 if (!is_kernel_context(gts))
571 gru_unload_mm_tracker(gru, gts);
573 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
574 ctxnum, gts->ts_cbr_map,
575 gts->ts_dsr_map);
576 gts->ts_data_valid = 1;
583 gru_free_gru_context(gts);
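
gru_unload_context (lines 554-583) tears a context off the hardware in a careful order: for user contexts it first zaps the gseg PTEs so any later access refaults instead of touching a stale mapping, then unhooks the mm tracker, optionally saves the CB/DS state, marks the saved image valid (ts_data_valid, line 576), and finally frees the hardware context. A stub sketch of that ordering, with all names hypothetical:

#include <stdio.h>

/* Hypothetical unload ordering: unmap first so later touches refault,
 * then save hardware state, then mark the saved image reloadable. */
struct saved_ctx {
	int data_valid;	/* like ts_data_valid: image is worth reloading */
};

static void unload_ctx(struct saved_ctx *s, int savestate,
		       void (*unmap)(void), void (*save)(struct saved_ctx *))
{
	unmap();		/* like zap_vma_ptes(): future access refaults */
	if (savestate) {
		save(s);	/* copy CB/DS state out of the hardware */
		s->data_valid = 1;
	}
	/* caller then releases the context number and resources */
}

static void unmap_stub(void) { puts("unmap gseg"); }
static void save_stub(struct saved_ctx *s) { (void)s; puts("save CB/DS"); }

int main(void)
{
	struct saved_ctx s = { 0 };

	unload_ctx(&s, 1, unmap_stub, save_stub);
	printf("data_valid %d\n", s.data_valid);
	return 0;
}
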
590 void gru_load_context(struct gru_thread_state *gts)
592 struct gru_state *gru = gts->ts_gru;
594 int i, err, asid, ctxnum = gts->ts_ctxnum;
599 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
600 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
601 cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
603 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
604 cch->tlb_int_select = gts->ts_tlb_int_select;
606 if (gts->ts_cch_req_slice >= 0) {
608 cch->req_slice = gts->ts_cch_req_slice;
613 cch->dsr_allocation_map = gts->ts_dsr_map;
614 cch->cbr_allocation_map = gts->ts_cbr_map;
616 if (is_kernel_context(gts)) {
625 asid = gru_load_mm_tracker(gru, gts);
628 cch->sizeavail[i] = gts->ts_sizeavail;
635 "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
636 err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
640 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
641 gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
647 gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
648 gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
649 (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
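
gru_load_context (lines 590-649) programs the context control handle (CCH) before starting the context: the fault-handling enables are derived from the user's miss-mode options (poll and interrupt modes both take unmasked faults, only interrupt mode raises a TLB interrupt), the interrupt is targeted at the current CPU's fault map id, the CBR/DSR claim masks are copied into the allocation maps, and the saved CB/DS data is restored only when ts_data_valid says there is something worth restoring. A stub-type sketch of the field derivation at lines 599-614; this is not the driver's real cch layout:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical CCH stub; field names only approximate the listing. */
enum miss_mode { MISS_DEFAULT, MISS_FMM_POLL, MISS_FMM_INTR };

struct cch_stub {
	bool fault_bit_enable;	/* record unmasked faults at all? */
	bool tlb_int_enable;	/* interrupt the CPU on a TLB miss? */
	int  tlb_int_select;	/* which CPU fault vector to target */
	unsigned long cbr_allocation_map;
	unsigned long dsr_allocation_map;
};

static void program_cch(struct cch_stub *cch, enum miss_mode mode,
			int cpu_fault_id, unsigned long cbrs,
			unsigned long dsrs)
{
	cch->fault_bit_enable =
		(mode == MISS_FMM_POLL || mode == MISS_FMM_INTR);
	cch->tlb_int_enable = (mode == MISS_FMM_INTR);
	if (cch->tlb_int_enable)
		cch->tlb_int_select = cpu_fault_id;	/* lines 603-604 */
	cch->cbr_allocation_map = cbrs;			/* lines 613-614 */
	cch->dsr_allocation_map = dsrs;
}

int main(void)
{
	struct cch_stub cch = { 0 };

	program_cch(&cch, MISS_FMM_INTR, 7, 0x3UL, 0xfUL);
	printf("int_enable %d, select %d\n", cch.tlb_int_enable,
	       cch.tlb_int_select);
	return 0;
}
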
657 int gru_update_cch(struct gru_thread_state *gts)
660 struct gru_state *gru = gts->ts_gru;
661 int i, ctxnum = gts->ts_ctxnum, ret = 0;
667 if (gru->gs_gts[gts->ts_ctxnum] != gts)
672 cch->sizeavail[i] = gts->ts_sizeavail;
673 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
676 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
677 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
694 static int gru_retarget_intr(struct gru_thread_state *gts)
696 if (gts->ts_tlb_int_select < 0
697 || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
700 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
702 return gru_update_cch(gts);
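
gru_retarget_intr (lines 694-702) exists because a thread can migrate between CPUs while its context stays loaded: if the saved ts_tlb_int_select already matches the current CPU's fault map id, or interrupts are not in use (select < 0), it returns without touching hardware; otherwise it updates the select and rewrites the CCH via gru_update_cch. The fast-path check, sketched with a stand-in for gru_cpu_fault_map_id():

#include <stdio.h>

/* Hypothetical sketch of the retarget fast path. Returns nonzero when
 * the caller must rewrite the CCH. */
static int retarget_intr(int *tlb_int_select, int current_fault_id)
{
	if (*tlb_int_select < 0 ||		/* interrupts not in use */
	    *tlb_int_select == current_fault_id)
		return 0;			/* already pointed here */
	*tlb_int_select = current_fault_id;
	return 1;
}

int main(void)
{
	int sel = 3;

	printf("%d\n", retarget_intr(&sel, 3));	/* 0: no rewrite */
	printf("%d\n", retarget_intr(&sel, 5));	/* 1: rewrite needed */
	return 0;
}
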
712 struct gru_thread_state *gts)
717 blade_id = gts->ts_user_blade_id;
721 chiplet_id = gts->ts_user_chiplet_id;
731 void gru_check_context_placement(struct gru_thread_state *gts)
740 gru = gts->ts_gru;
741 if (!gru || gts->ts_tgid_owner != current->tgid)
744 if (!gru_check_chiplet_assignment(gru, gts)) {
746 gru_unload_context(gts, 1);
747 } else if (gru_retarget_intr(gts)) {
761 static int is_gts_stealable(struct gru_thread_state *gts,
764 if (is_kernel_context(gts))
767 return mutex_trylock(&gts->ts_ctxlock);
770 static void gts_stolen(struct gru_thread_state *gts,
773 if (is_kernel_context(gts)) {
777 mutex_unlock(&gts->ts_ctxlock);
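
is_gts_stealable/gts_stolen (lines 761-777) gate context stealing on mutex_trylock(&gts->ts_ctxlock): the thief never blocks on a victim's lock, it just skips contexts that are busy, and kernel contexts take a separate path. A pthread sketch of the same non-blocking claim, with the kernel-context case simplified away:

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical non-blocking claim, mirroring the trylock at line 767. */
struct victim {
	pthread_mutex_t ctxlock;
	bool kernel_ctx;
};

static bool try_claim(struct victim *v)
{
	if (v->kernel_ctx)
		return false;	/* the driver handles these differently */
	return pthread_mutex_trylock(&v->ctxlock) == 0;
}

static void claim_done(struct victim *v)
{
	pthread_mutex_unlock(&v->ctxlock);	/* as at line 777 */
}

int main(void)
{
	struct victim v = { PTHREAD_MUTEX_INITIALIZER, false };

	if (try_claim(&v))
		claim_done(&v);
	return 0;
}
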
782 void gru_steal_context(struct gru_thread_state *gts)
790 blade_id = gts->ts_user_blade_id;
793 cbr = gts->ts_cbr_au_count;
794 dsr = gts->ts_dsr_au_count;
808 if (gru_check_chiplet_assignment(gru, gts)) {
839 gts->ustats.context_stolen++;
847 "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
868 struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
872 int blade_id = gts->ts_user_blade_id;
880 if (!gru_check_chiplet_assignment(grux, gts))
882 if (check_gru_resources(grux, gts->ts_cbr_au_count,
883 gts->ts_dsr_au_count,
894 if (!check_gru_resources(gru, gts->ts_cbr_au_count,
895 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
899 reserve_gru_resources(gru, gts);
900 gts->ts_gru = gru;
901 gts->ts_blade = gru->gs_blade_id;
902 gts->ts_ctxnum = gru_assign_context_number(gru);
903 atomic_inc(&gts->ts_refcnt);
904 gru->gs_gts[gts->ts_ctxnum] = gts;
909 "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
910 gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
911 gts->ts_gru->gs_gid, gts->ts_ctxnum,
912 gts->ts_cbr_au_count, gts->ts_dsr_au_count);
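
gru_assign_gru_context (lines 868-912) is the placement policy: loop over the GRUs on the chosen blade, skip any failing the chiplet check, and take one with enough free CBR/DSR resources; then reserve them (line 899), record ts_gru/ts_blade/ts_ctxnum, and take an extra reference (line 903) so the gts outlives the hardware binding. A sketch of the selection loop; the real code also rechecks resources under a lock before committing:

#include <stdio.h>

/* Hypothetical GRU selection: pick a permitted GRU that can cover the
 * request, and reserve out of it. Stand-in types throughout. */
struct gru_stub {
	int free_cbrs, free_dsrs;
	int allowed;		/* stand-in for the chiplet check */
};

static int assign_gru(struct gru_stub *grus, int n, int need_cb, int need_ds)
{
	for (int i = 0; i < n; i++) {
		if (!grus[i].allowed)
			continue;
		if (grus[i].free_cbrs >= need_cb &&
		    grus[i].free_dsrs >= need_ds) {
			grus[i].free_cbrs -= need_cb;	/* reserve */
			grus[i].free_dsrs -= need_ds;
			return i;
		}
	}
	return -1;	/* no room: caller falls back to stealing */
}

int main(void)
{
	struct gru_stub grus[2] = { { 1, 1, 1 }, { 4, 8, 1 } };

	printf("assigned gru %d\n", assign_gru(grus, 2, 2, 4));	/* 1 */
	return 0;
}
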
930 struct gru_thread_state *gts;
939 gts = gru_find_thread_state(vma, TSID(vaddr, vma));
940 if (!gts)
944 mutex_lock(&gts->ts_ctxlock);
947 gru_check_context_placement(gts);
949 if (!gts->ts_gru) {
951 if (!gru_assign_gru_context(gts)) {
953 mutex_unlock(&gts->ts_ctxlock);
956 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
957 gru_steal_context(gts);
960 gru_load_context(gts);
961 paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
968 mutex_unlock(&gts->ts_ctxlock);
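
The final match group (lines 930-968) is the fault handler that ties everything together: look up (or create) the gts for the faulting address, take ts_ctxlock, revalidate placement, and if no hardware context is resident, try to assign one, only stealing after GRU_STEAL_DELAY jiffies of waiting (line 956); once loaded, the gseg's physical address is mapped and the fault returns. A control-flow sketch with stand-in stubs; only the ordering mirrors the listing:

#include <stdio.h>

/* Hypothetical fault-path shape: lookup -> lock -> assign (or steal)
 * -> load -> map. Every name below is a stand-in. */
struct gts_stub {
	int has_gru;
	long steal_jiffies;
};

#define STEAL_DELAY 100			/* stand-in for GRU_STEAL_DELAY */
static long now = 1000;			/* stand-in for jiffies */

static int try_assign(struct gts_stub *g) { g->has_gru = 1; return 1; }
static void steal_ctx(struct gts_stub *g) { (void)g; }
static void load_ctx(struct gts_stub *g) { (void)g; }

static int handle_fault(struct gts_stub *g)
{
	if (!g->has_gru) {
		if (!try_assign(g)) {
			/* only evict someone after waiting a while */
			if (g->steal_jiffies + STEAL_DELAY < now)
				steal_ctx(g);
			return -1;	/* retry the fault */
		}
	}
	load_ctx(g);	/* restore saved CB/DS state onto the hardware */
	return 0;	/* caller maps gseg_physical_address() */
}

int main(void)
{
	struct gts_stub g = { 0, 0 };

	printf("fault -> %d\n", handle_fault(&g));
	return 0;
}
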