Lines Matching refs:target

103 struct memory_target *target;
105 list_for_each_entry(target, &targets, node)
106 if (target->memory_pxm == mem_pxm)
107 return target;
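
The matches at 103-107 are the proximity-domain lookup used throughout the file. Read together (the function name comes from the call sites at 180, 343, 403 and later; the NULL fallthrough is assumed rather than matched), the helper is roughly:

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	/* Walk the global target list and match on the memory proximity domain. */
	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}
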
113 struct memory_target *target;
117 list_for_each_entry(target, &targets, node) {
118 uid_ptr = target->gen_port_device_handle + 8;
121 return target;
140 struct memory_target *target;
143 target = acpi_find_genport_target(uid);
144 if (!target)
148 target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
150 target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];
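
Lines 113-150 are the generic-port path: the lookup at 117-121 matches a target by the _UID stored at offset 8 of the saved device handle, and the caller at 140-150 copies both coordinate classes out. A sketch of the two routines; the getter's name, its error returns, and any locking are assumptions, not part of the matches:

static struct memory_target *acpi_find_genport_target(u32 uid)
{
	struct memory_target *target;
	u32 target_uid;
	u8 *uid_ptr;

	/* The _UID lives at offset 8 of the stored generic port device handle. */
	list_for_each_entry(target, &targets, node) {
		uid_ptr = target->gen_port_device_handle + 8;
		target_uid = *(u32 *)uid_ptr;
		if (uid == target_uid)
			return target;
	}

	return NULL;
}

/* Assumed shape of the caller around lines 140-150 (locking elided). */
int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord)
{
	struct memory_target *target;

	target = acpi_find_genport_target(uid);
	if (!target)
		return -ENOENT;

	coord[ACCESS_COORDINATE_LOCAL] =
		target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
	coord[ACCESS_COORDINATE_CPU] =
		target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];

	return 0;
}
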
178 struct memory_target *target;
180 target = find_mem_target(mem_pxm);
181 if (!target) {
182 target = kzalloc(sizeof(*target), GFP_KERNEL);
183 if (!target)
185 target->memory_pxm = mem_pxm;
186 target->processor_pxm = PXM_INVAL;
187 target->memregions = (struct resource) {
193 list_add_tail(&target->node, &targets);
194 INIT_LIST_HEAD(&target->caches);
197 return target;
204 struct memory_target *target;
206 target = alloc_target(mem_pxm);
207 if (!target)
212 * in the per-target memregions resource tree.
214 if (!__request_region(&target->memregions, start, len, "memory target",
222 struct memory_target *target;
224 target = alloc_target(mem_pxm);
225 if (!target)
228 memcpy(target->gen_port_device_handle, handle,
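
Lines 178-228 cover allocation. alloc_target() (178-197) is find-or-create: look up the proximity domain and only allocate, seed, and link a new entry when none exists. Its two callers at 206 and 224 then either request the SRAT memory range under the per-target memregions tree (214) or copy the generic-port device handle into place (228). A sketch of the find-or-create helper, with the memregions initializer fields (lines 187-192 are not matched) filled in by assumption:

static struct memory_target *alloc_target(int mem_pxm)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return NULL;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		/*
		 * Root resource spanning all of memory; SRAT ranges are
		 * requested under it later (field values assumed).
		 */
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	return target;
}
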
303 static void hmat_update_target_access(struct memory_target *target,
308 target->coord[access].read_latency = value;
309 target->coord[access].write_latency = value;
312 target->coord[access].read_latency = value;
315 target->coord[access].write_latency = value;
318 target->coord[access].read_bandwidth = value;
319 target->coord[access].write_bandwidth = value;
322 target->coord[access].read_bandwidth = value;
325 target->coord[access].write_bandwidth = value;
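
The stores at 308-325 follow the usual switch on the HMAT data type: the combined ACCESS_* types update both directions, the READ_/WRITE_ types update one. Only the read/write-specific labels appear elsewhere in the matches (347-353), so the two ACCESS_* case labels below are inferred:

static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:		/* inferred label */
		target->coord[access].read_latency = value;
		target->coord[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->coord[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->coord[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:	/* inferred label */
		target->coord[access].read_bandwidth = value;
		target->coord[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->coord[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->coord[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}
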
335 struct memory_target *target;
343 target = find_mem_target(pxm);
344 if (!target)
347 hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
349 hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
351 hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
353 hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
355 target->ext_updated = true;
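
The block at 335-355 applies externally supplied coordinates (the CXL CDAT path) to a target and then sets ext_updated, which is what the check at 733 later uses to skip recomputing attributes from HMAT data. A sketch, assuming the enclosing function is the hmat_update_target_coordinates() helper exported to CXL; its name, parameters, return codes, and locking are not part of the matches:

int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
				   enum access_coordinate_class access)
{
	struct memory_target *target;
	int pxm;

	if (nid == NUMA_NO_NODE)
		return -EINVAL;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return -ENODEV;

	/* Reuse the same per-type update helper the HMAT parser uses. */
	hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
				  coord->read_latency, access);
	hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
				  coord->write_latency, access);
	hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
				  coord->read_bandwidth, access);
	hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
				  coord->write_bandwidth, access);

	/* Mark the target so hmat_update_target_attrs() leaves it alone. */
	target->ext_updated = true;

	return 0;
}
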
403 struct memory_target *target = find_mem_target(tgt_pxm);
408 if (target && target->processor_pxm == init_pxm) {
409 hmat_update_target_access(target, type, value,
413 hmat_update_target_access(target, type, value,
477 struct memory_target *target;
492 target = find_mem_target(cache->memory_PD);
493 if (!target)
531 list_add_tail(&tcache->node, &target->caches);
540 struct memory_target *target = NULL;
558 target = find_mem_target(p->memory_PD);
559 if (!target) {
564 if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
571 target->processor_pxm = p->processor_PD;
631 static u32 hmat_initiator_perf(struct memory_target *target,
656 if (targs[i] == target->memory_pxm) {
723 static void hmat_update_target_attrs(struct memory_target *target,
733 if (target->ext_updated)
739 !(*(u16 *)target->gen_port_device_handle))
748 if (target->processor_pxm != PXM_INVAL) {
749 cpu_nid = pxm_to_node(target->processor_pxm);
752 set_bit(target->processor_pxm, p_nodes);
788 value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
795 hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
799 static void __hmat_register_target_initiators(struct memory_target *target,
806 mem_nid = pxm_to_node(target->memory_pxm);
807 hmat_update_target_attrs(target, p_nodes, access);
814 static void hmat_update_generic_target(struct memory_target *target)
818 hmat_update_target_attrs(target, p_nodes,
820 hmat_update_target_attrs(target, p_nodes,
824 static void hmat_register_target_initiators(struct memory_target *target)
828 __hmat_register_target_initiators(target, p_nodes,
830 __hmat_register_target_initiators(target, p_nodes,
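
Lines 799-830 wire targets to their initiators: __hmat_register_target_initiators() recomputes the best-performing initiator set for one access class and registers the memory node under each of those compute nodes, hmat_update_generic_target() (814-820) runs the same attribute pass for the two generic-port sink classes, and hmat_register_target_initiators() (824-830) invokes the helper for the LOCAL and CPU classes. The loop body and the register_memory_node_under_compute_node() call below are assumptions filled in around the matched lines:

static void __hmat_register_target_initiators(struct memory_target *target,
					      unsigned long *p_nodes,
					      int access)
{
	unsigned int mem_nid, cpu_nid;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	hmat_update_target_attrs(target, p_nodes, access);
	/* p_nodes now holds the best-performing initiator proximity domains. */
	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, access);
	}
}
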
834 static void hmat_register_target_cache(struct memory_target *target)
836 unsigned mem_nid = pxm_to_node(target->memory_pxm);
839 list_for_each_entry(tcache, &target->caches, node)
843 static void hmat_register_target_perf(struct memory_target *target, int access)
845 unsigned mem_nid = pxm_to_node(target->memory_pxm);
846 node_set_perf_attrs(mem_nid, &target->coord[access], access);
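
The two small helpers at 834-846 push the per-target data into the generic node interfaces: each cache level collected under target->caches is handed to node_add_cache(), and the chosen coordinate class goes to node_set_perf_attrs(). Only the list walk (839) and the node_set_perf_attrs() line (846) are matched; the target_cache type and the node_add_cache() call are assumptions:

static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);

	node_set_perf_attrs(mem_nid, &target->coord[access], access);
}
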
849 static void hmat_register_target_devices(struct memory_target *target)
860 for (res = target->memregions.child; res; res = res->sibling) {
861 int target_nid = pxm_to_node(target->memory_pxm);
867 static void hmat_register_target(struct memory_target *target)
869 int nid = pxm_to_node(target->memory_pxm);
875 hmat_register_target_devices(target);
882 if (*(u16 *)target->gen_port_device_handle) {
883 hmat_update_generic_target(target);
884 target->registered = true;
899 if (!target->registered) {
900 hmat_register_target_initiators(target);
901 hmat_register_target_cache(target);
902 hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
903 hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
904 target->registered = true;
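
The registration flow at 867-904 has two sides: a target with a non-zero generic-port device handle only has its coordinates refreshed, while an ordinary memory target registers its initiators, caches, and both performance classes exactly once, guarded by the registered flag. A rough shape; the offline-node check and the locking in the unmatched span 885-898 are assumptions:

static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/* Device ranges are registered even if the node is offline. */
	hmat_register_target_devices(target);

	/* Generic ports only record their coordinates. */
	if (*(u16 *)target->gen_port_device_handle) {
		hmat_update_generic_target(target);
		target->registered = true;
	}

	/* Assumed: skip nodes that are offline or were never created. */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
		hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
		target->registered = true;
	}
}
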
911 struct memory_target *target;
913 list_for_each_entry(target, &targets, node)
914 hmat_register_target(target);
920 struct memory_target *target;
928 target = find_mem_target(pxm);
929 if (!target)
932 hmat_register_target(target);
940 struct memory_target *target;
948 target = find_mem_target(pxm);
949 if (!target)
951 attrs = &target->coord[1];
964 struct memory_target *target;
970 target = find_mem_target(pxm);
971 if (!target)
975 hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
978 perf = &target->coord[1];
993 struct memory_target *target, *tnext;
998 list_for_each_entry_safe(target, tnext, &targets, node) {
1001 list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
1006 list_del(&target->node);
1007 res = target->memregions.child;
1010 __release_region(&target->memregions, res->start,
1014 kfree(target);
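
The teardown at 993-1014 undoes all of the above with the _safe list walk: free each target's cache descriptors, unlink the target, release every region that was requested under memregions, then free the target itself. Roughly as below; the function name hmat_free_structures and the bookkeeping between the matched lines are assumptions:

static void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		/* Drop the per-target side-cache descriptors first. */
		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		/* Release every range requested under the memregions root. */
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					 resource_size(res));
			res = res_next;
		}
		kfree(target);
	}
	/* (initiator and locality lists are freed the same way) */
}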