Lines matching refs:mt_entry (cross-reference listing; the number at the start of each line is that line's position in the source file being indexed)

115 	struct dev_mapping_table *mt_entry;
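
Note: every match in this listing dereferences a struct dev_mapping_table. The sketch below collects the fields those references imply, for orientation only; the field types, widths, and ordering are assumptions, not the driver's actual definition.

	struct dev_mapping_table {
		uint64_t	physical_id;	/* device WWID / SAS address (lines 391, 1645) */
		uint32_t	device_info;	/* MPR_DEV_RESERVED / MPR_MAP_IN_USE flags (lines 488, 527) */
		uint32_t	phy_bits;	/* enclosure phy bitmap (lines 961, 1300) */
		uint16_t	dpm_entry_num;	/* persistent DPM slot, or MPR_DPM_BAD_IDX (lines 190, 629) */
		uint16_t	dev_handle;	/* firmware device handle (lines 455, 1647) */
		uint16_t	id;		/* mapped target ID (lines 133, 1646) */
		uint8_t		missing_count;	/* capped at MPR_MAX_MISSING_COUNT (lines 670-671) */
		uint8_t		init_complete;	/* set once the slot's mapping is settled (line 665) */
	};
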
132 mt_entry = &sc->mapping_table[et_entry->start_index];
133 dpm_entry->DeviceIndex = htole16(mt_entry->id);
168 * @mt_entry: mapping table entry
175 struct dev_mapping_table *mt_entry)
190 if (mt_entry->dpm_entry_num == MPR_DPM_BAD_IDX) {
193 mt_entry->id);
202 dpm_entry = dpm_entry + mt_entry->dpm_entry_num;
204 mt_entry->physical_id);
205 dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
206 dpm_entry->DeviceIndex = htole16(mt_entry->id);
207 dpm_entry->MappingInformation = htole16(mt_entry->missing_count);
214 __func__, mt_entry->dpm_entry_num, mt_entry->id);
216 mt_entry->dpm_entry_num)) {
219 mt_entry->dpm_entry_num, mt_entry->id);
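
The fragments at source lines 168-219 belong to the helper that refreshes a device's persistent device-mapping (DPM) page entry from its mapping-table entry. Reassembled below as a sketch: the early-return shape and the omitted config-page write and error reporting (lines 214-219) are assumptions, and the field assignments follow the forms visible at lines 204-207 and 732-734.

	if (mt_entry->dpm_entry_num == MPR_DPM_BAD_IDX) {
		/* No persistent slot reserved for this target ID (line 193). */
		return;
	}
	dpm_entry = dpm_entry + mt_entry->dpm_entry_num;
	dpm_entry->PhysicalIdentifier.Low =
	    (0xFFFFFFFF & mt_entry->physical_id);
	dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
	dpm_entry->DeviceIndex = htole16(mt_entry->id);
	dpm_entry->MappingInformation = htole16(mt_entry->missing_count);
	/* The entry is then written back to the controller; a failed write is
	 * reported against mt_entry->dpm_entry_num and mt_entry->id. */
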
349 struct dev_mapping_table *mt_entry;
365 mt_entry = &sc->mapping_table[start_idx];
366 for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) {
367 if (mt_entry->missing_count > high_missing_count) {
368 high_missing_count = mt_entry->missing_count;
386 struct dev_mapping_table *mt_entry;
389 mt_entry = &sc->mapping_table[start_idx];
390 for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
391 if (mt_entry->physical_id == wwid)
408 struct dev_mapping_table *mt_entry;
411 mt_entry = &sc->mapping_table[map_idx];
412 if (mt_entry->physical_id == dev_id)
429 struct dev_mapping_table *mt_entry;
432 mt_entry = &sc->mapping_table[start_idx];
433 for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
434 if (mt_entry->dev_handle == volHandle)
451 struct dev_mapping_table *mt_entry;
454 mt_entry = &sc->mapping_table[map_idx];
455 if (mt_entry->dev_handle == handle)
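
Source lines 386-455 above are a family of small lookup helpers that scan a mapping-table range for a matching WWID, device ID, RAID volume handle, or device handle. A sketch of the WWID variant is below; the function name, the softc type name, and MPR_MAPTABLE_BAD_IDX as the not-found return value are assumptions.

	static uint32_t
	sketch_get_mt_idx_from_wwid(struct mpr_softc *sc, uint64_t wwid,
	    uint32_t start_idx, uint32_t end_idx)
	{
		struct dev_mapping_table *mt_entry;
		uint32_t map_idx;

		/* Walk the slots in [start_idx, end_idx] looking for the WWID. */
		mt_entry = &sc->mapping_table[start_idx];
		for (map_idx = start_idx; map_idx <= end_idx;
		    map_idx++, mt_entry++)
			if (mt_entry->physical_id == wwid)
				return (map_idx);
		return (MPR_MAPTABLE_BAD_IDX);
	}
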
476 struct dev_mapping_table *mt_entry;
486 mt_entry = &sc->mapping_table[start_idx];
487 for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
488 if (!(mt_entry->device_info & MPR_MAP_IN_USE))
491 if (mt_entry->missing_count > high_missing_count) {
492 high_missing_count = mt_entry->missing_count;
517 struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx];
526 for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++)
527 if (!(mt_entry->device_info & (MPR_MAP_IN_USE |
576 struct dev_mapping_table *mt_entry;
628 mt_entry = &sc->mapping_table[map_idx];
629 mt_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
650 struct dev_mapping_table *mt_entry;
664 mt_entry = &sc->mapping_table[map_idx];
665 mt_entry->init_complete = 1;
668 mt_entry->missing_count = 0;
670 if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
671 mt_entry->missing_count++;
673 mt_entry->device_info &= ~MPR_MAP_IN_USE;
674 mt_entry->dev_handle = 0;
685 dpm_idx = mt_entry->dpm_entry_num;
708 if ((mt_entry->physical_id ==
711 mt_entry->missing_count)) {
714 __func__, mt_entry->id);
726 "with target ID %d.\n", __func__, mt_entry->id);
727 mt_entry->dpm_entry_num = dpm_idx;
732 (0xFFFFFFFF & mt_entry->physical_id);
734 (mt_entry->physical_id >> 32);
736 dpm_entry->MappingInformation = mt_entry->missing_count;
744 __func__, mt_entry->id);
806 struct dev_mapping_table *mt_entry;
814 mt_entry = &sc->mapping_table[map_idx];
815 if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
816 mt_entry->missing_count++;
825 _mapping_clear_map_entry(mt_entry);
835 mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
838 dpm_entry += mt_entry->dpm_entry_num;
839 if (dpm_entry->MappingInformation != mt_entry->missing_count) {
840 dpm_entry->MappingInformation = mt_entry->missing_count;
841 sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1;
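
Source lines 806-841 above are the removal-path bookkeeping: the slot's missing count is bumped (capped at MPR_MAX_MISSING_COUNT); depending on mapping mode the whole slot may be cleared (line 825), and when the device has a persistent DPM slot the stored missing count is updated and the slot is marked for flush. The DPM-sync part is sketched below; the mode checks, the _mapping_clear_map_entry() case, and the extra conditions ANDed at line 835 are omitted, and dpm_entry is assumed to already point at the base of the DPM page.

	mt_entry = &sc->mapping_table[map_idx];
	if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
		mt_entry->missing_count++;
	if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
		/* Keep the persistent page's missing count in sync and queue
		 * the entry for a flush (lines 838-841). */
		dpm_entry += mt_entry->dpm_entry_num;
		if (dpm_entry->MappingInformation != mt_entry->missing_count) {
			dpm_entry->MappingInformation = mt_entry->missing_count;
			sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1;
		}
	}
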
924 struct dev_mapping_table *mt_entry;
959 mt_entry = &sc->mapping_table[map_idx];
960 if ((et_entry->enclosure_id == mt_entry->physical_id) &&
961 (!mt_entry->phy_bits || (mt_entry->phy_bits &
983 mt_entry = &sc->mapping_table[map_idx];
984 if (!(mt_entry->device_info & MPR_DEV_RESERVED)) {
1033 mt_entry = &sc->mapping_table[enc_entry->start_index];
1036 mt_entry++)
1037 mt_entry->device_info &= ~MPR_DEV_RESERVED;
1049 mt_entry = &sc->mapping_table[map_idx];
1050 if (!(mt_entry->device_info & MPR_DEV_RESERVED)) {
1091 mt_entry = &sc->mapping_table[map_idx];
1092 _mapping_clear_map_entry(mt_entry);
1115 mt_entry = &sc->mapping_table[enc_entry->
1120 mt_entry++)
1121 mt_entry->device_info |=
1153 struct dev_mapping_table *mt_entry;
1295 mt_entry = &sc->mapping_table[map_idx];
1297 + map_idx); index++, mt_entry++) {
1298 mt_entry->device_info = MPR_DEV_RESERVED;
1299 mt_entry->physical_id = et_entry->enclosure_id;
1300 mt_entry->phy_bits = et_entry->phy_bits;
1301 mt_entry->missing_count = 0;
1331 struct dev_mapping_table *mt_entry;
1447 mt_entry = &sc->mapping_table[map_idx];
1449 + map_idx); index++, mt_entry++) {
1450 mt_entry->device_info = MPR_DEV_RESERVED;
1451 mt_entry->physical_id = et_entry->enclosure_id;
1452 mt_entry->phy_bits = et_entry->phy_bits;
1453 mt_entry->missing_count = 0;
1470 struct dev_mapping_table *mt_entry;
1475 mt_entry = &sc->mapping_table[start_idx];
1476 for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++)
1477 mt_entry->physical_id = et_entry->enclosure_id;
1585 struct dev_mapping_table *mt_entry;
1644 mt_entry = &sc->mapping_table[map_idx];
1645 mt_entry->physical_id = phy_change->physical_id;
1646 mt_entry->id = map_idx;
1647 mt_entry->dev_handle = phy_change->dev_handle;
1648 mt_entry->missing_count = 0;
1649 mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
1650 mt_entry->device_info = phy_change->device_info |
1701 mt_entry->dpm_entry_num = dpm_idx;
1734 mt_entry = &sc->mapping_table[map_idx];
1736 mt_entry->dpm_entry_num);
1738 mt_entry->init_complete = 0;
1742 mt_entry = &sc->mapping_table[map_idx];
1743 mt_entry->physical_id = phy_change->physical_id;
1744 mt_entry->id = map_idx;
1745 mt_entry->dev_handle = phy_change->dev_handle;
1746 mt_entry->missing_count = 0;
1747 mt_entry->device_info = phy_change->device_info
1759 if (mt_entry->dpm_entry_num !=
1761 dpm_idx = mt_entry->dpm_entry_num;
1779 if ((mt_entry->physical_id ==
1781 mt_entry->init_complete = 1;
1783 mt_entry->init_complete = 0;
1786 mt_entry->init_complete = 0;
1789 !mt_entry->init_complete) {
1790 mt_entry->dpm_entry_num = dpm_idx;
1796 mt_entry->physical_id);
1798 (mt_entry->physical_id >> 32);
1815 mt_entry->init_complete = 1;
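
Source lines 1585-1815 above (and the nearly parallel block at 1842-2074 for a different event type) handle an add from a topology change event: a mapping-table slot is filled in from the change descriptor and, when persistent mapping is enabled, a DPM slot is assigned or reused (lines 1759-1798). The slot fill-in from lines 1644-1650 is reassembled below; phy_change and et_entry are the local names used in the listing, and the flags OR'd into device_info are truncated there, so that part is left as a comment.

	mt_entry = &sc->mapping_table[map_idx];
	mt_entry->physical_id = phy_change->physical_id;
	mt_entry->id = map_idx;
	mt_entry->dev_handle = phy_change->dev_handle;
	mt_entry->missing_count = 0;
	mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
	/* device_info = phy_change->device_info | <flags truncated in the
	 * listing at line 1650> */
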
1842 struct dev_mapping_table *mt_entry;
1901 mt_entry = &sc->mapping_table[map_idx];
1902 mt_entry->physical_id = port_change->physical_id;
1903 mt_entry->id = map_idx;
1904 mt_entry->dev_handle = port_change->dev_handle;
1905 mt_entry->missing_count = 0;
1906 mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
1907 mt_entry->device_info = port_change->device_info |
1958 mt_entry->dpm_entry_num = dpm_idx;
1991 mt_entry = &sc->mapping_table[map_idx];
1993 mt_entry->dpm_entry_num);
1995 mt_entry->init_complete = 0;
1999 mt_entry = &sc->mapping_table[map_idx];
2000 mt_entry->physical_id =
2002 mt_entry->id = map_idx;
2003 mt_entry->dev_handle = port_change->dev_handle;
2004 mt_entry->missing_count = 0;
2005 mt_entry->device_info =
2018 if (mt_entry->dpm_entry_num !=
2020 dpm_idx = mt_entry->dpm_entry_num;
2038 if ((mt_entry->physical_id ==
2040 mt_entry->init_complete = 1;
2042 mt_entry->init_complete = 0;
2045 mt_entry->init_complete = 0;
2048 !mt_entry->init_complete) {
2049 mt_entry->dpm_entry_num = dpm_idx;
2055 mt_entry->physical_id);
2057 (mt_entry->physical_id >> 32);
2074 mt_entry->init_complete = 1;
2216 struct dev_mapping_table *mt_entry;
2265 mt_entry = &sc->mapping_table[dev_idx];
2266 mt_entry->physical_id =
2268 mt_entry->physical_id = (mt_entry->physical_id << 32) |
2270 mt_entry->id = dev_idx;
2271 mt_entry->missing_count = missing_cnt;
2272 mt_entry->dpm_entry_num = entry_num;
2273 mt_entry->device_info = MPR_DEV_RESERVED;
2329 mt_entry = &sc->mapping_table[dev_idx];
2331 map_idx++, mt_entry++) {
2332 if (mt_entry->dpm_entry_num !=
2342 mt_entry->physical_id = (physical_id << 32) |
2344 mt_entry->phy_bits = phy_bits;
2345 mt_entry->id = dev_idx;
2346 mt_entry->dpm_entry_num = entry_num;
2347 mt_entry->missing_count = missing_cnt;
2348 mt_entry->device_info = MPR_DEV_RESERVED;
2359 mt_entry = &sc->mapping_table[map_idx];
2360 if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
2367 mt_entry->physical_id = (physical_id << 32) |
2369 mt_entry->phy_bits = phy_bits;
2370 mt_entry->id = dev_idx;
2371 mt_entry->missing_count = missing_cnt;
2372 mt_entry->dpm_entry_num = entry_num;
2373 mt_entry->device_info = MPR_DEV_RESERVED;
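
Source lines 2216-2373 above restore mapping-table entries from the persistent DPM page during initialization; the 64-bit WWID is rebuilt from the two 32-bit halves kept in the page. The reconstruction from lines 2265-2273 is sketched below. The right-hand side at line 2266 is truncated in the listing, so reading it from dpm_entry->PhysicalIdentifier.High is an inference from the write path (lines 205, 732-734); dev_idx, missing_cnt, and entry_num are the loop-local names implied by the fragments.

	mt_entry = &sc->mapping_table[dev_idx];
	mt_entry->physical_id = dpm_entry->PhysicalIdentifier.High;
	mt_entry->physical_id = (mt_entry->physical_id << 32) |
	    dpm_entry->PhysicalIdentifier.Low;
	mt_entry->id = dev_idx;
	mt_entry->missing_count = missing_cnt;
	mt_entry->dpm_entry_num = entry_num;
	mt_entry->device_info = MPR_DEV_RESERVED;
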
2408 struct dev_mapping_table *mt_entry;
2458 mt_entry = sc->mapping_table;
2459 for (i = 0; i < sc->max_devices; i++, mt_entry++) {
2460 if (mt_entry->init_complete) {
2510 mt_entry = &sc->mapping_table[start_idx];
2515 mt_entry = sc->mapping_table;
2525 for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) {
2526 if (mt_entry->device_info & MPR_DEV_RESERVED
2527 && !mt_entry->physical_id)
2528 mt_entry->init_complete = 1;
2529 else if (mt_entry->device_info & MPR_DEV_RESERVED) {
2530 if (!mt_entry->init_complete) {
2535 if (mt_entry->missing_count <
2537 mt_entry->missing_count++;
2538 if (mt_entry->dpm_entry_num !=
2541 mt_entry);
2544 mt_entry->init_complete = 1;
2672 struct dev_mapping_table *mt_entry;
2675 mt_entry = &sc->mapping_table[map_idx];
2676 if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
2678 return mt_entry->id;
2711 struct dev_mapping_table *mt_entry;
2714 mt_entry = &sc->mapping_table[start_idx];
2715 for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
2716 if (mt_entry->dev_handle == volHandle &&
2717 mt_entry->physical_id == wwid)
2718 return mt_entry->id;
3018 struct dev_mapping_table *mt_entry;
3070 mt_entry = &sc->mapping_table[map_idx];
3071 mt_entry->id = map_idx;
3072 mt_entry->dev_handle = le16toh
3074 mt_entry->device_info =
3100 mt_entry = &sc->mapping_table[map_idx];
3101 mt_entry->physical_id = wwid_table[i];
3102 mt_entry->id = map_idx;
3103 mt_entry->dev_handle = le16toh(element->
3105 mt_entry->device_info = MPR_DEV_RESERVED |
3134 mt_entry = &sc->mapping_table[map_idx];
3136 element, mt_entry->physical_id);