Lines Matching refs:dm

101 static devfs_rid devfs_rid_input(devfs_rid rid, struct devfs_mount *dm);
104 struct devfs_mount *dm, struct devfs_dirent *de);
105 static void devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm);
110 static int devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm);
112 static int devfs_rule_match(struct devfs_krule *dk, struct devfs_mount *dm,
114 static int devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_mount *dm,
116 static void devfs_rule_run(struct devfs_krule *dk, struct devfs_mount *dm,
120 struct devfs_mount *dm, struct devfs_dirent *de,
123 struct devfs_mount *dm);
127 static int devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm);
138 * lock on dm in case we need to run anything.
141 devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
145 sx_assert(&dm->dm_lock, SX_XLOCKED);
147 if (dm->dm_ruleset == 0)
150 ds = devfs_ruleset_bynum(dm->dm_ruleset);
152 devfs_ruleset_applyde(ds, dm, de, devfs_rule_depth);
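Reassembled from the matched lines, the per-entry apply path is short: under the exclusive dm_lock, bail out if no ruleset is active, otherwise look the set up and apply it to the entry. A sketch (the void return type and the early return are assumptions; everything else is in the matches above):

    void
    devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
    {
            struct devfs_ruleset *ds;

            sx_assert(&dm->dm_lock, SX_XLOCKED);
            if (dm->dm_ruleset == 0)
                    return;                 /* no active ruleset on this mount */
            ds = devfs_ruleset_bynum(dm->dm_ruleset);
            devfs_ruleset_applyde(ds, dm, de, devfs_rule_depth);
    }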
160 devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data, struct thread *td)
170 sx_assert(&dm->dm_lock, SX_XLOCKED);
187 error = devfs_rule_input(dr, dm);
203 error = devfs_rule_input(dr, dm);
228 devfs_rule_applydm(dk, dm);
233 rid = devfs_rid_input(rid, dm);
239 devfs_rule_applydm(dk, dm);
243 rid = devfs_rid_input(rid, dm);
254 error = devfs_rule_input(dr, dm);
283 error = devfs_ruleset_use(rsnum, dm);
287 rsnum = rid2rsn(devfs_rid_input(mkrid(rsnum, 0), dm));
293 devfs_ruleset_applydm(ds, dm);
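The ioctl handler normalizes every argument against dm before acting: whole rules go through devfs_rule_input() and bare ids through devfs_rid_input(). A sketch of the apply-by-id case, assuming the DEVFSIO_RAPPLYID command name and a devfs_rule_byid() lookup helper, neither of which appears in the matches:

    case DEVFSIO_RAPPLYID:
            rid = *(devfs_rid *)data;
            rid = devfs_rid_input(rid, dm);         /* default to dm's active ruleset */
            dk = devfs_rule_byid(rid);              /* assumed lookup helper */
            if (dk == NULL) {
                    error = ENOENT;
                    break;
            }
            devfs_rule_applydm(dk, dm);             /* run the rule over every entry */
            break;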
317 * Adjust the rule identifier to use the ruleset of dm if one isn't
325 devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
329 return (mkrid(dm->dm_ruleset, rid2rn(rid)));
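A devfs_rid packs a ruleset number in the high half and a rule number in the low half; rid2rsn(), rid2rn() and mkrid() split and recombine them. A sketch of the whole function, assuming a ruleset number of 0 means "not specified":

    static devfs_rid
    devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
    {

            if (rid2rsn(rid) == 0)          /* no ruleset given: use dm's */
                    return (mkrid(dm->dm_ruleset, rid2rn(rid)));
            return (rid);
    }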
343 devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_mount *dm,
349 devfs_rule_applyde_recursive(dk, dm, de2);
350 devfs_rule_run(dk, dm, de, devfs_rule_depth);
354 * Apply dk to all entries in dm.
357 devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm)
360 devfs_rule_applyde_recursive(dk, dm, dm->dm_rootdir);
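devfs_rule_applydm() just seeds the walk with dm->dm_rootdir; the recursion visits each child subtree first and then runs the rule on the entry itself. A sketch, where the child-list field names (de_dlist, de_list) are assumptions:

    static void
    devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_mount *dm,
        struct devfs_dirent *de)
    {
            struct devfs_dirent *de2;

            TAILQ_FOREACH(de2, &de->de_dlist, de_list)      /* field names assumed */
                    devfs_rule_applyde_recursive(dk, dm, de2);
            devfs_rule_run(dk, dm, de, devfs_rule_depth);
    }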
457 devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm)
462 dr->dr_id = devfs_rid_input(dr->dr_id, dm);
532 devfs_rule_match(struct devfs_krule *dk, struct devfs_mount *dm,
566 if (!devfs_rule_matchpath(dk, dm, de))
576 devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_mount *dm,
589 (de->de_dirent->d_type == DT_DIR && de != dm->dm_rootdir &&
592 pname = devfs_fqpn(specname, dm, de, NULL);
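Path rules compare against the entry's fully qualified name under the mount point, which devfs_fqpn() builds into the caller's buffer. A simplified sketch of the comparison; the dr_pathptrn field and the fnmatch()-style glob call are assumptions, only the devfs_fqpn() call and the directory special case appear in the matches:

    pname = devfs_fqpn(specname, dm, de, NULL);     /* path relative to the mount */
    if (pname == NULL)
            return (0);
    return (fnmatch(dk->dk_rule.dr_pathptrn, pname, 0) == 0);   /* assumed */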
606 devfs_rule_run(struct devfs_krule *dk, struct devfs_mount *dm,
612 if (!devfs_rule_match(dk, dm, de))
639 devfs_ruleset_applyde(ds, dm, de, depth - 1);
648 devfs_ruleset_applyde(struct devfs_ruleset *ds, struct devfs_mount *dm,
654 devfs_rule_run(dk, dm, de, depth);
658 * Apply all the rules in ds to all the entries in dm.
661 devfs_ruleset_applydm(struct devfs_ruleset *ds, struct devfs_mount *dm)
669 * foreach(de in dm)
674 * foreach(de in dm)
682 devfs_rule_applydm(dk, dm);
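The foreach comments at lines 669 and 674 weigh the two possible loop nestings; the call at line 682 shows that the outer loop is over rules, with devfs_rule_applydm() walking the entries for each one. A sketch, with the queue macro and field names (ds_rules, dk_list) as assumptions:

    struct devfs_krule *dk;

    TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)       /* field names assumed */
            devfs_rule_applydm(dk, dm);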
748 * Make rsnum the active ruleset for dm.
751 devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm)
755 if (dm->dm_ruleset != 0) {
756 cds = devfs_ruleset_bynum(dm->dm_ruleset);
762 dm->dm_ruleset = 0;
771 dm->dm_ruleset = rsnum;
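Switching the active ruleset is a reference-count hand-off: drop the reference on the current set, then look up (or create) the new one, reference it, and record it in dm_ruleset. A sketch; ds_refcount and the devfs_ruleset_create() helper are assumptions not visible in the matches:

    struct devfs_ruleset *cds, *ds;

    if (dm->dm_ruleset != 0) {
            cds = devfs_ruleset_bynum(dm->dm_ruleset);
            --cds->ds_refcount;                     /* assumed field */
            dm->dm_ruleset = 0;
    }
    if (rsnum == 0)
            return (0);                             /* ruleset 0 means "none" */
    ds = devfs_ruleset_bynum(rsnum);
    if (ds == NULL)
            ds = devfs_ruleset_create(rsnum);       /* assumed helper */
    ++ds->ds_refcount;
    dm->dm_ruleset = rsnum;
    return (0);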
777 devfs_rules_cleanup(struct devfs_mount *dm)
781 sx_assert(&dm->dm_lock, SX_XLOCKED);
782 if (dm->dm_ruleset != 0) {
783 ds = devfs_ruleset_bynum(dm->dm_ruleset);
790 * Make rsnum the active ruleset for dm (locked)
793 devfs_ruleset_set(devfs_rsnum rsnum, struct devfs_mount *dm)
796 sx_assert(&dm->dm_lock, SX_XLOCKED);
799 devfs_ruleset_use(rsnum, dm);
807 devfs_ruleset_apply(struct devfs_mount *dm)
811 sx_assert(&dm->dm_lock, SX_XLOCKED);
814 if (dm->dm_ruleset == 0) {
818 ds = devfs_ruleset_bynum(dm->dm_ruleset);
820 devfs_ruleset_applydm(ds, dm);