/*  Title:      Pure/PIDE/headless.scala
    Author:     Makarius

Headless PIDE session and resources from file-system.
*/

package isabelle


import java.io.{File => JFile}

import scala.annotation.tailrec
import scala.collection.mutable


object Headless
{
  /** session **/

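  /* snapshot of a theory node, with the guarantee that it belongs to the
     given stable document version */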
  private def stable_snapshot(
    state: Document.State, version: Document.Version, name: Document.Node.Name): Document.Snapshot =
  {
    val snapshot = state.snapshot(name)
    assert(version.id == snapshot.version.id)
    snapshot
  }

  class Use_Theories_Result private[Headless](
    val state: Document.State,
    val version: Document.Version,
    val nodes: List[(Document.Node.Name, Document_Status.Node_Status)],
    val nodes_committed: List[(Document.Node.Name, Document_Status.Node_Status)])
  {
    def nodes_pending: List[(Document.Node.Name, Document_Status.Node_Status)] =
    {
      val committed = nodes_committed.iterator.map(_._1).toSet
      nodes.filter(p => !committed(p._1))
    }

    def snapshot(name: Document.Node.Name): Document.Snapshot =
      stable_snapshot(state, version, name)

    def ok: Boolean =
      (nodes.iterator ++ nodes_committed.iterator).forall({ case (_, st) => st.ok })
  }

  class Session private[Headless](
    session_name: String,
    _session_options: => Options,
    override val resources: Resources) extends isabelle.Session(_session_options, resources)
  {
    session =>


    private def loaded_theory(name: Document.Node.Name): Boolean =
      resources.session_base.loaded_theory(name.theory)


    /* options */

    override def consolidate_delay: Time = session_options.seconds("headless_consolidate_delay")
    override def prune_delay: Time = session_options.seconds("headless_prune_delay")

    def default_check_delay: Time = session_options.seconds("headless_check_delay")
    def default_check_limit: Int = session_options.int("headless_check_limit")
    def default_nodes_status_delay: Time = session_options.seconds("headless_nodes_status_delay")
    def default_watchdog_timeout: Time = session_options.seconds("headless_watchdog_timeout")
    def default_commit_cleanup_delay: Time = session_options.seconds("headless_commit_cleanup_delay")


    /* temporary directory */

    val tmp_dir: JFile = Isabelle_System.tmp_dir("server_session")
    val tmp_dir_name: String = File.path(tmp_dir).implode

    def master_directory(master_dir: String): String =
      proper_string(master_dir) getOrElse tmp_dir_name

    override def toString: String = session_name

    override def stop(): Process_Result =
    {
      try { super.stop() }
      finally { Isabelle_System.rm_tree(tmp_dir) }
    }


    /* theories */

    private object Load_State
    {
      def finished: Load_State = Load_State(Nil, Nil, 0)

      def count_file(name: Document.Node.Name): Long =
        if (loaded_theory(name)) 0 else name.path.file.length
    }

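    /* loading state: "pending" theories have been submitted but are not all
       finished yet, "rest" still awaits submission; "load_limit" bounds each
       batch by the approximate size (in bytes) of theory source files
       (0 means unbounded) */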
    private case class Load_State(
      pending: List[Document.Node.Name], rest: List[Document.Node.Name], load_limit: Long)
    {
      def next(
        dep_graph: Document.Node.Name.Graph[Unit],
        finished: Document.Node.Name => Boolean): (List[Document.Node.Name], Load_State) =
      {
        def load_requirements(pending1: List[Document.Node.Name], rest1: List[Document.Node.Name])
          : (List[Document.Node.Name], Load_State) =
        {
          val load_theories = dep_graph.all_preds(pending1).reverse.filterNot(finished)
          (load_theories, Load_State(pending1, rest1, load_limit))
        }

        if (!pending.forall(finished)) (Nil, this)
        else if (rest.isEmpty) (Nil, Load_State.finished)
        else if (load_limit == 0) load_requirements(rest, Nil)
        else {
          val reachable =
            dep_graph.reachable_limit(load_limit, Load_State.count_file, dep_graph.imm_preds, rest)
          val (pending1, rest1) = rest.partition(reachable)
          load_requirements(pending1, rest1)
        }
      }
    }

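    /* overall state of a use_theories invocation: remaining dependency graph,
       loading state, watchdog bookkeeping, theories already committed via the
       optional commit function, and the (eventual) result */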
    private sealed case class Use_Theories_State(
      dep_graph: Document.Node.Name.Graph[Unit],
      load_state: Load_State,
      watchdog_timeout: Time,
      commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit],
      last_update: Time = Time.now(),
      nodes_status: Document_Status.Nodes_Status = Document_Status.Nodes_Status.empty,
      already_committed: Map[Document.Node.Name, Document_Status.Node_Status] = Map.empty,
      result: Option[Exn.Result[Use_Theories_Result]] = None)
    {
      def update(new_nodes_status: Document_Status.Nodes_Status): Use_Theories_State =
        copy(last_update = Time.now(), nodes_status = new_nodes_status)

      def watchdog: Boolean =
        watchdog_timeout > Time.zero && Time.now() - last_update > watchdog_timeout

      def finished_result: Boolean = result.isDefined

      def join_result: Option[(Exn.Result[Use_Theories_Result], Use_Theories_State)] =
        if (finished_result) Some((result.get, this)) else None

      def cancel_result: Use_Theories_State =
        if (finished_result) this else copy(result = Some(Exn.Exn(Exn.Interrupt())))

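      /* committed theories whose successors are all committed as well: these
         may be removed from the document model; the frontier is computed
         backwards from the maximal nodes of the dependency graph */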
      def clean_theories: (List[Document.Node.Name], Use_Theories_State) =
      {
        @tailrec def frontier(base: List[Document.Node.Name], front: Set[Document.Node.Name])
          : Set[Document.Node.Name] =
        {
          val add = base.filter(name => dep_graph.imm_succs(name).forall(front))
          if (add.isEmpty) front
          else {
            val preds = add.map(dep_graph.imm_preds)
            val base1 = (preds.head /: preds.tail)(_ ++ _).toList.filter(already_committed.keySet)
            frontier(base1, front ++ add)
          }
        }

        if (already_committed.isEmpty) (Nil, this)
        else {
          val base =
            (for {
              (name, (_, (_, succs))) <- dep_graph.iterator
              if succs.isEmpty && already_committed.isDefinedAt(name)
            } yield name).toList
          val clean = frontier(base, Set.empty)
          if (clean.isEmpty) (Nil, this)
          else {
            (dep_graph.topological_order.filter(clean),
              copy(dep_graph = dep_graph.exclude(clean)))
          }
        }
      }

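      /* main check: commit newly consolidated theories in dependency order,
         deliver the overall result once every theory is finished (or the
         check limit / watchdog applies), and determine the next batch of
         theories to load */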
      def check(state: Document.State, version: Document.Version, beyond_limit: Boolean)
        : (List[Document.Node.Name], Use_Theories_State) =
      {
        val already_committed1 =
          commit match {
            case None => already_committed
            case Some(commit_fn) =>
              (already_committed /: dep_graph.topological_order)(
                { case (committed, name) =>
                    def parents_committed: Boolean =
                      version.nodes(name).header.imports.forall(parent =>
                        loaded_theory(parent) || committed.isDefinedAt(parent))
                    if (!committed.isDefinedAt(name) && parents_committed &&
                        state.node_consolidated(version, name))
                    {
                      val snapshot = stable_snapshot(state, version, name)
                      val status = Document_Status.Node_Status.make(state, version, name)
                      commit_fn(snapshot, status)
                      committed + (name -> status)
                    }
                    else committed
                })
          }

        def finished_theory(name: Document.Node.Name): Boolean =
          loaded_theory(name) ||
          (if (commit.isDefined) already_committed1.isDefinedAt(name)
           else state.node_consolidated(version, name))

        val result1 =
          if (!finished_result &&
            (beyond_limit || watchdog ||
              dep_graph.keys_iterator.forall(name =>
                finished_theory(name) || nodes_status.quasi_consolidated(name))))
          {
            val nodes =
              (for {
                name <- dep_graph.keys_iterator
                if !loaded_theory(name)
              } yield { (name -> Document_Status.Node_Status.make(state, version, name)) }).toList
            val nodes_committed =
              (for {
                name <- dep_graph.keys_iterator
                status <- already_committed1.get(name)
              } yield (name -> status)).toList
            Some(Exn.Res(new Use_Theories_Result(state, version, nodes, nodes_committed)))
          }
          else result

        val (load_theories, load_state1) = load_state.next(dep_graph, finished_theory(_))

        (load_theories,
          copy(already_committed = already_committed1, result = result1, load_state = load_state1))
      }
    }

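    /* load the given theories (with all requirements) into the session and
       wait until they are fully checked, or the check limit / watchdog
       timeout intervenes; the optional commit function is applied to each
       theory as soon as it is consolidated */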
    def use_theories(
      theories: List[String],
      qualifier: String = Sessions.DRAFT,
      master_dir: String = "",
      unicode_symbols: Boolean = false,
      check_delay: Time = default_check_delay,
      check_limit: Int = default_check_limit,
      watchdog_timeout: Time = default_watchdog_timeout,
      nodes_status_delay: Time = default_nodes_status_delay,
      id: UUID.T = UUID.random(),
      // commit: must not block, must not fail
      commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit] = None,
      commit_cleanup_delay: Time = default_commit_cleanup_delay,
      progress: Progress = No_Progress): Use_Theories_Result =
    {
      val dependencies =
      {
        val import_names =
          theories.map(thy =>
            resources.import_name(qualifier, master_directory(master_dir), thy) -> Position.none)
        resources.dependencies(import_names, progress = progress).check_errors
      }
      val dep_theories = dependencies.theories
      val dep_theories_set = dep_theories.toSet
      val dep_files =
        dependencies.loaded_files(false).flatMap(_._2).
          map(path => Document.Node.Name(resources.append("", path)))

      val use_theories_state =
      {
        val dep_graph = dependencies.theory_graph

        val maximals = dep_graph.maximals
        val rest =
          if (maximals.isEmpty || maximals.tail.isEmpty) maximals
          else {
            val depth = dep_graph.node_depth(Load_State.count_file)
            maximals.sortBy(node => - depth(node))
          }
        val load_limit =
          if (commit.isDefined) (session_options.real("headless_load_limit") * 1024 * 1024).round
          else 0
        val load_state = Load_State(Nil, rest, load_limit)

        Synchronized(Use_Theories_State(dep_graph, load_state, watchdog_timeout, commit))
      }

      def check_state(beyond_limit: Boolean = false)
      {
        val state = session.get_state()
        for {
          version <- state.stable_tip_version
          load_theories = use_theories_state.change_result(_.check(state, version, beyond_limit))
          if load_theories.nonEmpty
        } resources.load_theories(session, id, load_theories, dep_files, unicode_symbols, progress)
      }

      val check_progress =
      {
        var check_count = 0
        Event_Timer.request(Time.now(), repeat = Some(check_delay))
          {
            if (progress.stopped) use_theories_state.change(_.cancel_result)
            else {
              check_count += 1
              check_state(check_limit > 0 && check_count > check_limit)
            }
          }
      }

      val consumer =
      {
        val delay_nodes_status =
          Standard_Thread.delay_first(nodes_status_delay max Time.zero) {
            progress.nodes_status(use_theories_state.value.nodes_status)
          }

        val delay_commit_clean =
          Standard_Thread.delay_first(commit_cleanup_delay max Time.zero) {
            val clean_theories = use_theories_state.change_result(_.clean_theories)
            if (clean_theories.nonEmpty) {
              progress.echo("Removing " + clean_theories.length + " theories ...")
              resources.clean_theories(session, id, clean_theories)
            }
          }

        Session.Consumer[Session.Commands_Changed](getClass.getName) {
          case changed =>
            if (changed.nodes.exists(dep_theories_set)) {
              val snapshot = session.snapshot()
              val state = snapshot.state
              val version = snapshot.version

              val theory_progress =
                use_theories_state.change_result(st =>
                  {
                    val domain =
                      if (st.nodes_status.is_empty) dep_theories_set
                      else changed.nodes.iterator.filter(dep_theories_set).toSet

                    val (nodes_status_changed, nodes_status1) =
                      st.nodes_status.update(resources, state, version,
                        domain = Some(domain), trim = changed.assignment)

                    if (nodes_status_delay >= Time.zero && nodes_status_changed) {
                      delay_nodes_status.invoke
                    }

                    val theory_progress =
                      (for {
                        (name, node_status) <- nodes_status1.present.iterator
                        if changed.nodes.contains(name) && !st.already_committed.isDefinedAt(name)
                        p1 = node_status.percentage
                        if p1 > 0 && Some(p1) != st.nodes_status.get(name).map(_.percentage)
                      } yield Progress.Theory(name.theory, percentage = Some(p1))).toList

                    (theory_progress, st.update(nodes_status1))
                  })

              theory_progress.foreach(progress.theory(_))

              check_state()

              if (commit.isDefined && commit_cleanup_delay > Time.zero) {
                if (use_theories_state.value.finished_result)
                  delay_commit_clean.revoke
                else delay_commit_clean.invoke
              }
            }
        }
      }

      try {
        session.commands_changed += consumer
        check_state()
        use_theories_state.guarded_access(_.join_result)
        check_progress.cancel
      }
      finally {
        session.commands_changed -= consumer
        resources.unload_theories(session, id, dep_theories)
      }

      Exn.release(use_theories_state.guarded_access(_.join_result))
    }

    def purge_theories(
      theories: List[String],
      qualifier: String = Sessions.DRAFT,
      master_dir: String = "",
      all: Boolean = false): (List[Document.Node.Name], List[Document.Node.Name]) =
    {
      val nodes =
        if (all) None
        else Some(theories.map(resources.import_name(qualifier, master_directory(master_dir), _)))
      resources.purge_theories(session, nodes)
    }
  }



  /** resources **/

  object Resources
  {
    def apply(base_info: Sessions.Base_Info, log: Logger = No_Logger): Resources =
      new Resources(base_info, log = log)

    def make(
      options: Options,
      session_name: String,
      session_dirs: List[Path] = Nil,
      include_sessions: List[String] = Nil,
      progress: Progress = No_Progress,
      log: Logger = No_Logger): Resources =
    {
      val base_info =
        Sessions.base_info(options, session_name, dirs = session_dirs,
          include_sessions = include_sessions, progress = progress)
      apply(base_info, log = log)
    }

    final class Theory private[Headless](
      val node_name: Document.Node.Name,
      val node_header: Document.Node.Header,
      val text: String,
      val node_required: Boolean)
    {
      override def toString: String = node_name.toString

      def node_perspective: Document.Node.Perspective_Text =
        Document.Node.Perspective(node_required, Text.Perspective.empty, Document.Node.Overlays.empty)

      def make_edits(text_edits: List[Text.Edit]): List[Document.Edit_Text] =
        List(node_name -> Document.Node.Deps(node_header),
          node_name -> Document.Node.Edits(text_edits),
          node_name -> node_perspective)

      def node_edits(old: Option[Theory]): List[Document.Edit_Text] =
      {
        val (text_edits, old_required) =
          if (old.isEmpty) (Text.Edit.inserts(0, text), false)
          else (Text.Edit.replace(0, old.get.text, text), old.get.node_required)

        if (text_edits.isEmpty && node_required == old_required) Nil
        else make_edits(text_edits)
      }

      def purge_edits: List[Document.Edit_Text] =
        make_edits(Text.Edit.removes(0, text))

      def required(required: Boolean): Theory =
        if (required == node_required) this
        else new Theory(node_name, node_header, text, required)
    }

    sealed case class State(
      blobs: Map[Document.Node.Name, Document.Blob] = Map.empty,
      theories: Map[Document.Node.Name, Theory] = Map.empty,
      required: Multi_Map[Document.Node.Name, UUID.T] = Multi_Map.empty)
    {
      /* blobs */

      def doc_blobs: Document.Blobs = Document.Blobs(blobs)

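      /* re-read the given files: the resulting Document.Blobs marks modified
         blobs as changed (for the protocol edits), while the updated state
         keeps them as unchanged */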
      def update_blobs(names: List[Document.Node.Name]): (Document.Blobs, State) =
      {
        val new_blobs =
          names.flatMap(name =>
          {
            val bytes = Bytes.read(name.path)
            def new_blob: Document.Blob =
            {
              val text = bytes.text
              Document.Blob(bytes, text, Symbol.Text_Chunk(text), changed = true)
            }
            blobs.get(name) match {
              case Some(blob) => if (blob.bytes == bytes) None else Some(name -> new_blob)
              case None => Some(name -> new_blob)
            }
          })
        val blobs1 = (blobs /: new_blobs)(_ + _)
        val blobs2 = (blobs /: new_blobs)({ case (map, (a, b)) => map + (a -> b.unchanged) })
        (Document.Blobs(blobs1), copy(blobs = blobs2))
      }

      def blob_edits(name: Document.Node.Name, old_blob: Option[Document.Blob])
        : List[Document.Edit_Text] =
      {
        val blob = blobs.getOrElse(name, error("Missing blob " + quote(name.toString)))
        val text_edits =
          old_blob match {
            case None => List(Text.Edit.insert(0, blob.source))
            case Some(blob0) => Text.Edit.replace(0, blob0.source, blob.source)
          }
        if (text_edits.isEmpty) Nil
        else List(name -> Document.Node.Blob(blob), name -> Document.Node.Edits(text_edits))
      }


      /* theories */

      lazy val theory_graph: Document.Node.Name.Graph[Unit] =
        Document.Node.Name.make_graph(
          for ((name, theory) <- theories.toList)
          yield ((name, ()), theory.node_header.imports.filter(theories.isDefinedAt(_))))

      def is_required(name: Document.Node.Name): Boolean = required.isDefinedAt(name)

      def insert_required(id: UUID.T, names: List[Document.Node.Name]): State =
        copy(required = (required /: names)(_.insert(_, id)))

      def remove_required(id: UUID.T, names: List[Document.Node.Name]): State =
        copy(required = (required /: names)(_.remove(_, id)))

      def update_theories(update: List[(Document.Node.Name, Theory)]): State =
        copy(theories =
          (theories /: update)({ case (thys, (name, thy)) =>
            thys.get(name) match {
              case Some(thy1) if thy1 == thy => thys
              case _ => thys + (name -> thy)
            }
          }))

      def remove_theories(remove: List[Document.Node.Name]): State =
      {
        require(remove.forall(name => !is_required(name)))
        copy(theories = theories -- remove)
      }

      def unload_theories(session: Session, id: UUID.T, theories: List[Document.Node.Name])
        : (List[Document.Edit_Text], State) =
      {
        val st1 = remove_required(id, theories)
        val theory_edits =
          for {
            node_name <- theories
            theory <- st1.theories.get(node_name)
          }
          yield {
            val theory1 = theory.required(st1.is_required(node_name))
            val edits = theory1.node_edits(Some(theory))
            (edits, (node_name, theory1))
          }
        (theory_edits.flatMap(_._1), st1.update_theories(theory_edits.map(_._2)))
      }

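      /* remove theories that are neither required nor needed as requirements
         of retained theories */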
      def purge_theories(session: Session, nodes: Option[List[Document.Node.Name]])
        : ((List[Document.Node.Name], List[Document.Node.Name], List[Document.Edit_Text]), State) =
      {
        val all_nodes = theory_graph.topological_order
        val purge = nodes.getOrElse(all_nodes).filterNot(is_required(_)).toSet

        val retain = theory_graph.all_preds(all_nodes.filterNot(purge)).toSet
        val (retained, purged) = all_nodes.partition(retain)
        val purge_edits = purged.flatMap(name => theories(name).purge_edits)

        ((purged, retained, purge_edits), remove_theories(purged))
      }
    }
  }

  class Resources private[Headless](
      val session_base_info: Sessions.Base_Info,
      log: Logger = No_Logger)
    extends isabelle.Resources(
      session_base_info.sessions_structure, session_base_info.check_base, log = log)
  {
    resources =>

    def options: Options = session_base_info.options


    /* session */

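    /* start the prover process and block until the session is Ready, or
       fail if the process terminates with an error beforehand */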
    def start_session(print_mode: List[String] = Nil, progress: Progress = No_Progress): Session =
    {
      val session = new Session(session_base_info.session, options, resources)

      val session_error = Future.promise[String]
      var session_phase: Session.Consumer[Session.Phase] = null
      session_phase =
        Session.Consumer(getClass.getName) {
          case Session.Ready =>
            session.phase_changed -= session_phase
            session_error.fulfill("")
          case Session.Terminated(result) if !result.ok =>
            session.phase_changed -= session_phase
            session_error.fulfill("Session start failed: return code " + result.rc)
          case _ =>
        }
      session.phase_changed += session_phase

      progress.echo("Starting session " + session_base_info.session + " ...")
      Isabelle_Process.start(session, options,
        logic = session_base_info.session, dirs = session_base_info.dirs, modes = print_mode)

      session_error.join match {
        case "" => session
        case msg => session.stop(); error(msg)
      }
    }


    /* theories */

    private val state = Synchronized(Resources.State())

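    /* read theory sources from the file system, mark them as required for
       the given id, and apply the resulting document edits to the session */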
    def load_theories(
      session: Session,
      id: UUID.T,
      theories: List[Document.Node.Name],
      files: List[Document.Node.Name],
      unicode_symbols: Boolean,
      progress: Progress)
    {
      val loaded_theories =
        for (node_name <- theories)
        yield {
          val path = node_name.path
          if (!node_name.is_theory) error("Not a theory file: " + path)

          progress.expose_interrupt()
          val text0 = File.read(path)
          val text = if (unicode_symbols) Symbol.decode(text0) else text0
          val node_header = resources.check_thy_reader(node_name, Scan.char_reader(text))
          new Resources.Theory(node_name, node_header, text, true)
        }

      val loaded = loaded_theories.length
      if (loaded > 1) progress.echo("Loading " + loaded + " theories ...")

      state.change(st =>
        {
          val (doc_blobs1, st1) = st.insert_required(id, theories).update_blobs(files)
          val theory_edits =
            for (theory <- loaded_theories)
            yield {
              val node_name = theory.node_name
              val theory1 = theory.required(st1.is_required(node_name))
              val edits = theory1.node_edits(st1.theories.get(node_name))
              (edits, (node_name, theory1))
            }
          val file_edits =
            for { node_name <- files if doc_blobs1.changed(node_name) }
            yield st1.blob_edits(node_name, st.blobs.get(node_name))

          session.update(doc_blobs1, theory_edits.flatMap(_._1) ::: file_edits.flatten)
          st1.update_theories(theory_edits.map(_._2))
        })
    }

    def unload_theories(session: Session, id: UUID.T, theories: List[Document.Node.Name])
    {
      state.change(st =>
      {
        val (edits, st1) = st.unload_theories(session, id, theories)
        session.update(st.doc_blobs, edits)
        st1
      })
    }

    def clean_theories(session: Session, id: UUID.T, theories: List[Document.Node.Name])
    {
      state.change(st =>
      {
        val (edits1, st1) = st.unload_theories(session, id, theories)
        val ((_, _, edits2), st2) = st1.purge_theories(session, None)
        session.update(st.doc_blobs, edits1 ::: edits2)
        st2
      })
    }

    def purge_theories(session: Session, nodes: Option[List[Document.Node.Name]])
      : (List[Document.Node.Name], List[Document.Node.Name]) =
    {
      state.change_result(st =>
      {
        val ((purged, retained, _), st1) = st.purge_theories(session, nodes)
        ((purged, retained), st1)
      })
    }
  }
}
