# -*- coding: Latin-1 -*-
"""
The Activity Heap is based on a heap, but allows for reschedules.
To allow reschedules to happen, every model is accompanied by a flag that
indicates whether or not its scheduled entry is still valid.
As soon as a model is rescheduled, the entry for the previously scheduled
time is marked invalid and a new entry is added. This causes the heap to
become *dirty*, requiring a check of the flag as soon as the first element
is requested.
Because the heap can become dirty, it is cleaned up as soon as the number
of invalid elements becomes too high. This cleanup method has O(n)
complexity and is therefore only run when the heap has become far too dirty.
Another drawback is that this scheduler might consume more memory than
others, since invalid elements are kept in memory. However, the actual
models and states are not duplicated, as only references are stored, so the
additional memory requirement should not be a problem in most situations.
The 'activity' part of the name stems from the fact that only models whose
*time_next* attribute is smaller than infinity will be scheduled. Since
these elements are not added to the heap, they are not taken into account
in the complexity. This allows for significant optimisations in situations
where many models are scheduled for infinity.
Of all provided schedulers, this one is the most mature, as it is the
oldest and also the default scheduler. It is applicable in every situation
and offers sufficient performance in most cases.
This scheduler is ideal in situations where (nearly) no reschedules happen
and where most models transition at different times.
It results in slow behaviour in situations that require lots of
rescheduling, and thus lots of dirty elements.
This approach is also used in the VLE simulator and is the common way to
build heap schedulers that require invalidation. It differs from the
scheduler in ADEVS because the heap from the heapq library is used, which
does not offer functions to restructure the heap. Reimplementing these
methods in pure Python would be unnecessarily slow.
"""

from heapq import heappush, heappop, heapify
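
# Heap entries are plain lists so that they can be invalidated in place:
#     [time_next, model_id, valid_flag, model]
# 'time_next' is the model's (time, age) pair, 'valid_flag' marks whether the
# entry is still current, and 'model' is a reference to the model itself.
# 'id_fetch' maps a model_id to the most recent entry for that model, so a
# reschedule only flips 'valid_flag' on the old entry and pushes a fresh one.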

class Scheduler(object):
    """
    Scheduler class itself
    """
    def __init__(self, models, epsilon, totalModels):
        """
        Constructor

        :param models: all models in the simulation
        :param epsilon: allowed deviation between the requested time and the
                        time of the imminent models
        :param totalModels: total number of models in the simulation
        """
        self.heap = []
        self.id_fetch = [None] * totalModels
        for model in models:
            if model.time_next[0] != float('inf'):
                self.id_fetch[model.model_id] = [model.time_next, model.model_id, True, model]
                heappush(self.heap, self.id_fetch[model.model_id])
            else:
                self.id_fetch[model.model_id] = [model.time_next, model.model_id, False, model]

        self.invalids = 0
        self.maxInvalids = len(models) * 2
        self.epsilon = epsilon

    def schedule(self, model):
        """
        Schedule a model

        :param model: the model to schedule
        """
        # Create the entry, as we have accepted the model
        elem = [model.time_next, model.model_id, False, model]
        try:
            self.id_fetch[model.model_id] = elem
        except IndexError:
            # A completely new model
            self.id_fetch.append(elem)
            self.maxInvalids += 2
        # Check whether it actually needs to be scheduled
        if model.time_next[0] != float('inf'):
            self.id_fetch[model.model_id][2] = True
            heappush(self.heap, self.id_fetch[model.model_id])

    def unschedule(self, model):
        """
        Unschedule a model

        :param model: model to unschedule
        """
        if model.time_next[0] != float('inf'):
            self.invalids += 1
        # Invalidate the entry that may still be in the heap
        self.id_fetch[model.model_id][2] = False
        # Remove the reference in our id_fetch
        self.id_fetch[model.model_id] = None
        self.maxInvalids -= 2

    def massReschedule(self, reschedule_set):
        """
        Reschedule all models provided.
        Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.

        :param reschedule_set: iterable containing all models to reschedule
        """
        # NOTE: rather dirty, though a lot faster for huge models
        inf = float('inf')
        for model in reschedule_set:
            if model.model_id is None:
                continue
            event = self.id_fetch[model.model_id]
            if event[2]:
                if model.time_next == event[0]:
                    # Still scheduled at the same time, nothing to do
                    continue
                elif event[0][0] != inf:
                    # The entry currently in the heap becomes invalid
                    self.invalids += 1
                event[2] = False
            if model.time_next[0] != inf:
                self.id_fetch[model.model_id] = [model.time_next, model.model_id, True, model]
                heappush(self.heap, self.id_fetch[model.model_id])
        if self.invalids >= self.maxInvalids:
            # Too many invalid entries: rebuild the heap in O(n)
            self.heap = [i for i in self.heap if i[2] and (i[0][0] != inf)]
            heapify(self.heap)
            self.invalids = 0

    def readFirst(self):
        """
        Returns the time of the first model that has to transition

        :returns: timestamp of the first model
        """
        self.cleanFirst()
        return self.heap[0][0]

    def cleanFirst(self):
        """
        Clean up the invalid elements at the front of the heap
        """
        try:
            while not self.heap[0][2]:
                heappop(self.heap)
                self.invalids -= 1
        except IndexError:
            # Nothing left, so it is as clean as can be
            pass

    def getImminent(self, time):
        """
        Returns a list of all models that transition at the provided time, with a specified epsilon deviation allowed.

        :param time: timestamp to check for models

        .. warning:: For efficiency, this method only checks the **first** elements, so trying to invoke this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
        """
        immChildren = []
        t, age = time
        try:
            # Age must be exactly the same; the time may deviate by at most epsilon
            first = self.heap[0]
            while (abs(first[0][0] - t) < self.epsilon) and (first[0][1] == age):
                # Check whether the found event is actually still active
                if first[2]:
                    # Active, so the event is imminent
                    immChildren.append(first[3])
                    first[2] = False
                else:
                    # Wasn't active, but we have to pop it to reach the next one,
                    # so we can lower the number of invalids
                    self.invalids -= 1
                # Advance the while loop
                heappop(self.heap)
                first = self.heap[0]
        except IndexError:
            pass
        return immChildren
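
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the scheduler itself). The stub
# model class below is a hypothetical stand-in for a DEVS model: the
# scheduler only relies on the 'time_next' (time, age) tuple and the integer
# 'model_id' attribute, so any object providing those will do.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class StubModel(object):
        """Minimal hypothetical model exposing only what the scheduler needs."""
        def __init__(self, model_id, time_next):
            self.model_id = model_id
            self.time_next = time_next

    inf = float('inf')
    models = [StubModel(0, (1.0, 1)),
              StubModel(1, (1.0, 1)),
              StubModel(2, (inf, 1))]  # never enters the heap: scheduled for infinity

    scheduler = Scheduler(models, epsilon=1e-6, totalModels=3)

    # Earliest scheduled (time, age) pair
    t = scheduler.readFirst()
    print(t)                                # (1.0, 1)

    # All models transitioning at that time (within epsilon)
    imminent = scheduler.getImminent(t)
    print([m.model_id for m in imminent])   # [0, 1]

    # After their transition, assign new times and reschedule them in bulk
    for m in imminent:
        m.time_next = (2.0, 1)
    scheduler.massReschedule(imminent)
    print(scheduler.readFirst())            # (2.0, 1)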
|