perfsim.service_chain

Contents

perfsim.service_chain#

Module contents#

Submodules#

perfsim.service_chain.load_balancer module#

class perfsim.service_chain.load_balancer.LoadBalancer(items=[], algorithm='round_robin')[source]#

Bases: object

This class is responsible for load balancing. It can be used to balance the load between multiple items.

Parameters:
  • items (List)

  • algorithm (str)

next()[source]#

Get the next item based on the load balancing algorithm.

Returns:

The next item.

perfsim.service_chain.microservice module#

class perfsim.service_chain.microservice.Microservice(name, endpoint_functions=None, replica_count=0, cpu_requests=-1, cpu_limits=-1, memory_requests=0, ingress_bw=inf, egress_bw=inf, ingress_latency=0, egress_latency=0, blkio_capacity=0, resource_allocation_scenario=None)[source]#

Bases: MicroservicePrototype

This class represents a microservice in a service chain. It has a name, a list of endpoints, a list of replicas, a load balancer, and a resource allocation scenario.

Parameters:
  • name (str)

  • endpoint_functions (Dict[str, MicroserviceEndpointFunction])

  • replica_count (int)

  • cpu_requests (int)

  • cpu_limits (int)

  • memory_requests (int)

  • ingress_bw (int)

  • egress_bw (int)

  • ingress_latency (float)

  • egress_latency (float)

  • blkio_capacity (int)

  • resource_allocation_scenario (ResourceAllocationScenario)

name: str#

The name of the microservice

endpoint_functions: Dict[str, MicroserviceEndpointFunction]#

The dictionary of endpoint functions of the microservice, keyed by endpoint name

memory_requests: int#

The memory request of the microservice

ingress_bw: int#

The ingress bandwidth of the microservice

egress_bw: int#

The egress bandwidth of the microservice

ingress_latency: Union[int, float]#

The ingress latency of the microservice

egress_latency: Union[int, float]#

The egress latency of the microservice

blkio_capacity: int#

The storage capacity of the microservice

resource_allocation_scenario: ResourceAllocationScenario#

The resource allocation scenario of the microservice

load_balancer: LoadBalancer#

The load balancer of the microservice that is responsible for the load balancing of the replicas

classmethod from_prototype(name, prototype, replica_count=0, cpu_requests=-1, cpu_limits=-1, memory_requests=-1, ingress_bw=inf, egress_bw=inf, ingress_latency=0, egress_latency=0, blkio_capacity=-1)[source]#
Parameters:
  • name (str)

  • prototype (MicroservicePrototype)

  • replica_count (int)

  • cpu_requests (int)

  • cpu_limits (int)

  • memory_requests (int)

  • ingress_bw (int)

  • egress_bw (int)

  • ingress_latency (float)

  • egress_latency (float)

  • blkio_capacity (int)

is_best_effort()[source]#
Return type:

bool

is_guaranteed()[source]#
Return type:

bool

is_burstable()[source]#
Return type:

bool

is_unlimited_burstable()[source]#
Return type:

bool

is_limited_burstable()[source]#
Return type:

bool

next_replica(increase_replica_id=True)[source]#
Parameters:

increase_replica_id (bool)

Return type:

MicroserviceReplica

add_microservice_affinity_with(ms)[source]#
Parameters:

ms (Microservice)

Return type:

None

add_host_affinity_with(host)[source]#
Parameters:

host (Host)

Return type:

None

delete_microservice_affinity_with(ms)[source]#
Parameters:

ms (Microservice)

Return type:

None

delete_host_affinity_with(host)[source]#
Parameters:

host (Host)

Return type:

None

add_microservice_anti_affinity_with(ms)[source]#
Parameters:

ms (Microservice)

Return type:

None

add_host_anti_affinity_with(host)[source]#
Parameters:

host (Host)

Return type:

None

delete_microservice_anti_affinity_with(ms)[source]#
Parameters:

ms (Microservice)

Return type:

None

delete_host_anti_affinity_with(host)[source]#
Parameters:

host (Host)

Return type:

None

property ms_affinity_rules#
property ms_antiaffinity_rules#
property host_affinity_rules#
property host_antiaffinity_rules#
property replicas#
property hosts#
property replica_count#
property cpu_requests#
property cpu_limits#

perfsim.service_chain.microservice_endpoint_function module#

class perfsim.service_chain.microservice_endpoint_function.MicroserviceEndpointFunction(name, id, threads_instructions, threads_avg_cpi, threads_avg_cpu_usages, threads_avg_mem_accesses, threads_single_core_isolated_cache_misses, threads_single_core_isolated_cache_refs, threads_avg_cache_miss_penalty, threads_avg_blkio_rw, request_timeout=inf, microservice=None)[source]#

Bases: MicroserviceEndpointFunctionPrototype

Parameters:
  • name (str)

  • id (int)

  • threads_instructions (List[int])

  • threads_avg_cpi (List[float])

  • threads_avg_cpu_usages (List[float])

  • threads_avg_mem_accesses (List[int])

  • threads_single_core_isolated_cache_misses (List[int])

  • threads_single_core_isolated_cache_refs (List[int])

  • threads_avg_cache_miss_penalty (List[float])

  • threads_avg_blkio_rw (List[int])

  • request_timeout (float)

  • microservice (Microservice)

update_name_with_microservice_prefix()[source]#
property microservice#
classmethod from_prototype(name, id, prototype, microservice=None)[source]#
Parameters:

perfsim.service_chain.microservice_endpoint_function_dtype module#

class perfsim.service_chain.microservice_endpoint_function_dtype.MicroserviceEndpointFunctionDtype[source]#

Bases: TypedDict

name: str#
prototype: MicroserviceEndpointFunctionPrototype#

perfsim.service_chain.microservice_replica module#

class perfsim.service_chain.microservice_replica.MicroserviceReplica(name, microservice)[source]#

Bases: object

Parameters:
  • name (str)

  • microservice (Microservice)

reinit()[source]#
property host: Host#
property microservice: Microservice#
remove_host_without_eviction()[source]#
Return type:

None

reserve_egress_bw(bw)[source]#
Parameters:

bw (float)

release_egress_bw(bw)[source]#
Parameters:

bw (float)

reserve_ingress_bw(bw)[source]#
Parameters:

bw (float)

release_ingress_bw(bw)[source]#
Parameters:

bw (float)

generate_threads(from_subchain_id, node_in_subchain, replica_identifier_in_subchain, load_balance=False, parent_request=None)[source]#
Parameters:
Return type:

Dict[str, ReplicaThread]

perfsim.service_chain.process module#

class perfsim.service_chain.process.Process(pname, cpu_requests_share, cpu_limits, memory_capacity, ingress_bw, egress_bw, ingress_latency, egress_latency, blkio_capacity, endpoint_functions, ms_replica)[source]#

Bases: object

Process class is used to represent a process in the system.

Parameters:
  • pname (str)

  • cpu_requests_share (int)

  • cpu_limits (int)

  • memory_capacity (int)

  • ingress_bw (int)

  • egress_bw (int)

  • ingress_latency (float)

  • egress_latency (float)

  • blkio_capacity (int)

  • endpoint_functions (Dict[str, MicroserviceEndpointFunction])

  • ms_replica (MicroserviceReplica)

threads: Set[ReplicaThread]#
property active_threads_count#
property original_cpu_requests_share#
property cpu_requests_share#
property cpu_limits#
get_cpu_request_per_thread()[source]#

perfsim.service_chain.replica_thread module#

class perfsim.service_chain.replica_thread.ReplicaThread(process, replica, replica_identifier_in_subchain, node_in_alt_graph, thread_id_in_node, subchain_id, average_load=1, core=None, parent_request=None)[source]#

Bases: Observable

This class represents a thread of execution of a microservice replica.

Parameters:
before_killing_thread: str#

The event that is going to be notified before killing a thread.

before_executing_thread: str#

The event that is going to be notified before executing a thread.

after_executing_thread: str#

The event that is going to be notified after executing a thread.

replica: MicroserviceReplica#

Replica that the thread belongs to.

replica_identifier_in_subchain: int#

Replica’s identifier in the subchain.

property node_in_alt_graph: Tuple[int, MicroserviceEndpointFunction]#
property thread_id_in_node: int#
set_node_in_alt_graph(node, thread_id_in_node)[source]#
Parameters:
register_events()[source]#

This is for performance optimization purposes. Instead of generating strings for each event, we can register the event_names as attributes, then we send the event_name as a reference, instead of copying the string every time. This (slightly) improves performance, especially because we are calling the notify_observers method several times for each request during the simulation.

Returns:

kill()[source]#
Return type:

None

is_runnable()[source]#
exec(duration, simultaneous_flag=False)[source]#
Parameters:
  • duration (int)

  • simultaneous_flag (bool)

Return type:

int

get_best_effort_cpu_requests_share()[source]#
Return type:

int

get_relative_guaranteed_cpu_requests_share()[source]#
Return type:

int

get_exec_time_on_rq()[source]#
Return type:

float

property instructions#
property process#
property on_rq#
property core#
property vruntime#
property load#
property cpu_requests_share#
property cpu_limits#

perfsim.service_chain.service_chain module#

class perfsim.service_chain.service_chain.ServiceChain(name, nodes=None, edges=None, incoming_graph_data=None, **attr)[source]#

Bases: MultiDiGraph

Initialize a graph with edges, name, or graph attributes.

Parameters#

incoming_graph_data : input graph

Data to initialize graph. If incoming_graph_data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. If the corresponding optional Python packages are installed the data can also be a 2D NumPy array, a SciPy sparse array, or a PyGraphviz graph.

multigraph_input : bool or None (default None)

Note: Only used when incoming_graph_data is a dict. If True, incoming_graph_data is assumed to be a dict-of-dict-of-dict-of-dict structure keyed by node to neighbor to edge keys to edge data for multi-edges. A NetworkXError is raised if this is not the case. If False, to_networkx_graph() is used to try to determine the dict’s graph data structure as either a dict-of-dict-of-dict keyed by node to neighbor to edge data, or a dict-of-iterable keyed by node to neighbors. If None, the treatment for True is tried, but if it fails, the treatment for False is tried.

attr : keyword arguments, optional (default= no attributes)

Attributes to add to graph as key=value pairs.

See Also#

convert

Examples#

>>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name="my graph")
>>> e = [(1, 2), (2, 3), (3, 4)]  # list of edges
>>> G = nx.Graph(e)

Arbitrary graph attribute pairs (key=value) may be assigned

>>> G = nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
Parameters:
  • name (str)

  • nodes (List[MicroserviceEndpointFunction])

  • edges (List[ServiceChainLink])

microservices_dict: Dict[str, Microservice]#

microservices_dict is a dictionary of microservices in the service chain

add_nodes_from(nodes_for_adding, **attr)[source]#

Add nodes from a list of nodes.

Parameters:
Returns:

None

add_edges_from(ebunch_to_add, **attr)[source]#

Add edges from a list of edges.

Parameters:
Returns:

add_node(node_for_adding, **attr)[source]#

Add a node to the service chain.

Parameters:
  • node_for_adding

  • attr

Returns:

static copy_to_dict(service_chains)[source]#

Copy service chains to a dictionary.

Parameters:

service_chains (Union[List[ServiceChain], Dict[str, ServiceChain]])

Return type:

Tuple[Dict[str, ServiceChain], Dict[str, Microservice]]

Returns:

static microservices_to_dict_from_list(service_chains)[source]#

Convert a list of service chains to a dictionary of microservices.

Parameters:

service_chains (list[ServiceChain]) – The list of service chains.

Return type:

dict[str, Microservice]

Returns:

The dictionary of microservices.

static microservices_to_dict_from_dict(service_chains)[source]#

Convert a dictionary of service chains to a dictionary of microservices.

Parameters:

service_chains (dict[str, ServiceChain]) – The dictionary of service chains.

Return type:

dict[str, Microservice]

Returns:

The dictionary of microservices.

static from_config(conf, microservice_prototypes_dict)[source]#

Create a service chain from a configuration.

Parameters:
  • conf (dict)

  • microservice_prototypes_dict

Return type:

dict[str, ServiceChain]

Returns:

Parameters:

perfsim.service_chain.service_chain_manager module#

class perfsim.service_chain.service_chain_manager.ServiceChainManager(name, service_chain)[source]#

Bases: object

Parameters:
  • name (str)

  • service_chain (ServiceChain)

alternative_graph: DiGraph#

The alternative graph for the service chain, with nodes as a tuple of (node number, MicroserviceEndpointFunction)

name: str#

The service chain manager name

subchains: List[List[Tuple[int, MicroserviceEndpointFunction]]]#

List of subchains (index as subchain ID) including their nodes as list of tuples

node_subchain_id_map: Dict[Tuple[int, MicroserviceEndpointFunction], int]#

Dictionary of subchain IDs (as values) and tuple of (node number, MicroserviceEndpointFunction) as keys

property root#
generate_alternative_graph()[source]#
extract_subchains(current_node, subchain_id, append=False)[source]#
draw_service_chain(save_dir=None, with_labels=False)[source]#
Parameters:
  • save_dir (str)

  • with_labels (bool)

draw_alternative_graph(save_dir=None, with_labels=False, relabel=False)[source]#
Parameters:
  • save_dir (str)

  • with_labels (bool)

  • relabel (bool)

static get_copy(G)[source]#

perfsim.service_chain.service_chain_result_dict module#

class perfsim.service_chain.service_chain_result_dict.ServiceChainResultDict[source]#

Bases: TypedDict

simulation_name: str#
estimated_cost: Union[int, float]#
total_requests: int#
successful_requests: int#
timeout_requests: int#
avg_latency: float#
throughput: Dict[Interval, int]#
arrival_times: ServiceChainResultIterationDict#
latencies: ServiceChainResultIterationDict#
completion_times: ServiceChainResultIterationDict#
traffic_types: ServiceChainResultIterationDict#

perfsim.service_chain.service_chain_result_iteration_dict module#

class perfsim.service_chain.service_chain_result_iteration_dict.ServiceChainResultIterationDict[source]#

Bases: TypedDict

iterations: Dict[int, Dict[str, int]]#

perfsim.service_chain.thread_set module#

class perfsim.service_chain.thread_set.ThreadSet(type_of_set, *args, **kwargs)[source]#

Bases: set

ThreadSet class is used to represent a set of threads in a RunQueue.

Parameters:

type_of_set (int)

sum_cpu_requests: int#

The sum of the cpu requests of all threads in the set

property type_of_set: int#

The type of the set:

0: BestEffort 1: Guaranteed 2: Burstable 3: Burstable unlimited 4: Burstable limited
add(thread)[source]#

Add an element to a set.

This has no effect if the element is already present.

Parameters:

thread (ReplicaThread)

Return type:

None

remove(thread)[source]#

Remove an element from a set; it must be a member.

If the element is not a member, raise a KeyError.

Parameters:

thread (ReplicaThread)

Return type:

None

recalculate_sum_cpu_requests()[source]#

Recalculate the sum of cpu requests of all threads in the set.

Return type:

int

Returns:

The sum of cpu requests of all threads in the set.