ANAZMRJNM2021

Citation Author(s):
Ali Nouruzi, Abolfazl Zakeri, Mohammad Reza Javan, Nader Mokari
Submitted by:
Mohammad Reza Javan
Last updated:
Thu, 04/15/2021 - 03:40
DOI:
10.21227/r1j8-tc84
License:

Abstract 

# -*- coding: utf-8 -*-

"""

Created on Wed Feb 26 11:19:38 2020

 

@author: ali nouruzi

"""

 

import numpy as np

import random

import sys

import os

import scipy

import networkx as nx

from collections import deque

from keras.models import Sequential

from keras.layers import Dense

from keras.optimizers import Adam

from matplotlib import pyplot as plt

from timeit import default_timer as timer

 

 

number_function=5

number_node=10

number_service=5

number_user=5

Episode=2000

Ts=1000

L=1000

mu=240

batch_size = 8

number_VM=6

 

    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#service random creation

class service:

    def service_specify(number_node,number_service):

        K=np.zeros([number_service,5])

        tt=[100, 500]

        bandwidth=[.064, .1, 1, 4]

        for i in range(number_service):

            K[i,0]=5                                               #    number function

            K[i,1]=np.random.randint(1,number_node,1)          #    ingress node of service

            K[i,2]=np.random.randint(1,number_node,1)              #    egress node of service

            K[i,3]= tt[np.random.randint(len(tt))]                 #    tolerable time

            temp_band=bandwidth[np.random.randint(len(bandwidth))] #    bandwidth

            K[i,4]=temp_band

            # if K[i,3]==60:

            #     K[i,1]=K[i,2]

 

        return K

 

    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

class Graph:

 

      def G_generator(number_node):

            yale=0

            g=np.zeros([number_node,number_node])

 

            if number_node>10:

                con=4

            else:

                con=6

 

            for n in range(number_node):

                    if np.sum(g[n,:])<1:

                        temp_l=np.random.randint(1,con)

                        for y in range(temp_l):

                            temp_n=np.random.randint(0,number_node)

                            g[n,temp_n]=1

 

 

            g=g+ np.transpose(g)

            x,y =np.where(g>1)

            g[x,y]=1

 

            g=g-np.eye(number_node)

            x,y = np.where(g<1)

            g[x,y]=0

 

            yale = np.sum(g)

 

            G = nx.from_numpy_matrix(np.array(g))

            nx.draw(G, with_labels=True)

            print(yale)

            return g,G

 

 

      def find_all_path(G,s,d,number_node):

        memory_paths_all=[]

        for s in range(0, number_node):

            for d in range(0, number_node):

                for path in nx.all_simple_paths(G, source=s, target=d):

                    # print(path)

                    memory_paths_all.append(path)

 

        return memory_paths_all

 

      def find_all_path_s_d(G,s,d):

        memory_path=[]

        for path in nx.all_simple_paths(G, source=s, target=d):

            memory_path.append(path)

 

        return memory_path

 

 

      def shortest_path(G,s,d):

         pth=nx.shortest_path(G,s,d)

 

         return pth

 

      def link_indicator(g,Cap_link,W_link,s,d):

        link=np.zeros([number_node,number_node])

        temp_w=10000/(Cap_link+1)*W_link

        temp_w=np.round(temp_w)

        G=temp_w*g

        G=nx.from_numpy_matrix(G)

 

        pth=nx.shortest_path(G,s,d,weight='weight',method='dijkstra')

 

        il=0

        while il !=len(pth)-1:

            si=pth[il]

            di=pth[il+1]

            si=np.int64(si)

            di=np.int64(di)

 

            link[si,di]=1

            il = il+1

 

          # For symmetric

        link=link+np.transpose(link)

        i,j =np.where(link>1)

        link[i,j]=1

 

        link = link - np.eye(number_node)

        i,j =np.where(link<0)

        link[i,j]=0

 

        return link

 

 

      def cap_weight_delay(g,number_node):

         ##  weights of links

           W_link=5*np.random.rand(number_node,number_node)

           W_link=W_link*np.transpose(W_link)

           W_link=np.round(W_link)+50

           W_link=g*W_link

 

          #   weights of nodes

           W_node=np.round(20.*np.random.rand(1,number_node))+50

 

          # Cap links

           cl=np.round(40.*(np.random.rand(number_node,number_node)+1))

           cl=cl*np.transpose(cl)

           Cap_link=g*cl

 

         # Cap node

           Cap_node=np.round(600.*(10.*np.random.rand(1,number_node)+2))

           D_link=1.5 + np.random.rand(number_node ,number_node)

           D_link=D_link*np.transpose(D_link)

           D_link=np.ceil(2*D_link)

 

           return Cap_node,  Cap_link, W_node, W_link,  D_link

 

    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

class DQNAgent:

    def __init__(self, state_size, action_size):

        self.state_size = state_size

        self.action_size = action_size

        self.memory = deque(maxlen=2000)

        self.gamma = 0.95    # discount rate

        self.epsilon = 1.0   # exploration rate

        self.epsilon_min = 0.01

        self.epsilon_decay = 0.995

        self.learning_rate = 0.001

        self.model = self._build_model()

 

    def _build_model(self):

        # Neural Net for Deep-Q learning Model

        model = Sequential()

        model.add(Dense(24, input_dim=self.state_size, activation='relu'))

        model.add(Dense(24, activation='relu'))

        model.add(Dense(self.action_size, activation='linear'))

        model.compile(loss='mse',

                      optimizer=Adam(lr=self.learning_rate))

        return model

 

    def memorize(self, state, action, reward, next_state, done):

        self.memory.append((state, action, reward, next_state, done))

 

 

 

    def act(self, state):

        if np.random.rand() <= self.epsilon:

            return random.randrange(self.action_size)

 

        act_values = self.model.predict(state)

        return np.argmax(act_values[0])  # returns action

 

 

 

    def replay(self, batch_size):

        minibatch = random.sample(self.memory, batch_size)

        states, targets_f = [], []

        for state, action, reward, next_state, done in minibatch:

            target = reward

            if not done or reward < 0:

                target = (reward + self.gamma *

                          np.amax(self.model.predict(next_state)[0]))

            target_f = self.model.predict(state)

            target_f[0][action] = target

            # Filtering out states and targets for training

            states.append(state[0])

            targets_f.append(target_f[0])

        history = self.model.fit(np.array(states), np.array(targets_f), epochs=1, verbose=0)

        # Keeping track of loss

        loss = history.history['loss'][0]

        if self.epsilon > self.epsilon_min:

            self.epsilon *= self.epsilon_decay

        return loss

 

    def load(self, name):

        self.model.load_weights(name)

 

    def save(self, name):

        self.model.save_weights(name)

 

    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

 

s=timer()

U=number_node

action_size=number_node#*number_VM

state_size=U+U*U+6

agent = DQNAgent(state_size, action_size)

 

 

 

K=service.service_specify(number_node, number_service)

g, G=Graph.G_generator(number_node)

Cap_node,  Cap_link, W_node, W_link,  D_link=Graph.cap_weight_delay(g, number_node)

 

acc_cpu=np.ones([Ts,number_service,number_function,number_user])

acc_delay=np.ones([Ts,number_service,number_user])

acc_link=np.ones([Ts,number_service,number_user])

acc=np.zeros([Ts,number_service,number_user])

 

 

exp_time=np.zeros([Ts,number_service,number_user])

mem=np.zeros([Ts,number_service,number_user])

 

 

cost_link=np.zeros([Ts,number_service,number_user])

cost_link_f = np.zeros([Ts,number_service,number_function,number_user])

cost_cpu=np.zeros([Ts,number_service,number_function,number_user])

cost_total=np.zeros([Ts,number_service,number_user])

 

reward_u=np.zeros([Ts,number_service,number_user])

alpha=.1

 

Cap_link0=Cap_link

Cap_node0=Cap_node

 

 

cost_Episode = []

acc_Episode = []

r = []

 

r_time = []

LOSS = np.zeros([Ts,number_service,number_user])

loss_episode = []

Mount=10

# for m in range(Mount):

for e in range(Episode):

        MEM_link=np.zeros([Ts,number_service,number_user,number_node,number_node])

        MEM_cpu=np.zeros([Ts,number_service,number_function,number_user,number_node])

        Cap_link=Cap_link0

        Cap_node=Cap_node0

        cost_total=np.zeros([Ts,number_service,number_user])

        for t in range(Ts):

                for i_K in range(number_service):

                    number_function = K[i_K,0]

                    ing = K[i_K,1]

                    eg = K[i_K,2]

                    tolorable_time = K[i_K,3]

                    cap_needed=K[i_K,4]

                    number_user_rand=np.random.randint(1,number_user) #   uniform random number of requests

                    for u in range(number_user_rand):

                        Cap_link_u=Cap_link

                        Cap_node_u=Cap_node

                        Flag_done=0

                        state=np.zeros([1,state_size])

                        next_state=np.zeros([1,state_size])

 

                        reward_u[t,i_K,u]=0

 

                        temp_node=np.zeros([1,number_node])

 

                        number_function=np.int64(number_function)

                        U=number_node

 

                        curent_node=ing

                        temp_delay = 0

                        j=0

                        for i_f in range(number_function):

                             reward = 0

                             done = 0

                             j+=1

                             done = bool(done)

                             temp_f=np.zeros([1,number_node])

 

                             state[0,0:U]=L*(Cap_node0-Cap_node)/(Cap_node0)

                             s_link=L*(Cap_link0-Cap_link)/(Cap_link0+1)

                             s_link_resh=np.reshape(s_link,[1,number_node*number_node])

                             state[0,U+1:U*U+(U+1)]=s_link_resh

                             state[0,-5]=ing          #  Ingress

                             state[0,-4]=eg           #  Egress

                             state[0,-3]=curent_node  # curent node

                             state[0,-2]=i_f          # function

                             state[0,-1]=1

                             state=np.round(state)

                             temp_link = np.zeros([number_node,number_node])
                             Link = np.zeros([number_node,number_node])     # initialize so the link update below is defined even if no placement occurs

                             if ing == eg:            # MEC Condition Service

                                action = ing

                                

                             if j>100:

                                break

 

                             action = agent.act(state)                 # DQN selects the next hosting node

                             if  i_f == number_function-1:             # the last function must be placed at the egress node

                                 action = eg

                             curent_node=int(curent_node)

                             neiegh=np.where(g[curent_node,:]==1)

                             neiegh=neiegh[0]

                             check_neigh=0

                             action = int(action)

                             act_neigh=np.where(action==neiegh)

                             act_neigh=act_neigh[0]

                             if len(act_neigh)>0:

                                 check_neigh=1

                             if check_neigh==1:

                                 if Cap_node[0,action]>cap_needed:

                                     temp_f[0,action]=1

                                     temp_node_old=temp_node

                                     temp_node[0,action]+=1

   

                                     s=curent_node       # curent node

                                     d=action            #  Next node

                                     Link=Graph.link_indicator(g,Cap_link,W_link,s,d)   # link_indicator expects the adjacency matrix g, not the networkx graph

 

                                 xl,yl=np.where(Link==1)

                                 temp_c=(Cap_link[xl,yl]<cap_needed)

                                 temp_c=temp_c.astype(int)

 

                                 if np.sum(temp_c) == 0:

                                     delay_prop=np.sum(D_link*Link)

                                     delay_prop = delay_prop/2   #   halve because the symmetric link matrix counts each edge twice

                                     temp_delay=temp_delay+delay_prop

 

                                     if  temp_delay < tolorable_time:

 

                                         Cap_node_old = Cap_node

                                         Cap_node=Cap_node-cap_needed*temp_f

                                         MEM_cpu[t,i_K,i_f,u,:]=Cap_node_old-Cap_node

 

 

                                         # MEM_link_f[t,i_K,i_f,u,:,:]=Cap_link_old-Cap_link

 

 

                                         cost_cpu[t,i_K,i_f,u]  = np.sum(cap_needed*W_node*temp_f)

                                         cost_link_f[t,i_K,i_f,u] = np.sum(cap_needed*Link*W_link)

 

                                         Flag_done+=1

                                         done = 1

                                         done = bool(done)

                                         cost = cost_cpu[t,i_K,i_f,u] + cost_link_f[t,i_K,i_f,u]

                                         reward=100-alpha*cost

                                         curent_node = action

 

 

                             next_state[0,0:U]=L*(Cap_node0-Cap_node)/(Cap_node0)

                             s_link=L*(Cap_link0-Cap_link)/(Cap_link0+1)

                             s_link_resh=np.reshape(s_link,[1,number_node*number_node])

                             next_state[0,U+1:U*U+(U+1)]=s_link_resh

                             next_state[0,-5]=ing               #  Ingress

                             next_state[0,-4]=eg                #  Egress

                             next_state[0,-3]=curent_node       #  Current node

                             next_state[0,-2]=i_f               # function

                             next_state[0,-1]=1                 # delay

                             next_state=np.round(next_state)

 

                             state=state.reshape([1,state_size])

                             next_state=next_state.reshape([1,state_size])

                             temp_link += Link

                             reward_u[t,i_K,u] += reward

 

                             agent.memorize(state, action, reward, next_state, done)

 

                             if len(agent.memory) > batch_size:

                                loss = agent.replay(batch_size)        # single training step on a sampled minibatch

                                LOSS[t,i_K,u] += loss/number_function

                                # print (loss)

 

 

                        if Flag_done==number_function:    # service accepted only if every function was placed

                            acc[t,i_K,u]=1

                            cost_total[t,i_K,u]=np.sum(cost_cpu[t,i_K,:,u])+np.sum(cost_link_f[t,i_K,:,u])

                            reward_u[t,i_K,u]

                            Cap_link_old  = Cap_link

                            Cap_link = Cap_link - temp_link*cap_needed

                            MEM_link[t,i_K,u,:,:] = Cap_link_old - Cap_link

                        else:

                            acc[t,i_K,u] = 0

                            cost_total[t,i_K,u]=0

                            reward_u[t,i_K,u] = 0

 

                    # print("Service_User : {},{}" .format(i_K,u))

                    # print("Episode ,Time_Slot, Service : {},{},{}".format(e,t,i_K))

 

 

                # Release resources held by departed users

                re_CPU=np.zeros([1,number_node])

                re_link=np.zeros([number_node,number_node])

                for i_K in range(number_service):      #  

                    exp_time[t,i_K,:]=np.random.exponential(mu-1,number_user)   # service lifetime drawn from an exponential distribution

                    exp_time[t,i_K,:]=np.round(exp_time[t,i_K,:]+1)

                    mem[t,i_K,:]=exp_time[t,i_K,:]

                    temp_mem=np.reshape(mem[:,i_K,:],[Ts,number_user])

                    temp_t,temp_u=np.where(temp_mem==1)     #   departure users

 

                    mem[:,i_K,:]=mem[:,i_K,:]-1              # elapse one time slot

                    xm,ym=np.where(mem[:,i_K,:]<0)

                    mem[xm,i_K,ym]=0

                    temp_t_len=len(temp_t)

 

                    cpu_resh=np.zeros([1,number_node])

                    link_resh=np.zeros([number_node,number_node])

 

                    for tt in range(temp_t_len):

                        cpu_resh=sum(MEM_cpu[temp_t[tt],i_K,:,temp_u[tt]])

                        link_resh=(MEM_link[temp_t[tt],i_K,temp_u[tt],:,:])

 

                        re_CPU=re_CPU+cpu_resh

                        re_link=re_link+link_resh

 

                Cap_link=Cap_link+re_link

                Cap_node=Cap_node+re_CPU

 

                #print( "time_slot : {}" .format(t))

                #print (re_CPU)

 

                r_time = np.append(r_time,  np.sum(reward_u[t,:,:]))

 

 

 

 

        # cost_Episode = np.append(cost_Episode, np.sum(cost_total))

 

        # acc_Episode = np.append(acc_Episode, np.sum(acc)/((Ts)*number_service*number_user))

 

        # r = np.append(r, np.sum(reward_u))

 

        # loss_episode = np.append(loss_episode, np.sum(LOSS))

 

    #    print( "EPISODE : {}" .format(e))

 

# plt.plot(r)

# plt.plot(acc_Episode)

# plt.plot(cost_Episode)

# tim_FINISH=timer()

Instructions: 

$$$$$$$$$$$$$$$$$$$$$$$$$$$$$

 

To set the network and system parameters, enter the number of users, services, functions, and nodes,

and the service lifetime, then check the following parameters (a short sketch after this list shows how the DQN state and action sizes follow from these values):

 

number_function=5

 

number_node=10

 

number_service=5

 

number_user=5   # average number of arriving users per time slot

 

Episode=2000    #  number of training episodes

 

Ts=1000         # number of time slots over which user arrivals and service departures are evaluated

 

L=1000

 

mu=240   ###   average duration of service lifetime

 

batch_size = 8

 

number_VM=6
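
For reference, here is a minimal sketch (assuming the parameter values above) of how the DQN dimensions used in the script follow from the network size: the state holds the normalized node loads, the flattened link-load matrix, and a few extra scalar entries (ingress node, egress node, current node, and function index), while each action selects the node that hosts the current function.

number_node = 10                  # same value as in the list above

U = number_node
state_size = U + U*U + 6          # node loads + flattened U-by-U link loads + extra scalar entries
action_size = number_node         # one action per candidate hosting node

print(state_size, action_size)    # prints: 116 10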

 

Moreover, to change the cost coefficient applied to each action, please change the parameter $alpha$

in line 376. A minimal sketch of the resulting per-step reward follows.
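
As a minimal, self-contained sketch of that coefficient's effect (the function name step_reward and the example costs are hypothetical; the formula mirrors the reward=100-alpha*cost line of the script):

def step_reward(cost_cpu, cost_link, alpha=0.1):
    """Per-step reward for a successful placement: fixed bonus minus weighted cost."""
    cost = cost_cpu + cost_link        # CPU placement cost plus link routing cost
    return 100 - alpha * cost          # larger alpha penalizes expensive placements more

print(step_reward(cost_cpu=300.0, cost_link=150.0))   # 55.0 with the default alpha=0.1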

Dataset Files


Documentation

Attachment: guide_text.txt (694 bytes)