Code Formatting #52

Merged: 3 commits, Nov 27, 2020
535 changes: 364 additions & 171 deletions benchmarks.py

Large diffs are not rendered by default.
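The benchmarks.py diff itself is collapsed here, so the reformatted function bodies are not visible. Purely as a hedged illustration of the target style, a benchmark definition formatted the same way might look like the sketch below; the function body is an assumption for illustration, not code taken from this PR.

import numpy


def F1(x):
    # Hypothetical sphere-style benchmark: sum of squared decision variables.
    # Illustrative only; the real definitions are in the collapsed benchmarks.py diff.
    return numpy.sum(numpy.square(x))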

24 changes: 14 additions & 10 deletions example.py
@@ -9,22 +9,26 @@

# Select optimizers
# "SSA","PSO","GA","BAT","FFA","GWO","WOA","MVO","MFO","CS","HHO","SCA","JAYA","DE"
optimizer=["SSA","PSO","GWO"]
optimizer = ["SSA", "PSO", "GWO"]

# Select benchmark function
# "F1","F2","F3","F4","F5","F6","F7","F8","F9","F10","F11","F12","F13","F14","F15","F16","F17","F18","F19"
# "Ca1","Ca2","Gt1","Mes","Mef","Sag","Tan","Ros"
objectivefunc=["F3","F4"]
# "Ca1","Ca2","Gt1","Mes","Mef","Sag","Tan","Ros"
objectivefunc = ["F3", "F4"]

# Select number of repetitions for each experiment.
# To obtain meaningful statistical results, usually 30 independent runs are executed for each algorithm.
NumOfRuns=3
NumOfRuns = 3

# Select general parameters for all optimizers (population size, number of iterations) ....
params = {'PopulationSize' : 30, 'Iterations' : 50}

#Choose whether to Export the results in different formats
export_flags = {'Export_avg':True, 'Export_details':True,
'Export_convergence':True, 'Export_boxplot':True}
params = {"PopulationSize": 30, "Iterations": 50}

# Choose whether to Export the results in different formats
export_flags = {
"Export_avg": True,
"Export_details": True,
"Export_convergence": True,
"Export_boxplot": True,
}

run(optimizer, objectivefunc, NumOfRuns, params, export_flags)
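For context, run() writes its CSV output into a timestamped results directory (created in optimizer.py below), with a header row of Optimizer, objfname, ExecutionTime followed by the per-iteration convergence columns. A minimal sketch of reading the averaged results back; the directory name below is a placeholder to be replaced with the folder your own run created:

import csv

# Placeholder path: optimizer.py names the directory after the run's start time,
# e.g. "2020-11-27-14-30-00/"; substitute the directory your run created.
results_file = "2020-11-27-14-30-00/experiment.csv"

with open(results_file, newline="") as f:
    for row in csv.DictReader(f):
        print(row["Optimizer"], row["objfname"], row["ExecutionTime"])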
204 changes: 109 additions & 95 deletions optimizer.py
@@ -28,44 +28,45 @@
import plot_convergence as conv_plot
import plot_boxplot as box_plot

warnings.simplefilter(action='ignore')

def selector(algo,func_details,popSize,Iter):
function_name=func_details[0]
lb=func_details[1]
ub=func_details[2]
dim=func_details[3]

if(algo=="SSA"):
x=ssa.SSA(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="PSO"):
x=pso.PSO(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="GA"):
x=ga.GA(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="BAT"):
x=bat.BAT(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="FFA"):
x=ffa.FFA(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="GWO"):
x=gwo.GWO(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="WOA"):
x=woa.WOA(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="MVO"):
x=mvo.MVO(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="MFO"):
x=mfo.MFO(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="CS"):
x=cs.CS(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="HHO"):
x=hho.HHO(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="SCA"):
x=sca.SCA(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="JAYA"):
x=jaya.JAYA(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
elif(algo=="DE"):
x=de.DE(getattr(benchmarks, function_name),lb,ub,dim,popSize,Iter)
warnings.simplefilter(action="ignore")


def selector(algo, func_details, popSize, Iter):
function_name = func_details[0]
lb = func_details[1]
ub = func_details[2]
dim = func_details[3]

if algo == "SSA":
x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "PSO":
x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "GA":
x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "BAT":
x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "FFA":
x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "GWO":
x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "WOA":
x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "MVO":
x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "MFO":
x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "CS":
x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "HHO":
x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "SCA":
x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "JAYA":
x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "DE":
x = de.DE(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
else:
return None;
return None
return x
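
The if/elif ladder above works, but the same dispatch could also be written as a table lookup; a minimal sketch of that alternative follows. It is not part of this PR and assumes the same optimizer modules (ssa, pso, ga, ...) and the benchmarks module are imported as in optimizer.py.

# Hypothetical alternative to the if/elif chain; assumes the optimizer modules
# (ssa, pso, ga, bat, ffa, gwo, woa, mvo, mfo, cs, hho, sca, jaya, de) and the
# benchmarks module are already imported, as in optimizer.py above.
ALGORITHMS = {
    "SSA": ssa.SSA,
    "PSO": pso.PSO,
    "GA": ga.GA,
    "BAT": bat.BAT,
    "FFA": ffa.FFA,
    "GWO": gwo.GWO,
    "WOA": woa.WOA,
    "MVO": mvo.MVO,
    "MFO": mfo.MFO,
    "CS": cs.CS,
    "HHO": hho.HHO,
    "SCA": sca.SCA,
    "JAYA": jaya.JAYA,
    "DE": de.DE,
}


def selector_by_lookup(algo, func_details, popSize, Iter):
    # Same unpacking as selector(): name, lower bound, upper bound, dimension.
    function_name, lb, ub, dim = func_details[0], func_details[1], func_details[2], func_details[3]
    constructor = ALGORITHMS.get(algo)
    if constructor is None:
        return None
    return constructor(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)

This keeps the supported algorithm names in one table and makes the unknown-name case explicit, mirroring the else branch above.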


@@ -75,100 +76,113 @@ def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
It serves as the main interface of the framework for running the experiments.

Parameters
----------
optimizer : list
The list of optimizer names
objectivefunc : list
The list of benchmark functions
NumOfRuns : int
The number of independent runs
params : dict
The dictionary of parameters, which are:
1. Size of population (PopulationSize)
2. The number of iterations (Iterations)
export_flags : dict
The dictionary of Boolean flags, which are:
1. Export_avg (Exporting the averaged results in a file)
2. Export_details (Exporting the detailed results in files)
3. Export_convergence (Exporting the convergence plots)
4. Export_boxplot (Exporting the box plots)

Returns
-----------
N/A
"""

# Select general parameters for all optimizers (population size, number of iterations) ....
PopulationSize = params['PopulationSize']
Iterations= params['Iterations']
PopulationSize = params["PopulationSize"]
Iterations = params["Iterations"]

#Export results ?
Export=export_flags['Export_avg']
Export_details=export_flags['Export_details']
Export_convergence = export_flags['Export_convergence']
Export_boxplot = export_flags['Export_boxplot']
# Export results ?
Export = export_flags["Export_avg"]
Export_details = export_flags["Export_details"]
Export_convergence = export_flags["Export_convergence"]
Export_boxplot = export_flags["Export_boxplot"]

Flag=False
Flag_details=False
Flag = False
Flag_details = False

# CSV Header for the convergence
CnvgHeader=[]
# CSV Header for the convergence
CnvgHeader = []

results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + '/'
results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/"
Path(results_directory).mkdir(parents=True, exist_ok=True)

for l in range(0,Iterations):
CnvgHeader.append("Iter"+str(l+1))
for l in range(0, Iterations):
CnvgHeader.append("Iter" + str(l + 1))


for i in range (0, len(optimizer)):
for j in range (0, len(objectivefunc)):
convergence = [0]*NumOfRuns
executionTime = [0]*NumOfRuns
for k in range (0,NumOfRuns):
func_details=benchmarks.getFunctionDetails(objectivefunc[j])
x=selector(optimizer[i],func_details,PopulationSize,Iterations)
for i in range(0, len(optimizer)):
for j in range(0, len(objectivefunc)):
convergence = [0] * NumOfRuns
executionTime = [0] * NumOfRuns
for k in range(0, NumOfRuns):
func_details = benchmarks.getFunctionDetails(objectivefunc[j])
x = selector(optimizer[i], func_details, PopulationSize, Iterations)
convergence[k] = x.convergence
optimizerName = x.optimizer
objfname = x.objfname
if(Export_details==True):
ExportToFile=results_directory + "experiment_details.csv"
with open(ExportToFile, 'a',newline='\n') as out:
writer = csv.writer(out,delimiter=',')
if (Flag_details==False): # just one time to write the header of the CSV file
header= numpy.concatenate([["Optimizer","objfname","ExecutionTime"],CnvgHeader])
objfname = x.objfname
if Export_details == True:
ExportToFile = results_directory + "experiment_details.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag_details == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag_details=True # at least one experiment
Flag_details = True # at least one experiment
executionTime[k] = x.executionTime
a=numpy.concatenate([[x.optimizer,x.objfname,x.executionTime],x.convergence])
a = numpy.concatenate(
[[x.optimizer, x.objfname, x.executionTime], x.convergence]
)
writer.writerow(a)
out.close()

if(Export==True):
ExportToFile=results_directory + "experiment.csv"

with open(ExportToFile, 'a',newline='\n') as out:
writer = csv.writer(out,delimiter=',')
if (Flag==False): # just one time to write the header of the CSV file
header= numpy.concatenate([["Optimizer","objfname","ExecutionTime"],CnvgHeader])
writer.writerow(header)
Flag=True

avgExecutionTime = float("%0.2f"%(sum(executionTime) / NumOfRuns))
avgConvergence = numpy.around(numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2).tolist()
a=numpy.concatenate([[optimizerName,objfname,avgExecutionTime],avgConvergence])
if Export == True:
ExportToFile = results_directory + "experiment.csv"

with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag = True

avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
avgConvergence = numpy.around(
numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
).tolist()
a = numpy.concatenate(
[[optimizerName, objfname, avgExecutionTime], avgConvergence]
)
writer.writerow(a)
out.close()

if Export_convergence == True:
conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)



if Export_boxplot == True:
box_plot.run(results_directory, optimizer, objectivefunc, Iterations)

if (Flag==False): # Failed to run at least one experiment
print("No optimizer or cost function is selected. Check the lists of available optimizers and cost functions")

print("Execution completed")

if Flag == False:  # Failed to run at least one experiment
print(
"No optimizer or cost function is selected. Check the lists of available optimizers and cost functions"
)

print("Execution completed")