Several features I've assembled from other people #46

Open
wants to merge 17 commits into master

Changes from 1 commit
pep8 update
skada authored and Israel Fruchter committed Sep 20, 2014

Verified

This commit was created on github.com and signed with GitHub’s verified signature. The key has expired.
commit 78bb1603c256bef33093267555c894de25727326
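
Since this commit is a whitespace/PEP 8 cleanup, one way to sanity-check the result locally is to run a style checker over the touched modules. A minimal sketch, assuming pycodestyle (the maintained successor of the old pep8 checker) is installed; the PR itself does not say which tool was used:

    # Hypothetical verification snippet -- not part of this PR; pycodestyle is an assumption.
    import pycodestyle

    style = pycodestyle.StyleGuide()  # default PEP 8 rules
    report = style.check_files(['multimechanize/core.py',
                                'multimechanize/graph.py',
                                'multimechanize/progressbar.py',
                                'multimechanize/reportwriter.py',
                                'multimechanize/reportwriterxml.py',
                                'multimechanize/results.py'])
    # total_errors counts the PEP 8 violations found across the checked files
    print('PEP 8 violations remaining: %d' % report.total_errors)
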
4 changes: 2 additions & 2 deletions multimechanize/core.py
@@ -17,6 +17,7 @@
from multimechanize.script_loader import ScriptLoader
import os.path


def init(projects_dir, project_name):
"""
Sanity check that all test scripts can be loaded.
@@ -28,6 +29,7 @@ def init(projects_dir, project_name):
# -- NORMAL-CASE: Ensure that all scripts can be loaded (at program start).
ScriptLoader.load_all(scripts_path, validate=True)


def load_script(script_file):
"""
Load a test scripts as Python module.
@@ -73,7 +75,6 @@ def run(self):
agent_thread.join()



class Agent(threading.Thread):
def __init__(self, queue, process_num, thread_num, start_time, run_time,
user_group_name, script_module, script_file, user_group_config):
@@ -95,7 +96,6 @@ def __init__(self, queue, process_num, thread_num, start_time, run_time,
else:
self.default_timer = time.time


def run(self):
elapsed = 0
trans = self.script_module.Transaction(self.user_group_config)
57 changes: 26 additions & 31 deletions multimechanize/graph.py
@@ -5,95 +5,90 @@
#
# This file is part of Multi-Mechanize | Performance Test Framework
#


import sys

try:
import matplotlib

matplotlib.use('Agg') # use a non-GUI backend
from pylab import *
except ImportError:
print 'ERROR: can not import Matplotlib. install Matplotlib to generate graphs'



# response time graph for raw data
def resp_graph_raw(nested_resp_list, image_name, dir='./'):
fig = figure(figsize=(8, 3.3)) # image dimensions
ax = fig.add_subplot(111)
ax.set_xlabel('Elapsed Time In Test (secs)', size='x-small')
ax.set_ylabel('Response Time (secs)' , size='x-small')
ax.set_ylabel('Response Time (secs)', size='x-small')
ax.grid(True, color='#666666')
xticks(size='x-small')
yticks(size='x-small')
x_seq = [item[0] for item in nested_resp_list]
y_seq = [item[1] for item in nested_resp_list]
ax.plot(x_seq, y_seq,
color='blue', linestyle='-', linewidth=0.0, marker='o',
markeredgecolor='blue', markerfacecolor='blue', markersize=2.0)
ax.plot([0.0,], [0.0,], linewidth=0.0, markersize=0.0)
color='blue', linestyle='-', linewidth=0.0, marker='o',
markeredgecolor='blue', markerfacecolor='blue', markersize=2.0)
ax.plot([0.0, ], [0.0, ], linewidth=0.0, markersize=0.0)
savefig(dir + image_name)



# response time graph for bucketed data
def resp_graph(avg_resptime_points_dict, percentile_80_resptime_points_dict, percentile_90_resptime_points_dict, image_name, dir='./'):
def resp_graph(avg_resptime_points_dict, percentile_80_resptime_points_dict, percentile_90_resptime_points_dict,
image_name, dir='./'):
fig = figure(figsize=(8, 3.3)) # image dimensions
ax = fig.add_subplot(111)
ax.set_xlabel('Elapsed Time In Test (secs)', size='x-small')
ax.set_ylabel('Response Time (secs)' , size='x-small')
ax.set_ylabel('Response Time (secs)', size='x-small')
ax.grid(True, color='#666666')
xticks(size='x-small')
yticks(size='x-small')

x_seq = sorted(avg_resptime_points_dict.keys())
y_seq = [avg_resptime_points_dict[x] for x in x_seq]
ax.plot(x_seq, y_seq,
color='green', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='green', markerfacecolor='yellow', markersize=2.0)
color='green', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='green', markerfacecolor='yellow', markersize=2.0)

x_seq = sorted(percentile_80_resptime_points_dict.keys())
y_seq = [percentile_80_resptime_points_dict[x] for x in x_seq]
ax.plot(x_seq, y_seq,
color='orange', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='orange', markerfacecolor='yellow', markersize=2.0)
color='orange', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='orange', markerfacecolor='yellow', markersize=2.0)

x_seq = sorted(percentile_90_resptime_points_dict.keys())
y_seq = [percentile_90_resptime_points_dict[x] for x in x_seq]
ax.plot(x_seq, y_seq,
color='purple', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='purple', markerfacecolor='yellow', markersize=2.0)
color='purple', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='purple', markerfacecolor='yellow', markersize=2.0)

ax.plot([0.0,], [0.0,], linewidth=0.0, markersize=0.0)
ax.plot([0.0, ], [0.0, ], linewidth=0.0, markersize=0.0)

legend_lines = reversed(ax.get_lines()[:3])
ax.legend(
legend_lines,
('90pct', '80pct', 'Avg'),
loc='best',
handlelength=1,
borderpad=1,
prop=matplotlib.font_manager.FontProperties(size='xx-small')
)
legend_lines,
('90pct', '80pct', 'Avg'),
loc='best',
handlelength=1,
borderpad=1,
prop=matplotlib.font_manager.FontProperties(size='xx-small')
)

savefig(dir + image_name)



# throughput graph
def tp_graph(throughputs_dict, image_name, dir='./'):
fig = figure(figsize=(8, 3.3)) # image dimensions
ax = fig.add_subplot(111)
ax.set_xlabel('Elapsed Time In Test (secs)', size='x-small')
ax.set_ylabel('Transactions Per Second (count)' , size='x-small')
ax.set_ylabel('Transactions Per Second (count)', size='x-small')
ax.grid(True, color='#666666')
xticks(size='x-small')
yticks(size='x-small')
x_seq = sorted(throughputs_dict.keys())
y_seq = [throughputs_dict[x] for x in x_seq]
ax.plot(x_seq, y_seq,
color='red', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='red', markerfacecolor='yellow', markersize=2.0)
ax.plot([0.0,], [0.0,], linewidth=0.0, markersize=0.0)
color='red', linestyle='-', linewidth=0.75, marker='o',
markeredgecolor='red', markerfacecolor='yellow', markersize=2.0)
ax.plot([0.0, ], [0.0, ], linewidth=0.0, markersize=0.0)
savefig(dir + image_name)
1 change: 0 additions & 1 deletion multimechanize/progressbar.py
@@ -7,7 +7,6 @@
#



class ProgressBar(object):
def __init__(self, duration):
self.duration = duration
3 changes: 0 additions & 3 deletions multimechanize/reportwriter.py
@@ -13,12 +13,10 @@ def __init__(self, results_dir):
self.fn = results_dir + 'results.html'
self.write_head_html()


def write_line(self, line):
with open(self.fn, 'a') as f:
f.write('%s\n' % line)


def write_head_html(self):
with open(self.fn, 'w') as f:
f.write("""\
@@ -92,7 +90,6 @@ def write_head_html(self):
<body>
""")


def write_closing_html(self):
with open(self.fn, 'a') as f:
f.write("""\
1 change: 0 additions & 1 deletion multimechanize/reportwriterxml.py
@@ -10,7 +10,6 @@
from xml.etree import ElementTree as ET



def write_jmeter_output(mm_data, output_path):
"""
Take the list of ResponseStats objects and write a JMeter 2.1
22 changes: 0 additions & 22 deletions multimechanize/results.py
@@ -14,7 +14,6 @@
import reportwriterxml
import re


def output_results(results_dir, results_file, run_time, rampup, ts_interval, user_group_configs=None, xml_reports=False):
results = Results(results_dir + results_file, run_time)

@@ -79,7 +78,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
))
report.write_line('</table>')


# all transactions - interval details
avg_resptime_points = {} # {intervalnumber: avg_resptime}
percentile_80_resptime_points = {} # {intervalnumber: 80pct_resptime}
@@ -113,7 +111,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
report.write_line('</table>')
graph.resp_graph(avg_resptime_points, percentile_80_resptime_points, percentile_90_resptime_points, 'All_Transactions_response_times_intervals.png', results_dir)


report.write_line('<h3>Graphs</h3>')
report.write_line('<h4>Response Time: %s sec time-series</h4>' % ts_interval)
report.write_line('<img src="All_Transactions_response_times_intervals.png"></img>')
@@ -122,8 +119,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
report.write_line('<h4>Throughput: 5 sec time-series</h4>')
report.write_line('<img src="All_Transactions_throughput.png"></img>')



# all transactions - throughput
throughput_points = {} # {intervalnumber: numberofrequests}
interval_secs = ts_interval
@@ -132,8 +127,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
throughput_points[int((i + 1) * interval_secs)] = (len(bucket) / interval_secs)
graph.tp_graph(throughput_points, 'All_Transactions_throughput.png', results_dir)



# custom timers
for timer_name in sorted(results.uniq_timer_names):
custom_timer_vals = []
@@ -173,7 +166,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
))
report.write_line('</table>')


# custom timers - interval details
avg_resptime_points = {} # {intervalnumber: avg_resptime}
percentile_80_resptime_points = {} # {intervalnumber: 80pct_resptime}
@@ -207,7 +199,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
report.write_line('</table>')
graph.resp_graph(avg_resptime_points, percentile_80_resptime_points, percentile_90_resptime_points, timer_name + '_response_times_intervals.png', results_dir)


report.write_line('<h3>Graphs</h3>')
report.write_line('<h4>Response Time: %s sec time-series</h4>' % ts_interval)
report.write_line('<img src="%s_response_times_intervals.png"></img>' % timer_name)
@@ -216,8 +207,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
report.write_line('<h4>Throughput: %s sec time-series</h4>' % ts_interval)
report.write_line('<img src="%s_throughput.png"></img>' % timer_name)



## user group times
#for user_group_name in sorted(results.uniq_user_group_names):
# ug_timer_vals = []
@@ -237,8 +226,6 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
report.write_closing_html()




class Results(object):
def __init__(self, results_file_name, run_time):
self.results_file_name = results_file_name
@@ -255,8 +242,6 @@ def __init__(self, results_file_name, run_time):
self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish))



def __parse_file(self):
f = open(self.results_file_name, 'rb')
resp_stats_list = []
@@ -302,7 +287,6 @@ def __parse_file(self):
return resp_stats_list



class ResponseStats(object):
def __init__(self, request_num, elapsed_time, epoch_secs, user_group_name, trans_time, error, custom_timers):
self.request_num = request_num
@@ -314,7 +298,6 @@ def __init__(self, request_num, elapsed_time, epoch_secs, user_group_name, trans
self.custom_timers = custom_timers



def split_series(points, interval):
offset = points[0][0]
maxval = int((points[-1][0] - offset) // interval)
@@ -325,13 +308,11 @@ def split_series(points, interval):
return series



def average(seq):
avg = (float(sum(seq)) / len(seq))
return avg



def standard_dev(seq):
avg = average(seq)
sdsq = sum([(i - avg) ** 2 for i in seq])
@@ -342,14 +323,11 @@ def standard_dev(seq):
return stdev



def percentile(seq, percentile):
i = int(len(seq) * (percentile / 100.0))
seq.sort()
return seq[i]




if __name__ == '__main__':
output_results('./', 'results.csv', 60, 30, 10)