send_batch_requests.py
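"""Send a batch of image-resize requests to the target sledge_url with the
`hey` load generator and collect per-request results into results.csv.

Expected invocation (a sketch; argument order follows the sys.argv parsing
in __main__, and the bracketed names are placeholders):

    python3 send_batch_requests.py <requests_per_second> <total_seconds> \
        <num_threads> <deadline> <sledge_url>

Assumes a predictions.csv in the working directory with the columns
url,filename,estimated_time_small,estimated_time_medium,estimated_time_large,
and that `hey` is available on the PATH.
"""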
import os
import random
import sys
from threading import Thread

import pandas as pd
import requests


def worker(image_path, estimated_execution_time, sledge_url, rps, ts, i):
    # One `hey` load stream: POST the image file, send the estimated cost as a
    # header, and append hey's per-request CSV output to result<i>.csv.
    request = 'hey -disable-compression -disable-keepalive -disable-redirects -H "expected_cost: {}" -c 4 -o csv -t 0 -q {} -z {}s -m POST -D "{}" "{}" >> result{}.csv'.format(estimated_execution_time, rps, ts, image_path, sledge_url, i)
    os.system(request)
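
# Note: `hey -o csv` writes one row per request; the aggregation loop in
# __main__ reads its `response-time` and `status-code` columns from the
# per-thread result<i>.csv files.
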
# Credit: https://cmdlinetips.com/2022/07/randomly-sample-rows-from-a-big-csv-file/
def sample_n_from_csv(filename: str, n: int = 100, total_rows: int = None) -> pd.DataFrame:
    """Return n randomly sampled data rows from a CSV file (header row is kept)."""
    if total_rows is None:
        with open(filename, "r") as fh:
            total_rows = sum(1 for row in fh) - 1  # exclude the header row
    if n > total_rows:
        print("Error: n > total_rows", file=sys.stderr)
        sys.exit(1)
    # Skip a random subset of data rows (line 0 is the header) so that n remain.
    skip_rows = random.sample(range(1, total_rows + 1), total_rows - n)
    return pd.read_csv(filename, skiprows=skip_rows)
if __name__ == "__main__":
    requests_per_second = int(sys.argv[1])
    total_seconds = int(sys.argv[2])
    num_threads = int(sys.argv[3])
    deadline = float(sys.argv[4])
    sledge_url = sys.argv[5]
    # Pick the prediction column matching the endpoint: a URL ending in e.g.
    # ".../resize_small/" selects the "estimated_time_small" column
    # (this assumes the URL carries a trailing slash).
    predictions_index = "estimated_time" + sledge_url.split("/")[-2][len("resize"):]
    predictions = sample_n_from_csv(filename="predictions.csv", n=num_threads)
    # predictions.csv columns:
    # url,filename,estimated_time_small,estimated_time_medium,estimated_time_large
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'images')
    if not os.path.exists(path):
        os.mkdir(path)
    # Download the sampled images so `hey` can POST them from disk.
    for i in range(len(predictions)):
        image_url = predictions['url'][i]
        filename = predictions['filename'][i]
        with open(os.path.join(path, filename), 'wb') as handler:
            handler.write(requests.get(image_url).content)
    # One thread (one `hey` stream) per sampled image; the estimated time is
    # scaled by 10**6 before being passed as the expected_cost header.
    threads = []
    for i in range(num_threads):
        threads.append(Thread(target=worker,
                              args=(os.path.join(path, predictions['filename'][i]),
                                    float(predictions[predictions_index][i]) * pow(10, 6),
                                    sledge_url, requests_per_second, total_seconds, i)))
        threads[i].start()
    for t in threads:
        t.join()
    # Merge the per-thread result CSVs and flag requests that missed the deadline.
    all_data = pd.DataFrame(columns=("filename", "status_code", "missed_deadline"))
    for i in range(num_threads):
        cur_csv = pd.read_csv("result{}.csv".format(i))
        for j in range(len(cur_csv)):
            missed_deadline = float(cur_csv['response-time'][j]) > deadline
            all_data.loc[len(all_data.index)] = [predictions['filename'][i],
                                                 cur_csv['status-code'][j],
                                                 missed_deadline]
    all_data.to_csv('results.csv', index=False)
    # Remove the downloaded images and the per-thread result files.
    for i in range(len(predictions)):
        filename = predictions['filename'][i]
        os.remove(os.path.join(path, filename))
        os.remove("result{}.csv".format(i))