add insert scenario (#32)
* add insert scenario

* fix github action
HarrisChu authored Aug 5, 2021
1 parent 973e514 commit d8f27bb
Showing 8 changed files with 86 additions and 8 deletions.
5 changes: 1 addition & 4 deletions .github/workflows/import.yaml
@@ -72,10 +72,7 @@ jobs:
env:
CGO_ENABLED: 0
run: |
-export GOPATH=$HOME/go
-export GOBIN=$GOPATH/bin
-export PATH=$PATH:$GOBIN
-/bin/bash scripts/setup.sh
+./scripts/setup.sh
- name: generate the data
run: |
2 changes: 1 addition & 1 deletion README.md
@@ -44,7 +44,7 @@ prepare nebula tools.
* [xk6-nebula](https://github.com/HarrisChu/xk6-nebula)

```bash
-/bin/bash scripts/setup.sh
+./scripts/setup.sh
```

After compilation, the binaries are placed in the `scripts` folder.
1 change: 1 addition & 0 deletions nebula_bench/common/base.py
@@ -145,6 +145,7 @@ def __new__(cls, name, bases, attrs, *args, **kwargs):

class BaseScenario(metaclass=ScenarioMeta):
    abstract = True
+    is_insert_scenario = False
    nGQL: str
    stage: dict
    csv_path: str
1 change: 1 addition & 0 deletions nebula_bench/scenarios/__init__.py
@@ -1,2 +1,3 @@
from nebula_bench.scenarios import go
from nebula_bench.scenarios import find_path
+from nebula_bench.scenarios import insert
10 changes: 10 additions & 0 deletions nebula_bench/scenarios/insert.py
@@ -0,0 +1,10 @@
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class InsertPersonScenario(BaseScenario):
    is_insert_scenario = True
    nGQL = "INSERT VERTEX Person(firstName, lastName, gender, birthday, creationDate, locationIP, browserUsed) VALUES "
    abstract = False
    csv_path = "social_network/dynamic/person.csv"
    csv_index = [1, 2, 3, 4, 5, 6, 7]
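
The scenario itself is purely declarative: `csv_path` names the LDBC person file, `csv_index` picks the seven columns that feed the `Person` properties listed in `nGQL`, and column 0 of each row supplies the vertex ID (the k6 template added below does the actual assembly, reading the file with `|` as the delimiter). A minimal sketch of that column mapping, with a hypothetical row:

```python
# Sketch of the column mapping implied by csv_index; the row values are hypothetical.
props = ["firstName", "lastName", "gender", "birthday",
         "creationDate", "locationIP", "browserUsed"]
csv_index = [1, 2, 3, 4, 5, 6, 7]

row = "933|Mahinda|Perera|male|1989-12-03|2010-02-14|119.235.7.103|Firefox".split("|")

vid = row[0]                                  # column 0 becomes the vertex ID
values = {p: row[i] for p, i in zip(props, csv_index)}
# {'firstName': 'Mahinda', 'lastName': 'Perera', 'gender': 'male', ...}
```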
17 changes: 15 additions & 2 deletions nebula_bench/stress.py
@@ -106,6 +106,14 @@ def get_all_stress_class(cls):


class K6Stress(Stress):
+    def _update_read_config(self, scenario, kwargs):
+        kwargs["param"] = ",".join(["d[" + str(x) + "]" for x in scenario.csv_index])
+        return kwargs
+
+    def _update_insert_config(self, scenario, kwargs):
+        kwargs["csv_index"] = ",".join([str(x) for x in scenario.csv_index])
+        return kwargs

    def dump_config(self, scenario):
        assert issubclass(scenario, BaseScenario)
        name = scenario.name
@@ -118,14 +126,19 @@ def dump_config(self, scenario):
"output_path": "{}/output_{}.csv".format(self.output_folder, name),
"nGQL": scenario.nGQL,
}
if scenario.is_insert_scenario:
kwargs = self._update_insert_config(scenario, kwargs)
template_file = "k6_config_insert.js.j2"
else:
kwargs = self._update_read_config(scenario, kwargs)
template_file = "k6_config.js.j2"

kwargs["param"] = ",".join(["d[" + str(x) + "]" for x in scenario.csv_index])
logger.info(
"begin dump stress config, config file is {}".format(
"{}/{}.js".format(self.output_folder, name)
)
)
jinja_dump("k6_config.js.j2", "{}/{}.js".format(self.output_folder, name), kwargs)
jinja_dump(template_file, "{}/{}.js".format(self.output_folder, name), kwargs)

    def run(self):
        logger.info("run stress test in k6")
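
The two new helpers differ in a single template variable: an insert scenario hands the raw column indexes (`csv_index`) to `k6_config_insert.js.j2`, while read scenarios keep the pre-built `d[i]` accessors (`param`) for `k6_config.js.j2`. For `InsertPersonScenario` the values work out as in the sketch below; the rest of the kwargs dict (output path, nGQL prefix, connection settings, and so on) is built the same way for both branches and omitted here.

```python
# Sketch of the branch-specific template variables for InsertPersonScenario.
from nebula_bench.scenarios.insert import InsertPersonScenario

scenario = InsertPersonScenario               # csv_index = [1, 2, 3, 4, 5, 6, 7]

# insert branch -> templates/k6_config_insert.js.j2
csv_index = ",".join(str(x) for x in scenario.csv_index)
# "1,2,3,4,5,6,7", rendered into `let arr = [1,2,3,4,5,6,7]` in the script

# read branch (what a read scenario would get) -> templates/k6_config.js.j2
param = ",".join("d[" + str(x) + "]" for x in scenario.csv_index)
# "d[1],d[2],d[3],d[4],d[5],d[6],d[7]"
```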
56 changes: 56 additions & 0 deletions templates/k6_config_insert.js.j2
@@ -0,0 +1,56 @@
import nebulaPool from 'k6/x/nebulagraph';
import { check } from 'k6';
import { Trend } from 'k6/metrics';
import { sleep } from 'k6';

var lantencyTrend = new Trend('latency');
var responseTrend = new Trend('responseTime');
// initial nebula connect pool
var pool = nebulaPool.initWithSize("{{ address }}", 2000, 4000);

// set csv strategy, 1 means each vu has a separate csv reader.
pool.configCsvStrategy(1)

// initial session for every vu
var session = pool.getSession("{{ user }}", "{{ password }}")
session.execute("USE {{ space }}")


export function setup() {
  // config csv file
  pool.configCSV("{{ csv_path }}", "|", false)
  // config output file, save every query information
  pool.configOutput("{{ output_path }}")
  sleep(1)
}

export default function (data) {
  let ngql = '{{ nGQL }}'
  let batches = []
  let batchSize = 100
  // batch size 100
  for (let i = 0; i < batchSize; i++) {
    let d = session.getData();
    let values = []
    let arr = [{{ csv_index }}]
    arr.forEach(function(e){
      let value = '"' + d[e] + '"'
      values.push(value)
    })

    let batch = d[0] + ":(" + values.join(",") + ")"
    batches.push(batch)
  }
  ngql = ngql + batches.join(',')
  let response = session.execute(ngql)
  check(response, {
    "IsSucceed": (r) => r.isSucceed() === true
  });
  // add trend
  lantencyTrend.add(response.getLatency());
  responseTrend.add(response.getResponseTime());
};

export function teardown() {
  pool.close()
}
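
Per iteration the script pulls `batchSize = 100` rows through `session.getData()`, formats each row as `vid:("v1","v2",...)`, and joins the tuples after the scenario's `INSERT VERTEX ... VALUES ` prefix, so a single `session.execute` call writes 100 vertices. A Python sketch of the same assembly, with two hypothetical rows standing in for the csv reader:

```python
# Mirror of the template's batching logic; two hypothetical rows instead of 100.
prefix = ("INSERT VERTEX Person(firstName, lastName, gender, birthday, "
          "creationDate, locationIP, browserUsed) VALUES ")
csv_index = [1, 2, 3, 4, 5, 6, 7]
rows = [
    ["933", "Mahinda", "Perera", "male", "1989-12-03", "2010-02-14", "119.235.7.103", "Firefox"],
    ["4139", "Carmen", "Lepland", "female", "1984-02-18", "2010-01-28", "195.20.151.175", "Chrome"],
]

batches = []
for d in rows:                                # the script calls session.getData() here
    values = ",".join('"' + d[i] + '"' for i in csv_index)
    batches.append(d[0] + ":(" + values + ")")
ngql = prefix + ",".join(batches)
# INSERT VERTEX Person(...) VALUES 933:("Mahinda",...),4139:("Carmen",...)
```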
2 changes: 1 addition & 1 deletion third/readme.md
@@ -64,7 +64,7 @@ In docker-compose, the default database is configured as `k6`.
* In `.env`, configure the influxdb address, e.g. `http://192.168.8.60:8086/k6`
* Log in to grafana and add an influxdb data source.
* Add the dashboard; see the JSON file [k6_influxdb.json](./promethues/k6_influxdb.json)
-* Run the stress test and view the charts in grafana
+* Run the stress test and view the dashboard panels in grafana

## Result

