Ramping VUs traffic tagging

I am looking for a way to distinguish between the ramp-up/ramp-down stages and the actual target-load stage. Looking at the Ramping VUs executor, I do not see a way to tag a specific stage.
I want to use the ramp-up traffic to warm up the environment before the load test, and I do not want its metrics included in the final summary report.

Example:
Run the test at 100 VUs for 10 minutes, with a 1-minute ramp-up. I want metrics only from those 10 minutes at 100 VUs, not the latencies from the ramp-up stage.
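In k6 terms, the scenario would look roughly like this (targets and durations here are just illustrative):

export let options = {
  stages: [
    { target: 100, duration: '1m' },  // ramp-up / warm-up (exclude from the summary)
    { target: 100, duration: '10m' }, // steady load at 100 VUs (measure only this)
    { target: 0, duration: '1m' }     // ramp-down
  ]
};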


Hello and welcome!

We will be making it easier to exclude undesired metrics as part of #1321, but for now you might consider using custom Trend metrics along with some code that keeps track of whether the test is ramping up or down.

Here’s a working example, using some code from Stack Overflow to help with the date arithmetic:

import { sleep } from 'k6';
import http from 'k6/http';
import { Trend } from 'k6/metrics';

let rampingUp = new Trend('response_time_ramping');
let steadyState = new Trend('response_time_steady');

export let options = {
  stages: [
    { target: 1, duration: '15s' }, // ramp-up
    { target: 1, duration: '15s' }, // steady-state
    { target: 0, duration: '15s' } // ramp-down
  ]
};

// use setup to calculate at which point (date + time) the test will be fully ramped up
export function setup() {
  const rampupDt = dateAdd(new Date(), 'second', 15);
  const rampdownDt = dateAdd(rampupDt, 'second', 15);

  console.log('Test will be fully ramped up at ' + rampupDt.toString() + ' and ramping down at ' + rampdownDt.toString());

  return { rampupDt: rampupDt, rampdownDt: rampdownDt }; // setup() needs to return an object
}

export default function (data) {
  const res = http.get('https://test-api.k6.io/public/crocodiles/');

  // Record the response time against the Trend for the current phase of the test.
  if (isRamping(data)) {
    rampingUp.add(res.timings.duration);
  } else {
    steadyState.add(res.timings.duration);
  }

  sleep(3);
}

// Anything before the end of ramp-up or after the start of ramp-down counts as ramping.
function isRamping(data) {
  // setup() data is serialized to JSON between lifecycle stages, so the dates arrive as strings.
  const now = new Date();
  return now < new Date(data.rampupDt) || now > new Date(data.rampdownDt);
}

/**
 * Adds time to a date. Modelled after MySQL DATE_ADD function.
 * Example: dateAdd(new Date(), 'minute', 30)  //returns 30 minutes from now.
 * https://stackoverflow.com/a/1214753/18511
 * 
 * @param date  Date to start with
 * @param interval  One of: year, quarter, month, week, day, hour, minute, second
 * @param units  Number of units of the given interval to add.
 */
function dateAdd(date, interval, units) {
  if(!(date instanceof Date))
    return undefined;
  var ret = new Date(date); //don't change original date
  var checkRollover = function() { if(ret.getDate() != date.getDate()) ret.setDate(0);};
  switch(String(interval).toLowerCase()) {
    case 'year'   :  ret.setFullYear(ret.getFullYear() + units); checkRollover();  break;
    case 'quarter':  ret.setMonth(ret.getMonth() + 3*units); checkRollover();  break;
    case 'month'  :  ret.setMonth(ret.getMonth() + units); checkRollover();  break;
    case 'week'   :  ret.setDate(ret.getDate() + 7*units);  break;
    case 'day'    :  ret.setDate(ret.getDate() + units);  break;
    case 'hour'   :  ret.setTime(ret.getTime() + units*3600000);  break;
    case 'minute' :  ret.setTime(ret.getTime() + units*60000);  break;
    case 'second' :  ret.setTime(ret.getTime() + units*1000);  break;
    default       :  ret = undefined;  break;
  }
  return ret;
}

With the above, you just need to make sure rampupDt and rampdownDt are calculated in setup() to match the durations in your stages.
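If you would rather not hardcode those offsets, one option (not part of the original example; it assumes the first stage is the ramp-up, the last stage is the ramp-down, and durations only use 's' or 'm') is to derive the cutoffs from options.stages:

// Hypothetical variant of setup() that derives the cutoffs from options.stages
// instead of hardcoding 15-second offsets.
function durationToMs(d) {
  const value = parseInt(d, 10);
  return d.endsWith('m') ? value * 60000 : value * 1000; // only 's' and 'm' handled
}

export function setup() {
  const rampupMs = durationToMs(options.stages[0].duration);
  const steadyMs = options.stages
    .slice(1, -1)
    .reduce((sum, stage) => sum + durationToMs(stage.duration), 0);

  const rampupDt = new Date(Date.now() + rampupMs);
  const rampdownDt = new Date(rampupDt.getTime() + steadyMs);

  return { rampupDt: rampupDt, rampdownDt: rampdownDt };
}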

You should end up with output like this:

     data_received..................: 18 kB  378 B/s
     data_sent......................: 1.9 kB 40 B/s
     http_req_blocked...............: avg=16.99ms   min=0s      med=0s       max=254.87ms p(90)=0s        p(95)=76.46ms
     http_req_connecting............: avg=3.83ms    min=0s      med=0s       max=57.57ms  p(90)=0s        p(95)=17.27ms
     http_req_duration..............: avg=101.63ms  min=66.17ms med=86.22ms  max=192.96ms p(90)=173.45ms  p(95)=191.68ms
       { expected_response:true }...: avg=101.63ms  min=66.17ms med=86.22ms  max=192.96ms p(90)=173.45ms  p(95)=191.68ms
     http_req_failed................: 0.00%  ✓ 0        ✗ 15
     http_req_receiving.............: avg=652.62µs  min=0s      med=234.2µs  max=3.63ms   p(90)=1.88ms    p(95)=2.84ms
     http_req_sending...............: avg=0s        min=0s      med=0s       max=0s       p(90)=0s        p(95)=0s
     http_req_tls_handshaking.......: avg=9.51ms    min=0s      med=0s       max=142.75ms p(90)=0s        p(95)=42.82ms
     http_req_waiting...............: avg=100.98ms  min=66.01ms med=85.93ms  max=192.83ms p(90)=172.81ms  p(95)=190.98ms
     http_reqs......................: 15     0.319758/s
     iteration_duration.............: avg=2.93s     min=998.5µs med=3.09s    max=3.34s    p(90)=3.2s      p(95)=3.24s
     iterations.....................: 15     0.319758/s
     response_time_ramping..........: avg=110.97054 min=66.1739 med=91.37655 max=192.9672 p(90)=191.31993 p(95)=192.143565
     response_time_steady...........: avg=82.96818  min=66.554  med=86.2253  max=102.3571 p(90)=98.08138  p(95)=100.21924
     vus............................: 1      min=1      max=1
     vus_max........................: 1      min=1      max=1

If you need to break this down further, e.g. to get per-request response times instead of a single aggregated metric for all HTTP requests, you would probably need to make use of tags.
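As a rough sketch (the phase tag name is just an illustration, and this reuses setup() and isRamping() from the example above), you could tag each request with the current phase and reference the tagged sub-metric in a threshold so it gets its own line in the summary:

import { sleep } from 'k6';
import http from 'k6/http';

export let options = {
  stages: [
    { target: 1, duration: '15s' }, // ramp-up
    { target: 1, duration: '15s' }, // steady-state
    { target: 0, duration: '15s' }  // ramp-down
  ],
  thresholds: {
    // Referencing a tagged sub-metric gives it its own line in the end-of-test summary.
    'http_req_duration{phase:steady}': ['p(95)<500'],
  },
};

// setup() and isRamping() as defined in the example above.

export default function (data) {
  const phase = isRamping(data) ? 'ramping' : 'steady';
  http.get('https://test-api.k6.io/public/crocodiles/', {
    tags: { phase: phase },
  });
  sleep(3);
}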
