/**
 * Renders Hadoop job execution details and system performance charts.
 *
 * GET parameters (all optional):
 *   execs[]  execution ids to chart (default: first/random matching exec)
 *   random   pick a random matching execution instead of the first one
 *   hosts    'Slaves' | 'Master' | a concrete host name (default 'Slaves')
 *   metric   'CPU' | 'Memory' | 'Disk' | 'Network' (default 'CPU')
 *   aggr     'AVG' | 'SUM' aggregation across hosts (default 'AVG')
 *   detail   sampling bucket size in seconds (default 10)
 *
 * Builds one HighCharts object per execution and chart type from the
 * aloja_logs SAR/VMSTATS/BWM tables and renders the perfcharts twig view.
 * On error a 'message' twig global is set instead of failing the page.
 */
public function performanceChartsAction() {
    $exec_rows = null;
    $id_exec_rows = null;
    $dbUtil = $this->container->getDBUtils();
    $this->buildFilters(array('perf_details' => array('default' => 1)));
    $charts = array();
    $clusters = array();
    try {
        //TODO fix, initialize variables
        $dbUtil->get_exec_details('1', 'id_exec', $exec_rows, $id_exec_rows);
        //check the URL
        $execs = Utils::get_GET_intArray('execs');
        if (empty($execs)) {
            $whereClause = $this->filters->getWhereClause();
            $query = "SELECT e.id_exec FROM aloja2.execs e JOIN aloja2.clusters c USING (id_cluster)\n LEFT JOIN aloja_ml.predictions p USING (id_exec)\n WHERE 1 " . DBUtils::getFilterExecs() . "{$whereClause} ";
            $query .= isset($_GET['random']) ? '' : 'LIMIT 1';
            $idExecs = $dbUtil->get_rows($query);
            // Fix: guard against an empty result set — previously rand(0, -1)
            // and $idExecs[0] would fail with a PHP warning/notice instead.
            if (empty($idExecs)) {
                throw new \Exception("No results for query!");
            }
            if (isset($_GET['random'])) {
                $execs = array($idExecs[rand(0, sizeof($idExecs) - 1)]['id_exec']);
            } else {
                $execs[] = $idExecs[0]['id_exec'];
            }
        }
        if (Utils::get_GET_string('random') && !$execs) {
            $keys = array_keys($exec_rows);
            $execs = array_unique(array($keys[array_rand($keys)], $keys[array_rand($keys)]));
        }
        // Chart options from the URL, with defaults.
        if (Utils::get_GET_string('hosts')) { $hosts = Utils::get_GET_string('hosts'); } else { $hosts = 'Slaves'; }
        if (Utils::get_GET_string('metric')) { $metric = Utils::get_GET_string('metric'); } else { $metric = 'CPU'; }
        if (Utils::get_GET_string('aggr')) { $aggr = Utils::get_GET_string('aggr'); } else { $aggr = 'AVG'; }
        if (Utils::get_GET_string('detail')) { $detail = Utils::get_GET_int('detail'); } else { $detail = 10; }
        if ($aggr == 'AVG') { $aggr_text = "Average"; } elseif ($aggr == 'SUM') { $aggr_text = "SUM"; } else { throw new \Exception("Aggregation type '{$aggr}' is not valid."); }
        // Resolve the 'hosts' selector into concrete host names for the SQL filters.
        if ($hosts == 'Slaves') {
            $selectedHosts = $dbUtil->get_rows("SELECT h.host_name from execs e inner join hosts h where e.id_exec IN (" . implode(", ", $execs) . ") AND h.id_cluster = e.id_cluster AND h.role='slave'");
            $selected_hosts = array();
            foreach ($selectedHosts as $host) { array_push($selected_hosts, $host['host_name']); }
        } elseif ($hosts == 'Master') {
            $selectedHosts = $dbUtil->get_rows("SELECT h.host_name from execs e inner join hosts h where e.id_exec IN (" . implode(", ", $execs) . ") AND h.id_cluster = e.id_cluster AND h.role='master' AND h.host_name != ''");
            $selected_hosts = array();
            foreach ($selectedHosts as $host) { array_push($selected_hosts, $host['host_name']); }
        } else {
            $selected_hosts = array($hosts);
        }
        $charts = array();
        $exec_details = array();
        $chart_details = array();
        $clusters = array();
        foreach ($execs as $execKey => $exec) {
            //do a security check
            $tmp = filter_var($exec, FILTER_SANITIZE_NUMBER_INT);
            if (!is_numeric($tmp) || !($tmp > 0)) {
                // Fix: remove by array KEY — the original unset($execs[$exec])
                // used the value as the key, so invalid ids were never removed.
                unset($execs[$execKey]);
                continue;
            }
            $exec_title = $dbUtil->get_exec_details($exec, 'exec', $exec_rows, $id_exec_rows);
            $pos_name = strpos($exec_title, '/');
            $exec_title = ' ' . strtoupper(substr($exec_title, $pos_name + 1)) . ' ' . (strpos($exec_title, '_az') > 0 ? 'AZURE' : 'LOCAL') . " ID_{$exec} " . substr($exec_title, 21, strlen($exec_title) - $pos_name - (strpos($exec_title, '_az') > 0 ? 21 : 18));
            $exec_details[$exec]['time'] = $dbUtil->get_exec_details($exec, 'exe_time', $exec_rows, $id_exec_rows);
            $exec_details[$exec]['start_time'] = $dbUtil->get_exec_details($exec, 'start_time', $exec_rows, $id_exec_rows);
            $exec_details[$exec]['end_time'] = $dbUtil->get_exec_details($exec, 'end_time', $exec_rows, $id_exec_rows);
            $id_cluster = $dbUtil->get_exec_details($exec, 'id_cluster', $exec_rows, $id_exec_rows);
            if (!in_array($id_cluster, $clusters)) { $clusters[] = $id_cluster; }
            //$end_time = get_exec_details($exec, 'init_time');
            $date_where = " AND date BETWEEN '{$exec_details[$exec]['start_time']}' and '{$exec_details[$exec]['end_time']}' ";
            $where = " WHERE id_exec = '{$exec}' AND host IN ('" . join("','", $selected_hosts) . "') {$date_where}";
            $where_BWM = " WHERE id_exec = '{$exec}' AND host IN ('" . join("','", $selected_hosts) . "') ";
            $where_VMSTATS = " WHERE id_exec = '{$exec}' AND host IN ('" . join("','", $selected_hosts) . "') ";
            // Down-sample the time series into one bucket per $detail seconds.
            $where_sampling = "round(time/{$detail})";
            $group_by = " GROUP BY {$where_sampling} ORDER by time";
            $group_by_vmstats = " GROUP BY {$where_sampling} ORDER by time";
            $where_sampling_BWM = "round(unix_timestamp/{$detail})";
            $group_by_BWM = " GROUP BY {$where_sampling_BWM} ORDER by unix_timestamp";
            // One entry per chart: metric group, SQL, series fields and display options.
            $charts[$exec] = array(
                'job_status' => array('metric' => "ALL", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time,\n maps map,shuffle,merge,reduce,waste FROM aloja_logs.JOB_status\n WHERE id_exec = '{$exec}' {$date_where} GROUP BY job_name, date ORDER by job_name, time;", 'fields' => array('map', 'shuffle', 'reduce', 'waste', 'merge'), 'title' => "Job execution history {$exec_title} ", 'group_title' => 'Job execution history (number of running Hadoop processes)', 'percentage' => false, 'stacked' => false, 'negative' => false),
                'cpu' => array('metric' => "CPU", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`%user`) `%user`, {$aggr}(`%system`) `%system`, {$aggr}(`%steal`) `%steal`, {$aggr}(`%iowait`)\n `%iowait`, {$aggr}(`%nice`) `%nice` FROM aloja_logs.SAR_cpu {$where} {$group_by};", 'fields' => array('%user', '%system', '%steal', '%iowait', '%nice'), 'title' => "CPU Utilization ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'CPU Utilization ' . "({$aggr_text}, {$hosts})", 'percentage' => $aggr == 'SUM' ? '300' : 100, 'stacked' => true, 'negative' => false),
                'load' => array('metric' => "CPU", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`ldavg-1`) `ldavg-1`, {$aggr}(`ldavg-5`) `ldavg-5`, {$aggr}(`ldavg-15`) `ldavg-15`\n FROM aloja_logs.SAR_load {$where} {$group_by};", 'fields' => array('ldavg-15', 'ldavg-5', 'ldavg-1'), 'title' => "CPU Load Average ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'CPU Load Average ' . "({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'load_queues' => array('metric' => "CPU", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`runq-sz`) `runq-sz`, {$aggr}(`blocked`) `blocked`\n FROM aloja_logs.SAR_load {$where} {$group_by};", 'fields' => array('runq-sz', 'blocked'), 'title' => "CPU Queues ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'CPU Queues ' . "({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'load_tasks' => array('metric' => "CPU", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`plist-sz`) `plist-sz` FROM aloja_logs.SAR_load {$where} {$group_by};", 'fields' => array('plist-sz'), 'title' => "Number of tasks for CPUs ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Number of tasks for CPUs ' . "({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'switches' => array('metric' => "CPU", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`proc/s`) `proc/s`, {$aggr}(`cswch/s`) `cswch/s` FROM aloja_logs.SAR_switches {$where} {$group_by};", 'fields' => array('proc/s', 'cswch/s'), 'title' => "CPU Context Switches ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'CPU Context Switches' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'interrupts' => array('metric' => "CPU", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`intr/s`) `intr/s` FROM aloja_logs.SAR_interrupts {$where} {$group_by};", 'fields' => array('intr/s'), 'title' => "CPU Interrupts ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'CPU Interrupts ' . "({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'memory_util' => array('metric' => "Memory", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(kbmemfree)*1024 kbmemfree, {$aggr}(kbmemused)*1024 kbmemused\n FROM aloja_logs.SAR_memory_util {$where} {$group_by};", 'fields' => array('kbmemfree', 'kbmemused'), 'title' => "Memory Utilization ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Memory Utilization' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => true, 'negative' => false),
                'memory_util_det' => array('metric' => "Memory", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(kbbuffers)*1024 kbbuffers, {$aggr}(kbcommit)*1024 kbcommit, {$aggr}(kbcached)*1024 kbcached,\n {$aggr}(kbactive)*1024 kbactive, {$aggr}(kbinact)*1024 kbinact\n FROM aloja_logs.SAR_memory_util {$where} {$group_by};", 'fields' => array('kbcached', 'kbbuffers', 'kbinact', 'kbcommit', 'kbactive'), 'title' => "Memory Utilization Details ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Memory Utilization Details' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => true, 'negative' => false),
                'memory' => array('metric' => "Memory", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`frmpg/s`) `frmpg/s`, {$aggr}(`bufpg/s`) `bufpg/s`, {$aggr}(`campg/s`) `campg/s`\n FROM aloja_logs.SAR_memory {$where} {$group_by};", 'fields' => array('frmpg/s', 'bufpg/s', 'campg/s'), 'title' => "Memory Stats ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Memory Stats' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'io_pagging_disk' => array('metric' => "Memory", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`pgpgin/s`)*1024 `pgpgin/s`, {$aggr}(`pgpgout/s`)*1024 `pgpgout/s`\n FROM aloja_logs.SAR_io_paging {$where} {$group_by};", 'fields' => array('pgpgin/s', 'pgpgout/s'), 'title' => "I/O Paging IN/OUT to disk ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'I/O Paging IN/OUT to disk' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'io_pagging' => array('metric' => "Memory", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`fault/s`) `fault/s`, {$aggr}(`majflt/s`) `majflt/s`, {$aggr}(`pgfree/s`) `pgfree/s`,\n {$aggr}(`pgscank/s`) `pgscank/s`, {$aggr}(`pgscand/s`) `pgscand/s`, {$aggr}(`pgsteal/s`) `pgsteal/s`\n FROM aloja_logs.SAR_io_paging {$where} {$group_by};", 'fields' => array('fault/s', 'majflt/s', 'pgfree/s', 'pgscank/s', 'pgscand/s', 'pgsteal/s'), 'title' => "I/O Paging ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'I/O Paging' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'io_pagging_vmeff' => array('metric' => "Memory", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`%vmeff`) `%vmeff` FROM aloja_logs.SAR_io_paging {$where} {$group_by};", 'fields' => array('%vmeff'), 'title' => "I/O Paging %vmeff ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'I/O Paging %vmeff' . " ({$aggr_text}, {$hosts})", 'percentage' => $aggr == 'SUM' ? '300' : 100, 'stacked' => false, 'negative' => false),
                'io_transactions' => array('metric' => "Disk", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`tps`) `tp/s`, {$aggr}(`rtps`) `read tp/s`, {$aggr}(`wtps`) `write tp/s`\n FROM aloja_logs.SAR_io_rate {$where} {$group_by};", 'fields' => array('tp/s', 'read tp/s', 'write tp/s'), 'title' => "I/O Transactions/s ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'I/O Transactions/s' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'io_bytes' => array('metric' => "Disk", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`bread/s`)/(1024) `KB_read/s`, {$aggr}(`bwrtn/s`)/(1024) `KB_wrtn/s`\n FROM aloja_logs.SAR_io_rate {$where} {$group_by};", 'fields' => array('KB_read/s', 'KB_wrtn/s'), 'title' => "KB R/W ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'KB R/W' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'block_devices_util' => array('metric' => "Disk", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`%util_SUM`) `%util_SUM`, {$aggr}(`%util_MAX`) `%util_MAX`\n FROM (\n select\n id_exec, host, date,\n sum(`%util`) `%util_SUM`,\n max(`%util`) `%util_MAX`\n from aloja_logs.SAR_block_devices d WHERE id_exec = '{$exec}'\n GROUP BY date, host\n ) t {$where} {$group_by};", 'fields' => array('%util_SUM', '%util_MAX'), 'title' => "Disk Utilization percentage (All DEVs, {$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Disk Utilization percentage' . " (All DEVs, {$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'block_devices_await' => array('metric' => "Disk", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`await_SUM`) `await_SUM`, {$aggr}(`await_MAX`) `await_MAX`\n FROM (\n select\n id_exec, host, date,\n sum(`await`) `await_SUM`,\n max(`await`) `await_MAX`\n from aloja_logs.SAR_block_devices d WHERE id_exec = '{$exec}'\n GROUP BY date, host\n ) t {$where} {$group_by};", 'fields' => array('await_SUM', 'await_MAX'), 'title' => "Disk request wait time in ms (All DEVs, {$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Disk request wait time in ms' . " (All DEVs, {$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'block_devices_svctm' => array('metric' => "Disk", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`svctm_SUM`) `svctm_SUM`, {$aggr}(`svctm_MAX`) `svctm_MAX`\n FROM (\n select\n id_exec, host, date,\n sum(`svctm`) `svctm_SUM`,\n max(`svctm`) `svctm_MAX`\n from aloja_logs.SAR_block_devices d WHERE id_exec = '{$exec}'\n GROUP BY date, host\n ) t {$where} {$group_by};", 'fields' => array('svctm_SUM', 'svctm_MAX'), 'title' => "Disk service time in ms (All DEVs, {$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Disk service time in ms' . " (All DEVs, {$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'block_devices_queues' => array('metric' => "Disk", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`avgrq-sz`) `avg-req-size`, {$aggr}(`avgqu-sz`) `avg-queue-size`\n FROM (\n select\n id_exec, host, date,\n max(`avgrq-sz`) `avgrq-sz`,\n max(`avgqu-sz`) `avgqu-sz`\n from aloja_logs.SAR_block_devices d WHERE id_exec = '{$exec}'\n GROUP BY date, host\n ) t {$where} {$group_by};", 'fields' => array('avg-req-size', 'avg-queue-size'), 'title' => "Disk req and queue sizes ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Disk req and queue sizes' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'vmstats_io' => array('metric' => "Disk", 'query' => "SELECT time, {$aggr}(`bi`)/(1024) `KB_IN`, {$aggr}(`bo`)/(1024) `KB_OUT`\n FROM aloja_logs.VMSTATS {$where_VMSTATS} {$group_by_vmstats};", 'fields' => array('KB_IN', 'KB_OUT'), 'title' => "VMSTATS KB I/O ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'VMSTATS KB I/O' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'vmstats_rb' => array('metric' => "CPU", 'query' => "SELECT time, {$aggr}(`r`) `runnable procs`, {$aggr}(`b`) `sleep procs` FROM aloja_logs.VMSTATS {$where_VMSTATS} {$group_by_vmstats};", 'fields' => array('runnable procs', 'sleep procs'), 'title' => "VMSTATS Processes (r-b) ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'VMSTATS Processes (r-b)' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                // Fix: titles said "VMSTATS Processes (r-b)" (copy-paste from vmstats_rb)
                // although this chart plots memory columns (buff/cache/free/swpd).
                'vmstats_memory' => array('metric' => "Memory", 'query' => "SELECT time, {$aggr}(`buff`) `buff`,\n {$aggr}(`cache`) `cache`,\n {$aggr}(`free`) `free`,\n {$aggr}(`swpd`) `swpd`\n FROM aloja_logs.VMSTATS {$where_VMSTATS} {$group_by_vmstats};", 'fields' => array('buff', 'cache', 'free', 'swpd'), 'title' => "VMSTATS Memory ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'VMSTATS Memory' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => true, 'negative' => false),
                'net_devices_kbs' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(if(IFACE != 'lo', `rxkB/s`, NULL))/1024 `rxMB/s_NET`, {$aggr}(if(IFACE != 'lo', `txkB/s`, NULL))/1024 `txMB/s_NET`\n FROM aloja_logs.SAR_net_devices {$where} AND IFACE not IN ('') {$group_by};", 'fields' => array('rxMB/s_NET', 'txMB/s_NET'), 'title' => "MB/s received and transmitted ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'MB/s received and transmitted' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'net_devices_kbs_local' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(if(IFACE = 'lo', `rxkB/s`, NULL))/1024 `rxMB/s_LOCAL`, {$aggr}(if(IFACE = 'lo', `txkB/s`, NULL))/1024 `txMB/s_LOCAL`\n FROM aloja_logs.SAR_net_devices {$where} AND IFACE not IN ('') {$group_by};", 'fields' => array('rxMB/s_LOCAL', 'txMB/s_LOCAL'), 'title' => "MB/s received and transmitted LOCAL ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'MB/s received and transmitted LOCAL' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                // Fix: the tx series read `txkB/s` (kilobytes) while the alias and title
                // say packets — read sar's `txpck/s` column instead.
                'net_devices_pcks' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(if(IFACE != 'lo', `rxpck/s`, NULL))/1024 `rxpck/s_NET`, {$aggr}(if(IFACE != 'lo', `txpck/s`, NULL))/1024 `txpck/s_NET`\n FROM aloja_logs.SAR_net_devices {$where} AND IFACE not IN ('') {$group_by};", 'fields' => array('rxpck/s_NET', 'txpck/s_NET'), 'title' => "Packets/s received and transmitted ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Packets/s received and transmitted' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                // Fix: both series read the kB columns (`rxkB/s`/`txkB/s`) although the
                // aliases/title say packets — use `rxpck/s`/`txpck/s`.
                'net_devices_pcks_local' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(if(IFACE = 'lo', `rxpck/s`, NULL))/1024 `rxpck/s_LOCAL`, {$aggr}(if(IFACE = 'lo', `txpck/s`, NULL))/1024 `txpck/s_LOCAL`\n FROM aloja_logs.SAR_net_devices {$where} AND IFACE not IN ('') {$group_by};", 'fields' => array('rxpck/s_LOCAL', 'txpck/s_LOCAL'), 'title' => "Packets/s received and transmitted LOCAL ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Packets/s received and transmitted LOCAL' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                // Fix: titles said "Packets/s received and transmitted" (copy-paste)
                // although this chart plots socket usage counters.
                'net_sockets_pcks' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`totsck`) `totsck`,\n {$aggr}(`tcpsck`) `tcpsck`,\n {$aggr}(`udpsck`) `udpsck`,\n {$aggr}(`rawsck`) `rawsck`,\n {$aggr}(`ip-frag`) `ip-frag`,\n {$aggr}(`tcp-tw`) `tcp-time-wait`\n FROM aloja_logs.SAR_net_sockets {$where} {$group_by};", 'fields' => array('totsck', 'tcpsck', 'udpsck', 'rawsck', 'ip-frag', 'tcp-time-wait'), 'title' => "Network sockets in use ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Network sockets in use' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'net_erros' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(date, '{$exec_details[$exec]['start_time']}')) time, {$aggr}(`rxerr/s`) `rxerr/s`,\n {$aggr}(`txerr/s`) `txerr/s`,\n {$aggr}(`coll/s`) `coll/s`,\n {$aggr}(`rxdrop/s`) `rxdrop/s`,\n {$aggr}(`txdrop/s`) `txdrop/s`,\n {$aggr}(`txcarr/s`) `txcarr/s`,\n {$aggr}(`rxfram/s`) `rxfram/s`,\n {$aggr}(`rxfifo/s`) `rxfifo/s`,\n {$aggr}(`txfifo/s`) `txfifo/s`\n FROM aloja_logs.SAR_net_errors {$where} {$group_by};", 'fields' => array('rxerr/s', 'txerr/s', 'coll/s', 'rxdrop/s', 'txdrop/s', 'txcarr/s', 'rxfram/s', 'rxfifo/s', 'txfifo/s'), 'title' => "Network errors ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'Network errors' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'bwm_in_out_total' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(FROM_UNIXTIME(unix_timestamp),'{$exec_details[$exec]['start_time']}')) time,\n {$aggr}(`bytes_in`)/(1024*1024) `MB_in`,\n {$aggr}(`bytes_out`)/(1024*1024) `MB_out`\n FROM aloja_logs.BWM2 {$where_BWM} AND iface_name = 'total' {$group_by_BWM};", 'fields' => array('MB_in', 'MB_out'), 'title' => "BW Monitor NG Total Bytes IN/OUT ({$aggr_text}, {$hosts}) {$exec_title}", 'group_title' => 'BW Monitor NG Total Bytes IN/OUT' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'bwm_packets_total' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(FROM_UNIXTIME(unix_timestamp),'{$exec_details[$exec]['start_time']}')) time,\n {$aggr}(`packets_in`) `packets_in`,\n {$aggr}(`packets_out`) `packets_out`\n FROM aloja_logs.BWM2 {$where_BWM} AND iface_name = 'total' {$group_by_BWM};", 'fields' => array('packets_in', 'packets_out'), 'title' => "BW Monitor NG Total packets IN/OUT ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'BW Monitor NG Total packets IN/OUT' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false),
                'bwm_errors_total' => array('metric' => "Network", 'query' => "SELECT time_to_sec(timediff(FROM_UNIXTIME(unix_timestamp),'{$exec_details[$exec]['start_time']}')) time,\n {$aggr}(`errors_in`) `errors_in`,\n {$aggr}(`errors_out`) `errors_out`\n FROM aloja_logs.BWM2 {$where_BWM} AND iface_name = 'total' {$group_by_BWM};", 'fields' => array('errors_in', 'errors_out'), 'title' => "BW Monitor NG Total errors IN/OUT ({$aggr_text}, {$hosts}) {$exec_title} ", 'group_title' => 'BW Monitor NG Total errors IN/OUT' . " ({$aggr_text}, {$hosts})", 'percentage' => false, 'stacked' => false, 'negative' => false));
            $has_records = false; //of any chart
            foreach ($charts[$exec] as $key_type => $chart) {
                if ($chart['metric'] == 'ALL' || $metric == $chart['metric']) {
                    $charts[$exec][$key_type]['chart'] = new HighCharts();
                    $charts[$exec][$key_type]['chart']->setTitle($chart['title']);
                    $charts[$exec][$key_type]['chart']->setPercentage($chart['percentage']);
                    $charts[$exec][$key_type]['chart']->setStacked($chart['stacked']);
                    $charts[$exec][$key_type]['chart']->setFields($chart['fields']);
                    $charts[$exec][$key_type]['chart']->setNegativeValues($chart['negative']);
                    list($rows, $max, $min) = Utils::minimize_exec_rows($dbUtil->get_rows($chart['query']), $chart['stacked']);
                    // Track global max/min per chart type so compared charts share a Y axis.
                    if (!isset($chart_details[$key_type]['max']) || $max > $chart_details[$key_type]['max']) { $chart_details[$key_type]['max'] = $max; }
                    if (!isset($chart_details[$key_type]['min']) || $min < $chart_details[$key_type]['min']) { $chart_details[$key_type]['min'] = $min; }
                    //$charts[$exec][$key_type]['chart']->setMax($max);
                    //$charts[$exec][$key_type]['chart']->setMin($min);
                    if (count($rows) > 0) {
                        $has_records = true;
                        $charts[$exec][$key_type]['chart']->setRows($rows);
                    }
                }
            }
        }
        if ($exec_details) {
            // Scale each execution's display width relative to the longest one.
            $max_time = null;
            foreach ($exec_details as $exec => $exe_time) {
                if (!$max_time || $exe_time['time'] > $max_time) { $max_time = $exe_time['time']; }
            }
            foreach ($exec_details as $exec => $exe_time) {
                #if (!$max_time) throw new Exception('Missing MAX time');
                $exec_details[$exec]['size'] = round($exe_time['time'] / $max_time * 100, 2); //TODO improve
                $exec_details[$exec]['max_time'] = $max_time;
            }
        }
        // Fix: collapsed the empty if/else — $has_records is only set when at least
        // one execution was processed above, so throw when none were.
        if (!isset($has_records)) {
            throw new \Exception("No results for query!");
        }
    } catch (\Exception $e) {
        if (empty($execs)) {
            $this->container->getTwig()->addGlobal('message', "No results for query!\n");
        } else {
            $this->container->getTwig()->addGlobal('message', $e->getMessage() . "\n");
        }
    }
    $chartsJS = '';
    if ($charts) {
        reset($charts);
        $current_chart = current($charts);
        foreach ($current_chart as $chart_type => $chart) {
            foreach ($execs as $exec) {
                if (isset($charts[$exec][$chart_type]['chart'])) {
                    //make Y axis all the same when comparing
                    $charts[$exec][$chart_type]['chart']->setMax($chart_details[$chart_type]['max']);
                    //the same for max X (plus 0.7%)
                    $charts[$exec][$chart_type]['chart']->setMaxX($exec_details[$exec]['max_time'] * 1.007);
                    //print the JS
                    $chartsJS .= $charts[$exec][$chart_type]['chart']->getChartJS() . "\n\n";
                }
            }
        }
    }
    if (!isset($exec)) { $exec = ''; }
    return $this->render('perfDetailsViews/perfcharts.html.twig', array('title' => 'Hadoop Job/s Execution details and System Performance Charts', 'chartsJS' => $chartsJS, 'charts' => $charts, 'metric' => $metric, 'execs' => $execs, 'aggr' => $aggr, 'hosts' => $hosts, 'host_rows' => $dbUtil->get_hosts($clusters), 'detail' => $detail));
}
/**
 * AJAX endpoint: clusters per-execution job metric centroids with DBSCAN and
 * prints the result as gzip-compressed JSON.
 *
 * GET parameters: jobid, metric_x / metric_y (metric column indexes, default
 * 0 and 1), task_type, plus the standard exec/cluster filters.
 * Output shape: {seriesData: [...], noiseData: [...], pending: int}.
 * NOTE(review): emits headers and echoes directly instead of returning a
 * response object — presumably intentional for this AJAX route; confirm.
 */
public function dbscanexecsDataAction() {
    //ini_set('memory_limit', '384M');
    $db = $this->container->getDBUtils();
    $this->buildFilters();
    $whereClause = $this->filters->getWhereClause(array('execs' => 'e', 'clusters' => 'c'));
    $table_name = "e";
    $jobid = Utils::get_GET_string("jobid");
    // Axis metrics default to indexes 0 (x) and 1 (y) when absent from the URL.
    $metric_x = Utils::get_GET_int("metric_x") !== null ? Utils::get_GET_int("metric_x") : 0;
    $metric_y = Utils::get_GET_int("metric_y") !== null ? Utils::get_GET_int("metric_y") : 1;
    $task_type = $db->get_task_type(Utils::get_GET_string("task_type"));
    list($bench, $job_offset, $id_exec) = $db->get_jobid_info($jobid);
    // Calc pending dbscanexecs (if any)
    $pending = $db->get_dbscanexecs_pending($bench, $job_offset, $metric_x, $metric_y, $task_type, $whereClause);
    // Only the first pending jobid is computed per request; the client learns
    // how many remain via 'pending' below and can keep polling.
    if (count($pending) > 0) {
        $db->get_dbscan($pending[0]['jobid'], $metric_x, $metric_y, $task_type);
    }
    // Retrieve calculated dbscanexecs from database
    $task_type_select = $db->get_task_type_query($task_type, $filter_null = true);
    $query = "\n SELECT\n d.`id_exec`,\n d.`centroid_x`,\n d.`centroid_y`\n FROM aloja2.JOB_dbscan d, aloja2.execs e\n JOIN aloja2.clusters c USING (id_cluster)\n LEFT JOIN aloja_ml.predictions p USING (id_exec)\n WHERE\n d.`id_exec` = e.`id_exec` AND\n d.`bench` = :bench AND\n d.`job_offset` = :job_offset AND\n d.`metric_x` = :metric_x AND\n d.`metric_y` = :metric_y\n " . $task_type_select('d') . "\n {$whereClause}\n ;";
    $query_params = array(":bench" => $bench, ":job_offset" => $job_offset, ":metric_x" => $metric_x, ":metric_y" => $metric_y);
    // Since we are calculating new results, we have to bypass the cache
    $_GET['NO_CACHE'] = 1;
    $rows = $db->get_rows($query, $query_params);
    $points = new Cluster(); // Used instead of a simple array to calc x/y min/max
    foreach ($rows as $row) {
        $points[] = new Point($row['centroid_x'], $row['centroid_y'], array('id_exec' => $row['id_exec']));
    }
    $dbscan = new DBSCAN();
    list($clusters, $noise) = $dbscan->execute((array) $points);
    // One series per DBSCAN cluster, each carrying its bounding box for the chart.
    $seriesData = array();
    foreach ($clusters as $cluster) {
        $data = array();
        foreach ($cluster as $point) {
            $data[] = array('x' => $point->x, 'y' => $point->y, 'id_exec' => $point->info['id_exec']);
        }
        if ($data) {
            $seriesData[] = array('points' => $data, 'size' => $cluster->count(), 'x_min' => $cluster->getXMin(), 'x_max' => $cluster->getXMax(), 'y_min' => $cluster->getYMin(), 'y_max' => $cluster->getYMax());
        }
    }
    // Points DBSCAN did not assign to any cluster.
    $noiseData = array();
    foreach ($noise as $point) {
        $noiseData[] = array('x' => $point->x, 'y' => $point->y, 'id_exec' => $point->info['id_exec']);
    }
    // 'pending' excludes the one result computed above so the client knows when to stop polling.
    $result = ['seriesData' => $seriesData, 'noiseData' => $noiseData, 'pending' => max(0, count($pending) - 1)];
    header('Content-Type: application/json');
    ob_start('ob_gzhandler');
    echo json_encode($result, JSON_NUMERIC_CHECK);
}
/**
 * Renders the "parameter evaluation" view: average/min execution time per
 * benchmark, grouped by one configuration parameter.
 *
 * GET parameters: parameval (grouping column, default 'maps'), minexecs
 * (minimum group size filter), plus the standard filters and the ML filters
 * prediction_model / upred (use predictions) / uobsr (use observations).
 * Depending on upred/uobsr the data comes from observed execs, from
 * aloja_ml.predictions, or from the UNION of both. Renders parameval.html.twig
 * with JSON-encoded HighCharts series; errors are surfaced via the 'message'
 * twig global.
 */
public function paramEvaluationAction() {
    $db = $this->container->getDBUtils();
    $this->buildFilters(array('minexecs' => array('default' => null, 'type' => 'inputNumber', 'label' => 'Minimum executions:', 'parseFunction' => function () { return 0; }, 'filterGroup' => 'basic')));
    $whereClause = $this->filters->getWhereClause();
    // HTML list describing the available ML learners, shown in the view.
    $model_html = '';
    $model_info = $db->get_rows("SELECT id_learner, model, algorithm, dataslice FROM aloja_ml.learners");
    foreach ($model_info as $row) {
        $model_html = $model_html . "<li><b>" . $row['id_learner'] . "</b> => " . $row['algorithm'] . " : " . $row['model'] . " : " . $row['dataslice'] . "</li>";
    }
    $categories = '';
    $series = '';
    try {
        $paramEval = isset($_GET['parameval']) && Utils::get_GET_string('parameval') != '' ? Utils::get_GET_string('parameval') : 'maps';
        $minExecs = isset($_GET['minexecs']) ? Utils::get_GET_int('minexecs') : -1;
        $this->filters->changeCurrentChoice('minexecs', $minExecs == -1 ? null : $minExecs);
        // Maps each grouping parameter to the table alias ('e' execs / 'c' clusters) that owns it.
        $shortAliasParamEval = array('maps' => 'e', 'comp' => 'e', 'id_cluster' => 'c', 'net' => 'e', 'disk' => 'e', 'replication' => 'e', 'iofilebuf' => 'e', 'blk_size' => 'e', 'iosf' => 'e', 'vm_size' => 'c', 'vm_cores' => 'c', 'vm_ram' => 'c', 'datanodes' => 'c', 'hadoop_version' => 'e', 'type' => 'c');
        $minExecsFilter = "";
        // NOTE(review): strict '>' means groups with exactly $minExecs rows are
        // excluded, although the label says "Minimum executions" — confirm intended.
        if ($minExecs > 0) { $minExecsFilter = "HAVING COUNT(*) > {$minExecs}"; }
        $filter_execs = DBUtils::getFilterExecs();
        $options = $this->filters->getFiltersArray()[$paramEval]['choices'];
        $benchOptions = "SELECT DISTINCT e.bench FROM aloja2.execs e JOIN aloja2.clusters c USING (id_cluster) LEFT JOIN aloja_ml.predictions p USING (id_exec) WHERE 1 {$filter_execs} {$whereClause} GROUP BY {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench order by {$shortAliasParamEval[$paramEval]}.{$paramEval}";
        $params = $this->filters->getFiltersSelectedChoices(array('prediction_model', 'upred', 'uobsr'));
        // The predictions table uses pred_time/creation_time column names.
        $whereClauseML = str_replace("exe_time", "pred_time", $whereClause);
        $whereClauseML = str_replace("start_time", "creation_time", $whereClauseML);
        $query = "SELECT COUNT(*) AS count, {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench, avg(e.exe_time) avg_exe_time, min(e.exe_time) min_exe_time\n\t\t\t\t\t FROM aloja2.execs AS e JOIN aloja2.clusters AS c USING (id_cluster)\n\t\t\t\t\t LEFT JOIN aloja_ml.predictions AS p USING (id_exec)\n\t\t\t\t\t WHERE 1 {$filter_execs} {$whereClause}\n\t\t\t\t\t GROUP BY {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench {$minExecsFilter} ORDER BY e.bench, {$shortAliasParamEval[$paramEval]}.{$paramEval}";
        $queryPredictions = "\n\t\t\t\t\tSELECT COUNT(*) AS count, {$shortAliasParamEval[$paramEval]}.{$paramEval}, CONCAT('pred_',e.bench) as bench,\n\t\t\t\t\t\tavg(e.pred_time) as avg_exe_time, min(e.pred_time) as min_exe_time\n\t\t\t\t\t\tFROM aloja_ml.predictions AS e\n\t\t\t\t\t\tJOIN clusters c USING (id_cluster)\n\t\t\t\t\t\tWHERE 1 {$filter_execs} " . str_replace("p.", "e.", $whereClauseML) . " AND e.id_learner = '" . $params['prediction_model'] . "'\n\t\t\t\t\t\tGROUP BY {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench {$minExecsFilter} ORDER BY e.bench, {$shortAliasParamEval[$paramEval]}.{$paramEval}";
        // get the result rows
        if ($params['uobsr'] == 1 && $params['upred'] == 1) {
            // Both observations and predictions: UNION the two queries.
            $query = "({$query}) UNION ({$queryPredictions})";
            $benchOptions = "SELECT DISTINCT e.bench FROM aloja2.execs e JOIN aloja2.clusters c USING (id_cluster) LEFT JOIN aloja_ml.predictions p USING (id_exec) WHERE 1 {$filter_execs} {$whereClause} GROUP BY {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench\n\t\t\t\t\t\t\t\t UNION\n\t\t\t\t\t\t\t\t (SELECT DISTINCT CONCAT('pred_', e.bench) as bench FROM aloja_ml.predictions AS e\n\t\t\t\t\t\t\t\t JOIN clusters c USING (id_cluster)\n\t\t\t\t\t\t\t\t WHERE 1 {$filter_execs} " . str_replace("p.", "e.", $whereClauseML) . " AND e.id_learner = '" . $params['prediction_model'] . "'\n\t\t\t\t\t\t\t\t GROUP BY {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench {$minExecsFilter})\n\t\t\t\t\t\t\t\t ORDER BY bench";
            $optionsPredictions = "SELECT DISTINCT {$shortAliasParamEval[$paramEval]}.{$paramEval} FROM aloja_ml.predictions AS e JOIN clusters c USING (id_cluster) WHERE 1 {$filter_execs} " . str_replace("p.", "e.", $whereClauseML) . " AND e.id_learner = '" . $params['prediction_model'] . "' ORDER BY {$shortAliasParamEval[$paramEval]}.{$paramEval}";
            $optionsPredictions = $db->get_rows($optionsPredictions);
            foreach ($optionsPredictions as $predOption) { $options[] = $predOption[$paramEval]; }
        } else {
            if ($params['uobsr'] == 0 && $params['upred'] == 1) {
                // Predictions only.
                $query = $queryPredictions;
                $benchOptions = "SELECT DISTINCT CONCAT('pred_', e.bench) as bench FROM aloja_ml.predictions AS e\n \t\t\t\t\t\t\t\t JOIN clusters c USING (id_cluster)\n\t\t\t\t\t\t\t\t WHERE 1 {$filter_execs} " . str_replace("p.", "e.", $whereClauseML) . " AND e.id_learner = '" . $params['prediction_model'] . "'\n\t\t\t\t\t\t\t\t GROUP BY {$shortAliasParamEval[$paramEval]}.{$paramEval}, e.bench {$minExecsFilter} ORDER BY e.bench, {$shortAliasParamEval[$paramEval]}.{$paramEval}";
                $options = array();
                $optionsPredictions = "SELECT DISTINCT {$shortAliasParamEval[$paramEval]}.{$paramEval} FROM aloja_ml.predictions AS e JOIN clusters c USING (id_cluster) WHERE 1 {$filter_execs} " . str_replace("p.", "e.", $whereClauseML) . " AND e.id_learner = '" . $params['prediction_model'] . "' ORDER BY {$shortAliasParamEval[$paramEval]}.{$paramEval}";
                $optionsPredictions = $db->get_rows($optionsPredictions);
                foreach ($optionsPredictions as $predOption) { $options[] = $predOption[$paramEval]; }
            } else {
                if ($params['uobsr'] == 0 && $params['upred'] == 0) {
                    // Neither selected: warn and fall through to the default observed-execs query.
                    $this->container->getTwig()->addGlobal('message', "Warning: No data selected (Predictions|Observations) from the ML Filters. 
Adding the Observed executions to the figure by default.\n");
                }
            }
        }
        $rows = $db->get_rows($query);
        $benchOptions = $db->get_rows($benchOptions);
        if (!$rows) { throw new \Exception("No results for query!"); }
        // Translate raw option values into display names for the X-axis categories.
        $paramOptions = array();
        foreach ($options as $option) {
            if ($paramEval == 'id_cluster') { $paramOptions[] = Utils::getClusterName($option, $db); } else { if ($paramEval == 'comp') { $paramOptions[] = Utils::getCompressionName($option); } else { if ($paramEval == 'net') { $paramOptions[] = Utils::getNetworkName($option); } else { if ($paramEval == 'disk') { $paramOptions[] = Utils::getDisksName($option); } else { if ($paramEval == 'vm_ram') { $paramOptions[] = Utils::getBeautyRam($option); } else { $paramOptions[] = $option; } } } } }
        }
        // Build the category list and pre-fill every bench/param cell with null
        // so series stay aligned when a combination has no data.
        $categories = '';
        $arrayBenchs = array();
        foreach ($paramOptions as $param) {
            $categories .= "'{$param}" . Utils::getParamevalUnit($paramEval) . "',";
            foreach ($benchOptions as $bench) { $arrayBenchs[$bench['bench']][$param] = null; }
        }
        $series = array();
        foreach ($rows as $row) {
            // Apply the same display-name translation to the row's grouping value.
            if ($paramEval == 'comp') { $row[$paramEval] = Utils::getCompressionName($row['comp']); } else { if ($paramEval == 'id_cluster') { $row[$paramEval] = Utils::getClusterName($row[$paramEval], $db); } else { if ($paramEval == 'net') { $row[$paramEval] = Utils::getNetworkName($row['net']); } else { if ($paramEval == 'disk') { $row[$paramEval] = Utils::getDisksName($row['disk']); } else { if ($paramEval == 'vm_ram') { $row[$paramEval] = Utils::getBeautyRam($row['vm_ram']); } } } } }
            // NOTE(review): (int) cast before round(..., 2) discards the decimals the
            // rounding suggests were wanted — confirm intended.
            $arrayBenchs[strtolower($row['bench'])][$row[$paramEval]]['y'] = round((int) $row['avg_exe_time'], 2);
            $arrayBenchs[strtolower($row['bench'])][$row[$paramEval]]['count'] = (int) $row['count'];
        }
        foreach ($arrayBenchs as $key => $arrayBench) {
            $series[] = array('name' => $key, 'data' => array_values($arrayBench));
        }
        $series = json_encode($series);
    } catch (\Exception $e) {
        $this->container->getTwig()->addGlobal('message', $e->getMessage() . "\n");
    }
    return $this->render('configEvaluationViews/parameval.html.twig', array('title' => 'Improvement of Hadoop Execution by SW and HW Configurations', 'minexecs' => $minExecs, 'categories' => $categories, 'series' => $series, 'paramEval' => $paramEval, 'models' => $model_html));
}
/**
 * Streams a gzip-compressed JSON payload with DBSCAN clustering results
 * (per-execution centroids grouped into clusters plus noise points) for the
 * executions scatter-plot view.
 *
 * Reads its inputs from $_GET (filter params, jobid, metric_x, metric_y,
 * task_type), lazily computes at most ONE pending DBSCAN job per request
 * (the client polls again while the returned 'pending' count is > 0), and
 * echoes the result as JSON. No return value.
 */
public function dbscanexecsDataAction()
{
    $db = $this->container->getDBUtils();

    // Each Utils::read_params() call appends a sanitized SQL condition for its
    // $_GET parameter to $where_configs (passed by reference, aliased to table
    // "e"). Only that accumulated WHERE fragment is needed here, so the return
    // values are intentionally discarded.
    $where_configs = '';
    $table_name = "e";
    Utils::read_params('datefrom', $where_configs, false, $table_name);
    Utils::read_params('dateto', $where_configs, false, $table_name);
    Utils::read_params('benchs', $where_configs, false, $table_name);
    Utils::read_params('nets', $where_configs, false, $table_name);
    Utils::read_params('disks', $where_configs, false, $table_name);
    Utils::read_params('blk_sizes', $where_configs, false, $table_name);
    Utils::read_params('comps', $where_configs, false, $table_name);
    Utils::read_params('id_clusters', $where_configs, false, $table_name);
    Utils::read_params('mapss', $where_configs, false, $table_name);
    Utils::read_params('replications', $where_configs, false, $table_name);
    Utils::read_params('iosfs', $where_configs, false, $table_name);
    Utils::read_params('iofilebufs', $where_configs, false, $table_name);
    Utils::read_params('money', $where_configs, false, $table_name);
    Utils::read_params('datanodess', $where_configs, false, $table_name);
    Utils::read_params('bench_types', $where_configs, false, $table_name);
    Utils::read_params('vm_sizes', $where_configs, false, $table_name);
    Utils::read_params('vm_coress', $where_configs, false, $table_name);
    Utils::read_params('vm_RAMs', $where_configs, false, $table_name);
    Utils::read_params('hadoop_versions', $where_configs, false, $table_name);
    Utils::read_params('types', $where_configs, false, $table_name);
    Utils::read_params('filters', $where_configs, false, $table_name);
    Utils::read_params('minexetime', $where_configs, false, $table_name);
    Utils::read_params('maxexetime', $where_configs, false, $table_name);
    Utils::read_params('providers', $where_configs, false, $table_name);

    $jobid = Utils::get_GET_string("jobid");

    // Metric indices for the X/Y chart axes; default to 0 (X) and 1 (Y).
    // Fetch once instead of calling get_GET_int() twice per parameter.
    $metric_x = Utils::get_GET_int("metric_x");
    $metric_x = $metric_x !== null ? $metric_x : 0;
    $metric_y = Utils::get_GET_int("metric_y");
    $metric_y = $metric_y !== null ? $metric_y : 1;
    $task_type = $db->get_task_type(Utils::get_GET_string("task_type"));

    // $id_exec is part of the tuple returned by get_jobid_info() but is not
    // needed in this action.
    list($bench, $job_offset, $id_exec) = $db->get_jobid_info($jobid);

    // Calc pending dbscanexecs (if any): process only the first pending job
    // now; the remainder is reported back via 'pending' so the client polls.
    $pending = $db->get_dbscanexecs_pending($bench, $job_offset, $metric_x, $metric_y, $task_type, $where_configs);
    if (count($pending) > 0) {
        $db->get_dbscan($pending[0]['jobid'], $metric_x, $metric_y, $task_type);
    }

    // Retrieve calculated dbscanexecs (per-execution centroids) from the
    // database. The second argument asks for a NULL-filtering task-type
    // clause; the returned value is a callable taking a table alias.
    $task_type_select = $db->get_task_type_query($task_type, true);
    $query = "\n SELECT\n d.`id_exec`,\n d.`centroid_x`,\n d.`centroid_y`\n FROM `JOB_dbscan` d, `execs` e\n WHERE\n d.`id_exec` = e.`id_exec` AND\n d.`bench` = :bench AND\n d.`job_offset` = :job_offset AND\n d.`metric_x` = :metric_x AND\n d.`metric_y` = :metric_y\n " . $task_type_select('d') . "\n {$where_configs}\n ;";
    $query_params = array(":bench" => $bench, ":job_offset" => $job_offset, ":metric_x" => $metric_x, ":metric_y" => $metric_y);

    // Since we may have just calculated new results above, bypass the cache.
    $_GET['NO_CACHE'] = 1;
    $rows = $db->get_rows($query, $query_params);

    // Cluster is used instead of a plain array so x/y min/max are tracked as
    // points are appended.
    $points = new Cluster();
    foreach ($rows as $row) {
        $points[] = new Point($row['centroid_x'], $row['centroid_y'], array('id_exec' => $row['id_exec']));
    }

    $dbscan = new DBSCAN();
    list($clusters, $noise) = $dbscan->execute((array) $points);

    // One series per non-empty cluster, annotated with its size and bounding box.
    $seriesData = array();
    foreach ($clusters as $cluster) {
        $data = array();
        foreach ($cluster as $point) {
            $data[] = array('x' => $point->x, 'y' => $point->y, 'id_exec' => $point->info['id_exec']);
        }
        if ($data) {
            $seriesData[] = array('points' => $data, 'size' => $cluster->count(), 'x_min' => $cluster->getXMin(), 'x_max' => $cluster->getXMax(), 'y_min' => $cluster->getYMin(), 'y_max' => $cluster->getYMax());
        }
    }

    // Points DBSCAN classified as noise (not belonging to any cluster).
    $noiseData = array();
    foreach ($noise as $point) {
        $noiseData[] = array('x' => $point->x, 'y' => $point->y, 'id_exec' => $point->info['id_exec']);
    }

    // 'pending' tells the client how many jobs still await processing after
    // the single one handled in this request.
    $result = ['seriesData' => $seriesData, 'noiseData' => $noiseData, 'pending' => max(0, count($pending) - 1)];
    header('Content-Type: application/json');
    ob_start('ob_gzhandler');
    echo json_encode($result, JSON_NUMERIC_CHECK);
}