import { useTranslation } from "next-i18next";
import Container from "components/services/widget/container";
import Block from "components/services/widget/block";
import useWidgetAPI from "utils/proxy/use-widget-api";

// Reducer: counts items whose status is "running".
function calcRunning(total, current) {
  return current.status === "running" ? total + 1 : total;
}

export default function Component({ service }) {
  const { t } = useTranslation();

  const { widget } = service;

  const { data: clusterData, error: clusterError } = useWidgetAPI(widget, "cluster/resources");

  if (clusterError) {
    return <Container service={service} error={clusterError} />;
  }

  if (!clusterData || !clusterData.data) {
    return (
      <Container service={service}>
        <Block label="proxmox.vms" />
        <Block label="proxmox.lxc" />
        <Block label="resources.cpu" />
        <Block label="resources.mem" />
      </Container>
    );
  }

  const { data } = clusterData;

  // Filter cluster resources by type (excluding templates); optionally scope to a single node.
  const vms =
    data.filter((item) => item.type === "qemu" && item.template === 0 && (widget.node === undefined || widget.node === item.node)) || [];
  const lxc =
    data.filter((item) => item.type === "lxc" && item.template === 0 && (widget.node === undefined || widget.node === item.node)) || [];
  const nodes = data.filter((item) => item.type === "node" && (widget.node === undefined || widget.node === item.node)) || [];
  const runningVMs = vms.reduce(calcRunning, 0);
  const runningLXC = lxc.reduce(calcRunning, 0);

  if (nodes.length === 0) {
    return (
      <Container service={service}>
        <Block label="proxmox.vms" />
        <Block label="proxmox.lxc" />
        <Block label="resources.cpu" />
        <Block label="resources.mem" />
      </Container>
    );
  }

  // Aggregate memory and CPU across the selected nodes;
  // n.cpu is a 0-1 utilization fraction, so weight it by core count.
  const maxMemory = nodes.reduce((sum, n) => n.maxmem + sum, 0);
  const usedMemory = nodes.reduce((sum, n) => n.mem + sum, 0);
  const maxCpu = nodes.reduce((sum, n) => n.maxcpu + sum, 0);
  const usedCpu = nodes.reduce((sum, n) => n.cpu * n.maxcpu + sum, 0);

  return (
    <Container service={service}>
      <Block label="proxmox.vms" value={`${runningVMs} / ${vms.length}`} />
      <Block label="proxmox.lxc" value={`${runningLXC} / ${lxc.length}`} />
      <Block label="resources.cpu" value={t("common.percent", { value: (usedCpu / maxCpu) * 100 })} />
      <Block label="resources.mem" value={t("common.percent", { value: (usedMemory / maxMemory) * 100 })} />
    </Container>
  );
}