fix: now really use multiple clusters in the loop

This commit is contained in:
Magel, Denis
2025-09-18 15:57:35 +02:00
parent e8aa7d7df5
commit 7d4b42df11
2 changed files with 14 additions and 12 deletions

View File

@@ -1,7 +1,7 @@
 # contains the business logic for aggregates
 from typing import List
+from pprint import pprint
 from fastapi import Request
 from src.aggregate.aggregate_schema import AggregateSchema, MetricEnum
 from logging import getLogger
@@ -16,9 +16,8 @@ async def get_aggregates(request: Request, metric: str = "relative") -> List[Agg
     # You can use the metric parameter to filter or modify results as needed
     # For now, just return the same data and show metric usage
     logger.debug(f"Metric used: {metric}")
-    __aggregates = await get_data_from_ontap(request, logger, "172.16.57.2", "admin", "Netapp12", "storage/aggregates", "fields=name,uuid,space,node,home_node")
-    logger.debug(__aggregates)
-    __aggregates = __aggregates.get("records")
+    __aggregates = await get_data_from_ontap(request, logger, "storage/aggregates", "fields=name,uuid,space,node,home_node")
+    pprint(__aggregates)
     if metric == MetricEnum.relative:
         __aggregates = sorted(__aggregates, key=lambda r: r["space"]["block_storage"].get("used_percent"), reverse=True)
     elif metric == MetricEnum.absolute:

View File

@@ -1,7 +1,7 @@
 import logging
 from fastapi import Request
 import httpx
+from pprint import pprint
 from src.database import get_config_from_db
@@ -14,26 +14,29 @@ def round_bytes(size_in_bytes: int) -> str:
     return f"{size_in_bytes:.2f}EB"

-async def get_data_from_ontap(request: Request, logger, hostname: str, username: str, password: str, endpoint: str, query_string: str = ""):
+async def get_data_from_ontap(request: Request, logger, endpoint: str, query_string: str = ""):
     # get clusters from redis
-    url = f"https://{hostname}/api/{endpoint}"
-    if query_string:
-        url += f"?{query_string}"
     redis_conn = request.state.redis_conn
     config = get_config_from_db(redis_conn)
     logger.debug("Got the config from REDIS: %s", config)
-    for cluster in config:
-        async with request.state.requests_client as _client:
+    results = []
+    async with request.state.requests_client as _client:
+        for cluster in config:
+            print(f"\n\n looping, {cluster}")
+            url = f"https://{cluster.hostname}/api/{endpoint}"
+            if query_string:
+                url += f"?{query_string}"
             try:
                 logger.debug(f"Fetching data from ONTAP: {url}")
                 response = await _client.get(url, auth=(cluster.username, cluster.password))
                 response.raise_for_status()
-                return response.json()
+                results.extend(response.json()["records"])
             except httpx.HTTPError as e:
                 logger.error(f"HTTP error occurred: {e}")
                 return None
+    return results

 def setup_logging() -> None: