Compare commits

10 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Magel, Denis | 362c470b3c | fix: API does not suspend after 1st request | 2025-09-18 18:36:51 +02:00 |
| Alexey | b8885d7d73 | tags dev | 2025-09-18 18:01:06 +02:00 |
| Magel, Denis | d2db261152 | docs: add drawio | 2025-09-18 17:05:10 +02:00 |
| Magel, Denis | e73a18e981 | docs: update readme | 2025-09-18 16:39:54 +02:00 |
| Magel, Denis | 7d4b42df11 | fix: now really use multiple clusters in the loop | 2025-09-18 15:57:35 +02:00 |
| Magel, Denis | e8aa7d7df5 | feat: get cluster information from redis; feat: loop over all clusters | 2025-09-18 15:36:39 +02:00 |
| Magel, Denis | b60383071a | make shared connections available via request.shared.XXX | 2025-09-18 15:28:50 +02:00 |
| | 58f7c5c393 | Merge pull request 'Refactoring config to config_upload, making important vars global' (#6) from feature/config_features into main (Reviewed-on: #6) | 2025-09-18 13:01:16 +00:00 |
| Pascal Scheiben | 5dfba7416b | Refactoring config to config_upload, making important vars global | 2025-09-18 15:00:03 +02:00 |
| Magel, Denis | fc3f39c6ae | fix: duplicate API startup | 2025-09-18 14:48:46 +02:00 |
16 changed files with 105 additions and 38 deletions


```diff
@@ -1,3 +1,19 @@
 # generic_api_endpoint
 Hackathon API endpoint
+## management summary // use case
+This API acts as a middleware for service portals and frontends (like SNOW) that retrieve data via REST API. It manages metadata.
+## ideas for future
+- store the data in redis on initialization or on first request
+- also query redis first, not ONTAP directly
+- documentation -> make it understandable, so that users will use it!
+- add capability to apply filters/conditions on the return
+- Alexeys
+-
+- performance based filtering
+- add capability for finding best clusters, volumes
+- get credentials from credential-mgmt-system
+-
```
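The two redis bullets above describe a cache-aside pattern. A minimal sketch of what "query redis first, fall back to ONTAP" could look like; the cache key, TTL, and function names are illustrative assumptions, not part of this changeset:

```python
import json

# Hypothetical cache-aside lookup: try redis first, fall back to ONTAP, then cache.
async def get_aggregates_cached(redis_conn, fetch_from_ontap):
    cached = redis_conn.get("aggregates")  # assumed cache key
    if cached is not None:
        return json.loads(cached)
    data = await fetch_from_ontap()  # e.g. the get_data_from_ontap call further down
    redis_conn.set("aggregates", json.dumps(data), ex=300)  # assumed 5-minute TTL
    return data
```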

concept.drawio.png (new binary file, 71 KiB; binary content not shown)

src/.env (new file)

```diff
@@ -0,0 +1,3 @@
+cluster_inventory_path = ./config/inventory.yml
+redis_host = '172.16.0.208'
+redis_port = '6379'
```
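The initialize code further down reads these settings as ENV_INVENTORYPATH, ENV_REDISHOST and ENV_REDISPORT. A sketch of how the file could be loaded, assuming python-dotenv; the exact loading mechanism is not shown in this diff:

```python
import os
from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv("src/.env")  # reads the three settings into the process environment

ENV_INVENTORYPATH = os.getenv("cluster_inventory_path", "./config/inventory.yml")
ENV_REDISHOST = os.getenv("redis_host", "172.16.0.208")
ENV_REDISPORT = int(os.getenv("redis_port", "6379"))
```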


```diff
@@ -1,6 +1,6 @@
 # contains the router for the aggregates endpoint
 from fastapi import APIRouter, Query, Request
-from typing import List
+from typing import List, Dict
 from .aggregate_schema import AggregateSchema, MetricEnum
 from .aggregate_service import get_aggregates
@@ -13,4 +13,11 @@ async def aggregates_endpoint(
     request: Request,
     metric: MetricEnum = Query(MetricEnum.relative, description="Metric type"),
 ):
-    return await get_aggregates(request, metric)
+    # Extract tag parameters from query string
+    tags: Dict[str, str] = {}
+    for param_name, param_value in request.query_params.items():
+        if param_name.startswith("tag."):
+            tag_key = param_name[4:]
+            tags[tag_key] = param_value
+    return await get_aggregates(request, metric, tags)
```
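With this change, every query parameter of the form tag.<key> is collected into a dict and passed to the service layer. A sketch of a client call; the mount path /aggregates and the local host/port are assumptions, since neither appears in this diff:

```python
import httpx

# tag.* parameters arrive server-side as tags={"flash": "production", "worm": "compliance"}
resp = httpx.get(
    "http://localhost:8000/aggregates",  # assumed mount path and address
    params={"metric": "relative", "tag.flash": "production", "tag.worm": "compliance"},
)
print(resp.status_code, resp.json())
```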


```diff
@@ -13,3 +13,11 @@ class AggregateSchema(BaseModel):
 class MetricEnum(str, Enum):
     relative = "relative"
     absolute = "absolute"
+
+TAG2REST = {
+    'worm_compliance': { 'snaplock_type': 'compliance' },
+    'worm_enterprise': { 'snaplock_type': 'enterprise' },
+    'flash': { 'block_storage.storage_type': 'ssd' },
+    'hdd': { 'block_storage.storage_type': 'hdd' },
+    'mcc': { 'block_storage.mirror.enabled': 'true' }
+}
```
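TAG2REST maps a user-facing tag name to the ONTAP REST field filter that implements it. A sketch of how the mapping could be applied when building a query string; the helper is hypothetical (the service layer below only carries this conversion as commented-out code):

```python
# Hypothetical helper: translate known tags into ONTAP REST query parameters.
def tags_to_query_params(tags: dict[str, str]) -> str:
    parts = []
    for tag in tags:
        for field, value in TAG2REST.get(tag, {}).items():  # unknown tags are skipped
            parts.append(f"{field}={value}")
    return "&".join(parts)

# tags_to_query_params({"flash": "production"}) == "block_storage.storage_type=ssd"
```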


```diff
@@ -1,7 +1,7 @@
 # contains the business logic for aggregates
-from typing import List
+from typing import List, Dict
+from pprint import pprint
 from fastapi import Request
 from src.aggregate.aggregate_schema import AggregateSchema, MetricEnum
 from logging import getLogger
@@ -10,16 +10,36 @@ from src.utils import round_bytes, get_data_from_ontap
 logger = getLogger("uvicorn")
 logger.setLevel("DEBUG")

+# TAG2REST = {
+#     'worm_compliance': { 'snaplock_type': 'compliance' },
+#     'worm_enterprise': { 'snaplock_type': 'enterprise' },
+#     'flash': { 'block_storage.storage_type': 'ssd' },
+#     'hdd': { 'block_storage.storage_type': 'hdd' },
+#     'mcc': { 'block_storage.mirror.enabled': 'true' }
+# }
+
-async def get_aggregates(request: Request, metric: str = "relative") -> List[AggregateSchema]:
+# {
+#     "flash": "production",
+#     "performance": "gold",
+#     "worm": "compliance"
+# }
+async def get_aggregates(request: Request, metric: str = "relative", tags: Dict[str, str] = None) -> List[AggregateSchema]:
     # Dummy data for demonstration
     # You can use the metric parameter to filter or modify results as needed
     # For now, just return the same data and show metric usage
     logger.debug(f"Metric used: {metric}")
-    client = request.app.requests_client
-    __aggregates = await get_data_from_ontap(client, logger, "172.16.57.2", "admin", "Netapp12", "storage/aggregates", "fields=name,uuid,space,node,home_node")
-    logger.debug(__aggregates)
-    __aggregates = __aggregates.get("records")
+    logger.debug(f"Tags used: {tags}")
+
+    # convert tags to ONTAP filter
+    # filter_str = ""
+    # if tags:
+    #     str_filter_parts = [f"tag.{key} eq '{value}'" for key, value in tags.items()]
+    #     param_str = "&".join([f"{TAG2REST[key]}" for key, value in tags.items()])
+    __aggregates = await get_data_from_ontap(request, logger, "storage/aggregates", "fields=*")
+    pprint(__aggregates)
     if metric == MetricEnum.relative:
         __aggregates = sorted(__aggregates, key=lambda r: r["space"]["block_storage"].get("used_percent"), reverse=True)
     elif metric == MetricEnum.absolute:
```
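The hunk ends just inside the MetricEnum.absolute branch, so its sort key is not visible here. For orientation, a small sketch of the two orderings; the absolute key (raw used bytes) is an assumption:

```python
# Two sample aggregate records shaped like the ONTAP response above.
records = [
    {"name": "aggr1", "space": {"block_storage": {"used_percent": 81, "used": 8_100_000_000_000}}},
    {"name": "aggr2", "space": {"block_storage": {"used_percent": 40, "used": 12_000_000_000_000}}},
]
by_relative = sorted(records, key=lambda r: r["space"]["block_storage"].get("used_percent"), reverse=True)
by_absolute = sorted(records, key=lambda r: r["space"]["block_storage"].get("used", 0), reverse=True)
print([r["name"] for r in by_relative])  # ['aggr1', 'aggr2'] (fullest by percentage first)
print([r["name"] for r in by_absolute])  # ['aggr2', 'aggr1'] (most used bytes first)
```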


```diff
@@ -1,3 +0,0 @@
-from src.config.router import router as config_router
-
-__all__ = ["config_router"]
```


```diff
@@ -1,2 +0,0 @@
-# contains the business logic for the config service
-async def save_config() -> None: ...
```


```diff
@@ -0,0 +1,3 @@
+from src.config_upload.router import router as config_router
+
+__all__ = ["config_router"]
```


```diff
@@ -6,7 +6,7 @@ from .schema import ConfigReturnSchema, ConfigSchema
 logger = logging.getLogger("uvicorn")

-router = APIRouter(tags=["config"])
+router = APIRouter(tags=["config_upload"])

 @router.post(
@@ -20,3 +20,4 @@ async def create_config(config: ConfigSchema) -> ConfigSchema:
     """
     logger.info("Received configuration data")
     return config
```


```diff
@@ -1,4 +1,4 @@
-# contains the schema definitions for the config service
+# contains the schema definitions for the config_upload service
 from pydantic import BaseModel
```


```diff
@@ -0,0 +1,2 @@
+# contains the business logic for the config_upload service
+async def save_config() -> None: ...
```


```diff
@@ -30,7 +30,7 @@ def initialize_config():
         print(f"FATAL: Cannot read inventory file {ENV_INVENTORYPATH}. Err: {e}")
         return False

-    print(f"[INFO] Importing configuration to DB...")
+    log.info(f"Importing configuration to DB...")
     try:
         GLOBAL_INVENTORY_VALID = TypeAdapter(List[ConfigSchema]).validate_python(inv)
         redis_conn = setup_db_conn(ENV_REDISHOST, ENV_REDISPORT)
```
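TypeAdapter(List[ConfigSchema]).validate_python(inv) turns the parsed inventory YAML into validated pydantic models. A standalone sketch; the ConfigSchema fields are inferred from the cluster.hostname/username/password attributes used in the utils diff below:

```python
from typing import List
from pydantic import BaseModel, TypeAdapter

class ConfigSchema(BaseModel):  # inferred fields; the real schema lives in the repo
    hostname: str
    username: str
    password: str

inv = [{"hostname": "cluster1.example.com", "username": "admin", "password": "secret"}]
clusters = TypeAdapter(List[ConfigSchema]).validate_python(inv)  # raises ValidationError on bad input
print(clusters[0].hostname)  # cluster1.example.com
```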


```diff
@@ -1,10 +1,10 @@
 import os
 import logging
+import httpx
 from fastapi import FastAPI
 from src.aggregate import aggregate_router
-from src.config import config_router
+from src.config_upload import config_router
 from contextlib import asynccontextmanager
@@ -13,16 +13,14 @@ from src.initialize import initialize_config
 from .utils import setup_logging

 logger = logging.getLogger("uvicorn")
-logger.setLevel("DEBUG")
 logger.info("Starting application")

-app = FastAPI()
-app.include_router(aggregate_router)
-app.include_router(config_router)

 @asynccontextmanager
 async def lifespan(app: FastAPI):
     """make loading it async"""
+    global shared_redis_conn, requests_client
     log = logging.getLogger("uvicorn")
     cfg_init_result = initialize_config()
@@ -36,8 +34,9 @@ async def lifespan(app: FastAPI):
     if not cfg_init_result:
         log.error("Configuration initialization failed. Exiting...")
         # exit(1)
+    requests_client = httpx.AsyncClient(verify=False)

-    yield
+    yield {"redis_conn": shared_redis_conn, "requests_client": requests_client}
+    await requests_client.aclose()

     log.info("Shutting down FastAPI app...")
@@ -46,3 +45,5 @@ log = logging.getLogger("uvicorn")
 log.info("Starting FastAPI app...")
 app = FastAPI(lifespan=lifespan)
+app.include_router(aggregate_router)
+app.include_router(config_router)
```
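The dict yielded by lifespan becomes Starlette's lifespan state: each key is exposed on every request as request.state.<key>, which is what the utils change below relies on. A minimal self-contained sketch of the pattern:

```python
from contextlib import asynccontextmanager

import httpx
from fastapi import FastAPI, Request

@asynccontextmanager
async def lifespan(app: FastAPI):
    # verify=False mirrors the commit; acceptable for a hackathon, not for production
    client = httpx.AsyncClient(verify=False)
    yield {"requests_client": client}  # becomes request.state.requests_client
    await client.aclose()

app = FastAPI(lifespan=lifespan)

@app.get("/ping")
async def ping(request: Request):
    resp = await request.state.requests_client.get("https://example.com")
    return {"upstream_status": resp.status_code}
```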


```diff
@@ -1,5 +1,8 @@
 import logging
+from fastapi import Request
 import httpx
+from src.database import get_config_from_db

 def round_bytes(size_in_bytes: int) -> str:
     # Helper function to convert bytes to a human-readable format
@@ -10,24 +13,32 @@ def round_bytes(size_in_bytes: int) -> str:
     return f"{size_in_bytes:.2f}EB"

-async def get_data_from_ontap(client, logger, hostname: str, username: str, password: str, endpoint: str, query_string: str = ""):
-    url = f"https://{hostname}/api/{endpoint}"
+async def get_data_from_ontap(request: Request, logger, endpoint: str, query_string: str = ""):
+    # get clusters from redis
+    redis_conn = request.state.redis_conn
+    config = get_config_from_db(redis_conn)
+    logger.debug("Got the config from REDIS: %s", config)
+    results = []
+    client = request.state.requests_client
+    for cluster in config:
+        print(f"\n\n looping, {cluster}")
+        url = f"https://{cluster.hostname}/api/{endpoint}"
         if query_string:
             url += f"?{query_string}"
-    async with client as _client:
         try:
             logger.debug(f"Fetching data from ONTAP: {url}")
-            response = await _client.get(url, auth=(username, password))
+            response = await client.get(url, auth=(cluster.username, cluster.password))
             response.raise_for_status()
-            return response.json()
+            results.extend(response.json()["records"])
         except httpx.HTTPError as e:
             logger.error(f"HTTP error occurred: {e}")
             return None
+    return results

 def setup_logging() -> None:
     """Configure logging for the application"""
-    logging.basicConfig(
-        level=logging.DEBUG,
-        format="[%(asctime)s] [%(levelname)5s] %(message)s"
-    )
-    print(f"Logger is initialized.")
+    logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s] [%(levelname)5s] %(message)s")
+    print("Logger is initialized.")
```