Compare commits

36 Commits

Author SHA1 Message Date
Magel, Denis
362c470b3c fix: API does not suspend after 1st request 2025-09-18 18:36:51 +02:00
Alexey
b8885d7d73 tags dev 2025-09-18 18:01:06 +02:00
Magel, Denis
d2db261152 docs: add drawio 2025-09-18 17:05:10 +02:00
Magel, Denis
e73a18e981 docs: update readme 2025-09-18 16:39:54 +02:00
Magel, Denis
7d4b42df11 fix: now really use multiple clusters in the loop 2025-09-18 15:57:35 +02:00
Magel, Denis
e8aa7d7df5 feat: get cluster information from redis
feat: loop over all clusters
2025-09-18 15:36:39 +02:00
Magel, Denis
b60383071a make shared connections available via request.shared.XXX 2025-09-18 15:28:50 +02:00
58f7c5c393 Merge pull request 'Refactoring config to config_upload, making important vars global' (#6) from feature/config_features into main
Reviewed-on: #6
2025-09-18 13:01:16 +00:00
Pascal Scheiben
5dfba7416b Refactoring config to config_upload, making important vars global 2025-09-18 15:00:03 +02:00
Magel, Denis
fc3f39c6ae fix: duplicate API startup 2025-09-18 14:48:46 +02:00
1ee40b6647 Merge pull request 'feature/config_upload' (#5) from feature/config_upload into main
Reviewed-on: #5
2025-09-18 12:34:38 +00:00
Magel, Denis
60008fa947 fix: adjusted paths to run from root dir
updated inventory.yaml
2025-09-18 14:33:30 +02:00
Magel, Denis
767f43551e rebased 2025-09-18 14:23:19 +02:00
Pascal Scheiben
9d12045b81 Arranging imports 2025-09-18 14:14:53 +02:00
Pascal Scheiben
72992d651d Fixing typos 2025-09-18 14:13:50 +02:00
Pascal Scheiben
ab52169987 Enhancing comments, adding stub for business logic 2025-09-18 14:13:50 +02:00
Pascal Scheiben
1a4e2ff688 Rewriting comments 2025-09-18 14:13:50 +02:00
Pascal Scheiben
579c62319c Adding config endpoint 2025-09-18 14:13:50 +02:00
2a165c91b6 Merge pull request 'backend' (#4) from backend into main
Reviewed-on: #4
2025-09-18 12:09:49 +00:00
Alexey
72f738a816 backedn dev 2025-09-18 14:09:11 +02:00
root
fe13e49172 dev schema 2025-09-18 14:09:11 +02:00
root
d90a18053f restructure 2025-09-18 14:09:11 +02:00
root
22419ecf84 read and write with readis 2025-09-18 14:09:11 +02:00
root
cf09ba6431 req file extended 2025-09-18 14:08:14 +02:00
root
774fa3484c basic creds schema 2025-09-18 14:04:06 +02:00
b0d70e2120 Merge pull request 'feat/handle-get-data-from-ONTAP' (#3) from feat/handle-get-data-from-ONTAP into main
Reviewed-on: #3
2025-09-18 12:00:54 +00:00
Magel, Denis
fc71950039 refactor: added async await to ONTAP call 2025-09-18 13:39:22 +02:00
Magel, Denis
e8efde9892 feat: added functionality inside GET/aggregates 2025-09-18 12:16:30 +02:00
1592333ef8 Merge pull request 'get-aggregates' (#2) from get-aggregates into main
Reviewed-on: #2
2025-09-18 09:06:09 +00:00
Pascal Scheiben
73a42aae3b Moving example files back to its place 2025-09-18 10:39:36 +02:00
Pascal Scheiben
615d290773 Renamed folder from example -> aggregate 2025-09-18 10:23:40 +02:00
Magel, Denis
af4b60a0e3 style: 2025-09-18 10:17:24 +02:00
Magel, Denis
63bcd9b931 feat: add GET /aggregates 2025-09-18 10:10:30 +02:00
d564710004 Merge pull request 'feature/base_file' (#1) from feature/base_file into main
Reviewed-on: #1
2025-09-18 07:36:51 +00:00
Pascal Scheiben
76c5353afa Adding base logic for config handling. Adding example config to root 2025-09-18 09:33:55 +02:00
Pascal Scheiben
19e9cd6625 Adding python dotenv to requirements/pyproject 2025-09-18 09:07:09 +02:00
30 changed files with 440 additions and 18 deletions

3
.env Normal file
View File

@@ -0,0 +1,3 @@
cluster_inventory_path = config/inventory.yml
redis_host = '172.16.0.208'
redis_port = '6379'

2
.gitignore vendored
View File

@@ -129,7 +129,7 @@ celerybeat.pid
*.sage.py *.sage.py
# Environments # Environments
.env #.env
.venv .venv
env/ env/
venv/ venv/

9
Dockerfile Normal file
View File

@@ -0,0 +1,9 @@
FROM python:latest
WORKDIR /usr/local/bin
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt
# start.py lands in the WORKDIR root, so it must be run from there.
COPY src/start.py .
# Fixed: previous CMD ["src/start.py"] pointed at a path that does not exist
# inside the image and had no interpreter in the exec-form command.
CMD ["python", "start.py"]

View File

@@ -1,3 +1,19 @@
# generic_api_endpoint # generic_api_endpoint
Hackathon API endpoint Hackathon API endpoint
## management summary // usecase
This API acts as a middleware for service portals and frontends (like SNOW) that can retrieve data via REST API. It manages metadata.
## ideas for future
- store the data in redis on initialization or on first request
- also first query redis, and not directly ONTAP
- documentation -> make it understandable, so that users will use it!
- add capability to apply filters/conditions on the return
- Alexeys
-
- performance based filtering
- add capability for finding best clusters, volumes
- get credentials from credential-mgmt-system
-

BIN
concept.drawio.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

6
config/inventory.yml Normal file
View File

@@ -0,0 +1,6 @@
- hostname: "172.16.57.2"
username: "admin"
password: "Netapp12"
- hostname: "172.16.56.2"
username: "admin"
password: "Netapp12"

0
initialize.py Normal file
View File

View File

@@ -12,5 +12,6 @@ requires-python = ">=3.13"
dependencies = [ dependencies = [
"fastapi[standard]>=0.116.2", "fastapi[standard]>=0.116.2",
"httpx>=0.28.1", "httpx>=0.28.1",
"python-dotenv>=1.1.1",
"redis>=6.4.0", "redis>=6.4.0",
] ]

View File

@@ -1,3 +1,7 @@
fastapi[standard]>=0.116.2 fastapi[standard]>=0.116.2
httpx>=0.28.1 httpx>=0.28.1
redis>=6.4.0 redis>=6.4.0
python-dotenv>=1.1.1
pydantic
redis[hiredis]
dotenv

3
src/.env Normal file
View File

@@ -0,0 +1,3 @@
cluster_inventory_path = ./config/inventory.yml
redis_host = '172.16.0.208'
redis_port = '6379'

View File

@@ -0,0 +1,5 @@
from src.example.router import router as example_router
from src.aggregate.aggregate_router import router as aggregate_router
__all__ = ["example_router", "aggregate_router"]

View File

@@ -0,0 +1,23 @@
# contains the router for the aggregates endpoint
from fastapi import APIRouter, Query, Request
from typing import List, Dict
from .aggregate_schema import AggregateSchema, MetricEnum
from .aggregate_service import get_aggregates
router = APIRouter(tags=["aggregates"])
@router.get("/aggregates", response_model=List[AggregateSchema])
async def aggregates_endpoint(
    request: Request,
    metric: MetricEnum = Query(MetricEnum.relative, description="Metric type"),
):
    """Return aggregates from all clusters, sorted per *metric*.

    Any query parameter of the form ``tag.<key>=<value>`` is collected into a
    plain dict and forwarded to the service layer.
    """
    # Gather every "tag."-prefixed query parameter, stripping the prefix.
    tags: Dict[str, str] = {
        name.removeprefix("tag."): value
        for name, value in request.query_params.items()
        if name.startswith("tag.")
    }
    return await get_aggregates(request, metric, tags)

View File

@@ -0,0 +1,23 @@
# contains the schema definitions for aggregates
from pydantic import BaseModel
from enum import Enum
class AggregateSchema(BaseModel):
    """Response model for a single ONTAP aggregate."""
    aggregate: str      # aggregate name
    node: str           # name of the owning node
    available: int      # free space in bytes
    available_str: str  # human-readable free space (see utils.round_bytes)


class MetricEnum(str, Enum):
    """Sort mode accepted by GET /aggregates."""
    relative = "relative"  # sort by used_percent, descending
    absolute = "absolute"  # sort by available bytes, ascending


# Maps public tag names (used as "tag.<name>" query parameters) to the ONTAP
# REST field/value pair they translate to.
# NOTE(review): a commented-out duplicate of this table lives in
# aggregate_service.py — consider keeping only this copy.
TAG2REST = {
    'worm_compliance': { 'snaplock_type': 'compliance' },
    'worm_enterprise': { 'snaplock_type': 'enterprise' },
    'flash': { 'block_storage.storage_type': 'ssd' },
    'hdd': { 'block_storage.storage_type': 'hdd' },
    'mcc': { 'block_storage.mirror.enabled': 'true' }
}

View File

@@ -0,0 +1,58 @@
# contains the business logic for aggregates
from typing import List, Dict
from pprint import pprint
from fastapi import Request
from src.aggregate.aggregate_schema import AggregateSchema, MetricEnum
from logging import getLogger
from src.utils import round_bytes, get_data_from_ontap
logger = getLogger("uvicorn")
logger.setLevel("DEBUG")
# TAG2REST = {
# 'worm_compliance': { 'snaplock_type': 'compliance' },
# 'worm_enterprise': { 'snaplock_type': 'enterprise' },
# 'flash': { 'block_storage.storage_type': 'ssd' },
# 'hdd': { 'block_storage.storage_type': 'hdd' },
# 'mcc': { 'block_storage.mirror.enabled': 'true' }
# }
# {
# "flash": "production",
# "performance": "gold",
# "worm": "compliance"
# }
async def get_aggregates(request: Request, metric: str = "relative", tags: Dict[str, str] | None = None) -> List[AggregateSchema]:
    """Fetch aggregates from all configured ONTAP clusters and return them sorted.

    Args:
        request: carries the shared Redis connection and httpx client in state.
        metric: MetricEnum.relative sorts by used_percent (descending),
            MetricEnum.absolute sorts by available bytes (ascending).
        tags: optional tag filters parsed from the query string; currently
            only logged — translation to ONTAP filters is not implemented yet.

    Returns:
        A list of AggregateSchema entries; empty when no data could be fetched.
    """
    logger.debug(f"Metric used: {metric}")
    logger.debug(f"Tags used: {tags}")
    raw_aggregates = await get_data_from_ontap(request, logger, "storage/aggregates", "fields=*")
    # get_data_from_ontap returns None on any HTTP error — treat that as
    # "no data" instead of crashing in sorted() below. (Also removed the
    # leftover pprint() debug dump of the full payload.)
    if not raw_aggregates:
        return []
    if metric == MetricEnum.relative:
        raw_aggregates = sorted(raw_aggregates, key=lambda r: r["space"]["block_storage"].get("used_percent"), reverse=True)
    elif metric == MetricEnum.absolute:
        raw_aggregates = sorted(raw_aggregates, key=lambda r: r["space"]["block_storage"].get("available"), reverse=False)
    return [
        AggregateSchema(
            aggregate=a["name"],
            node=a["node"]["name"],
            available=a["space"]["block_storage"]["available"],
            available_str=round_bytes(a["space"]["block_storage"]["available"]),
        )
        for a in raw_aggregates
    ]

View File

@@ -0,0 +1,3 @@
from src.config_upload.router import router as config_router
__all__ = ["config_router"]

View File

@@ -0,0 +1,14 @@
POST http://127.0.0.1:8000/config
Content-Type: application/json
{
"cluster_list": [
{
"hostname": "cluster1.demo.netapp.com",
"username": "admin",
"password": "Netapp1!"
}
]
}
###

View File

@@ -0,0 +1,23 @@
import logging
from fastapi import APIRouter
from .schema import ConfigReturnSchema, ConfigSchema
logger = logging.getLogger("uvicorn")
router = APIRouter(tags=["config_upload"])
@router.post(
    "/config", summary="Upload a configuration", response_model=ConfigReturnSchema
)
async def create_config(config: ConfigSchema) -> ConfigReturnSchema:
    """
    Endpoint to receive and store configuration data.

    The response is filtered through ConfigReturnSchema, so the passwords in
    the uploaded payload are stripped before it is echoed back.
    (Fixed: the return annotation previously said ConfigSchema, contradicting
    response_model.)

    ⚠️ at this time the configuration is not stored anywhere. It's like logging to /dev/null
    """
    logger.info("Received configuration data")
    return config

View File

@@ -0,0 +1,21 @@
# contains the schema definitions for the config_upload service
from pydantic import BaseModel
class ConfigEntrySchema(BaseModel):
    """One cluster's credentials as uploaded by the client (includes password)."""
    hostname: str
    username: str
    password: str


class ConfigOutSchema(BaseModel):
    """One cluster as echoed back to the client — password deliberately omitted."""
    hostname: str
    username: str


class ConfigReturnSchema(BaseModel):
    """Response payload for POST /config: accepted clusters without passwords."""
    cluster_list: list[ConfigOutSchema]


class ConfigSchema(BaseModel):
    """Request payload for POST /config: a list of cluster credentials."""
    cluster_list: list[ConfigEntrySchema]

View File

@@ -0,0 +1,2 @@
# contains the business logic for the config_upload service
async def save_config() -> None: ...

40
src/database.py Normal file
View File

@@ -0,0 +1,40 @@
import json
import logging
from redis import Redis, ConnectionError
from typing import List
from pydantic import TypeAdapter
from src.schema import ConfigSchema
def setup_db_conn(redishost, redisport: str):
    """Open a Redis connection, verify it with PING, and return the client.

    Returns the connected client, or None when the host is unreachable or any
    other error occurs while connecting. Terminates the process when the
    server responds but PING returns falsy.
    """
    log = logging.getLogger('uvicorn')
    try:
        client = Redis(host=redishost, port=redisport, decode_responses=True)
        # Guard clause: a falsy PING is treated as fatal.
        if not client.ping():
            log.error(f"Cannot connect to Redis DB {redishost} on port {redisport}")
            exit(1)
        log.info(f"Connected to Redis DB {redishost} on port {redisport}")
        return client
    except ConnectionError as e:
        print(f"FATAL: Redis DB {redishost} is unreachable on port {redisport}. Err: {e}")
        return None
    except Exception as e:
        print(f"FATAL: {e}")
        return None
def get_inventory_from_redis(redisclient: Redis):
    """Read the stored cluster inventory from Redis and decode its JSON payload.

    Returns the parsed inventory, or an empty dict when no inventory has been
    uploaded yet.
    """
    stored = redisclient.hgetall('cluster_inventory')
    raw = stored.get('inventory')
    return json.loads(raw) if raw is not None else {}
def get_config_from_db(redisclient: Redis) -> List[ConfigSchema]:
    """Load the cluster inventory from Redis and validate it against the schema.

    Returns the inventory as a list of ConfigSchema entries (one per cluster).
    Fixed: the return annotation previously claimed a single ConfigSchema,
    while the function has always returned a validated list.

    Raises pydantic's ValidationError when the stored payload does not match.
    """
    inventory = get_inventory_from_redis(redisclient)
    # Validate the raw JSON structure before handing it to callers.
    return TypeAdapter(List[ConfigSchema]).validate_python(inventory)

View File

@@ -1,3 +0,0 @@
from .router import router as example_router
__all__ = ["example_router"]

View File

@@ -1,2 +1,2 @@
# contains a constant definition # contains a constant definition
FOO: int = 42 FOO: int = 42

View File

@@ -1,9 +1,11 @@
# contains the router for the example endpoint # contains the router for the aggregate endpoint
from fastapi import APIRouter from fastapi import APIRouter
from .schema import ExampleSchema
router = APIRouter(tags=["example"]) from src.example.schema import ExampleSchema
router = APIRouter(tags=["aggregate"])
@router.get("/example") @router.get("/example")
async def example_endpoint() -> ExampleSchema: async def example_endpoint() -> ExampleSchema:
return ExampleSchema(example_field="foo", another_field=42) return ExampleSchema(example_field="foo", another_field=42)

View File

@@ -1,6 +1,18 @@
# contains the schema definitions for the example service # contains the schema definitions for the aggregate service
from pydantic import BaseModel from pydantic import BaseModel
from pathlib import Path
class ExampleSchema(BaseModel): class ExampleSchema(BaseModel):
example_field: str example_field: str
another_field: int another_field: int
class ClusterCreds(BaseModel):
"""A structure to hold basic auth cluster credentials for a cluster"""
username: str
password: str
hostname: str = None
cert_filepath: Path = None
key_filepath: Path = None

View File

@@ -1,3 +1,3 @@
# contains the business logic for the example service # contains the business logic for the aggregate service
async def example_service() -> str: async def example_service() -> str:
return "This is an example service" return "This is an aggregate service"

45
src/initialize.py Normal file
View File

@@ -0,0 +1,45 @@
import os
import json
import logging
import yaml
from pathlib import Path
from dotenv import load_dotenv
from src.database import setup_db_conn
from src.schema import ConfigSchema
from typing import List
from pydantic import TypeAdapter
def initialize_config():
    """Read the YAML cluster inventory from disk, validate it, and push it to Redis.

    Environment (loaded from .env via python-dotenv):
        cluster_inventory_path — path to the inventory YAML file
        redis_host / redis_port — Redis connection settings

    Returns:
        True when the inventory was validated and written to Redis; False on
        any failure (missing file, unreadable YAML, schema mismatch, DB error).
    """
    load_dotenv()
    log = logging.getLogger("uvicorn")
    inventory_path = os.getenv("cluster_inventory_path")
    redis_host = os.getenv("redis_host")
    redis_port = os.getenv("redis_port")
    log.info(f"Found Cluster Inventory file at: {inventory_path}")
    if not inventory_path or not Path(inventory_path).is_file():
        print(f"FATAL: Inventory file {inventory_path} is missing or not a file.")
        return False
    try:
        with open(inventory_path, "r") as f:
            inv = yaml.safe_load(f)
        inventory = json.dumps(inv)
    except Exception as e:
        print(f"FATAL: Cannot read inventory file {inventory_path}. Err: {e}")
        return False
    log.info("Importing configuration to DB...")
    try:
        # Validate before writing — a schema mismatch aborts the import via
        # the except below. (The validated value itself is not needed here.)
        TypeAdapter(List[ConfigSchema]).validate_python(inv)
        redis_conn = setup_db_conn(redis_host, redis_port)
        redis_conn.hset("cluster_inventory", mapping={"inventory": inventory})
        redis_conn.close()
        log.info("Configuration has been loaded.")
        return True
    except Exception as e:
        print(f"FATAL: Redis DB error: {e}")
        return False

View File

@@ -1,5 +1,49 @@
def main() -> None: import os
print("Hello, World!") import logging
import httpx
from fastapi import FastAPI
if __name__ == "__main__": from src.aggregate import aggregate_router
main() from src.config_upload import config_router
from contextlib import asynccontextmanager
from .database import setup_db_conn, get_config_from_db
from src.initialize import initialize_config
from .utils import setup_logging
logger = logging.getLogger("uvicorn")
logger.setLevel("DEBUG")
logger.info("Starting application")
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: set up shared Redis and HTTP clients, tear down on exit.

    Yields a dict that FastAPI exposes to handlers as
    request.state.redis_conn / request.state.requests_client.
    """
    global shared_redis_conn, requests_client
    log = logging.getLogger("uvicorn")
    # Push the on-disk YAML inventory into Redis; failure is logged below but
    # does not currently abort startup (exit(1) is commented out).
    cfg_init_result = initialize_config()
    shared_redis_conn = setup_db_conn(os.getenv("redis_host"), os.getenv("redis_port"))
    if not shared_redis_conn:
        log.error("Cannot connect to Redis DB. Exiting...")
        exit(1)
    # Round-trip the stored inventory through schema validation as a health check.
    inv_check = get_config_from_db(shared_redis_conn)
    log.info(f"[DEBUG] Data validity healthcheck (DEVELOPER MODE): {inv_check}")
    if not cfg_init_result:
        log.error("Configuration initialization failed. Exiting...")
        # exit(1)
    # verify=False disables TLS verification — presumably because the ONTAP
    # clusters use self-signed certificates; TODO confirm this is intended.
    requests_client = httpx.AsyncClient(verify=False)
    yield {"redis_conn": shared_redis_conn, "requests_client": requests_client}
    await requests_client.aclose()
    log.info("Shutting down FastAPI app...")
setup_logging()
log = logging.getLogger("uvicorn")
log.info("Starting FastAPI app...")
app = FastAPI(lifespan=lifespan)
app.include_router(aggregate_router)
app.include_router(config_router)

7
src/schema.py Normal file
View File

@@ -0,0 +1,7 @@
from pydantic import BaseModel
class ConfigSchema(BaseModel):
    """Credentials for a single ONTAP cluster, as stored in the Redis inventory.

    NOTE(review): a different ConfigSchema (wrapping a cluster_list) exists in
    src/config_upload/schema.py — the duplicated name is easy to mix up.
    """
    hostname: str
    username: str
    password: str

17
src/service.py Normal file
View File

@@ -0,0 +1,17 @@
import logging
from dotenv import dotenv_values
from src.schema import ConfigSchema
logger = logging.getLogger("uvicorn")
def load_config() -> ConfigSchema:
    """Build a single-cluster ConfigSchema from the local .env file.

    NOTE(review): reads CLUSTER1_HOSTNAME / CLUSTER1_USERNAME /
    CLUSTER1_PASSWORD, but the .env files checked in elsewhere in this repo
    define cluster_inventory_path / redis_host / redis_port instead — confirm
    which key set is current. Missing keys raise KeyError here.
    """
    logger.info("Loading config from .env file")
    # dotenv_values reads the file directly and does NOT modify os.environ.
    config = dotenv_values(".env")
    return ConfigSchema(
        hostname=config["CLUSTER1_HOSTNAME"],
        username=config["CLUSTER1_USERNAME"],
        password=config["CLUSTER1_PASSWORD"],
    )

44
src/utils.py Normal file
View File

@@ -0,0 +1,44 @@
import logging
from fastapi import Request
import httpx
from src.database import get_config_from_db
def round_bytes(size_in_bytes: float) -> str:
    """Convert a byte count to a human-readable binary-prefixed string.

    Args:
        size_in_bytes: size in bytes (int or float; floats appear after the
            repeated division below, hence the widened annotation).

    Returns:
        A string such as "1.50KiB". Values of 1024 EiB and above are still
        reported in EiB.
    """
    for unit in ["B", "KiB", "MiB", "GiB", "TiB", "PiB"]:
        if size_in_bytes < 1024:
            return f"{size_in_bytes:.2f}{unit}"
        size_in_bytes /= 1024
    # Fixed suffix: was "EB" (a decimal prefix), inconsistent with the binary
    # units (KiB...PiB) used above.
    return f"{size_in_bytes:.2f}EiB"
async def get_data_from_ontap(request: Request, logger, endpoint: str, query_string: str = ""):
    """Query every configured ONTAP cluster and merge their 'records' lists.

    Args:
        request: carries the shared Redis connection and httpx client in state.
        logger: logger used for debug/error output.
        endpoint: ONTAP REST path relative to /api, e.g. "storage/aggregates".
        query_string: optional query string appended to the URL.

    Returns:
        Combined list of records from all clusters, or None as soon as any
        cluster request fails (partial results are discarded).
    """
    # Cluster credentials live in Redis, loaded at startup / via POST /config.
    redis_conn = request.state.redis_conn
    config = get_config_from_db(redis_conn)
    logger.debug("Got the config from REDIS: %s", config)
    results = []
    client = request.state.requests_client
    # (Removed leftover debug print of each cluster object to stdout.)
    for cluster in config:
        url = f"https://{cluster.hostname}/api/{endpoint}"
        if query_string:
            url += f"?{query_string}"
        try:
            logger.debug(f"Fetching data from ONTAP: {url}")
            response = await client.get(url, auth=(cluster.username, cluster.password))
            response.raise_for_status()
            results.extend(response.json()["records"])
        except httpx.HTTPError as e:
            logger.error(f"HTTP error occurred: {e}")
            return None
    return results
def setup_logging() -> None:
    """Configure root logging for the application (DEBUG level, timestamped)."""
    log_format = "[%(asctime)s] [%(levelname)5s] %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    print("Logger is initialized.")