Compare commits: 58f7c5c393...main

7 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 362c470b3c |  |
|  | b8885d7d73 |  |
|  | d2db261152 |  |
|  | e73a18e981 |  |
|  | 7d4b42df11 |  |
|  | e8aa7d7df5 |  |
|  | b60383071a |  |
README.md (16 lines changed)

```diff
@@ -1,3 +1,19 @@
 # generic_api_endpoint
 
 Hackathon API endpoint
+
+## management summary // usecase
+This API acts as a middleware for service portals and frontends (like SNOW) that can retrieve data via REST API. It manages metadata.
+
+## ideas for future
+- store the data in redis on initialization or on first request
+- also query redis first, and not ONTAP directly
+- documentation -> make it understandable, so that users will use it!
+- add capability to apply filters/conditions on the return
+- Alexeys
+-
+- performance based filtering
+
+- add capability for finding best clusters, volumes
+- get credentials from credential-mgmt-system
+-
```
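The two redis items in the list above describe a read-through cache. A minimal sketch of that pattern, assuming redis-py's asyncio client and a hypothetical `fetch_from_ontap` coroutine (neither is code from this diff):

```python
import json
import redis.asyncio as redis

async def get_cached(conn: redis.Redis, key: str, fetch_from_ontap) -> dict:
    """Read-through cache: serve from redis if present, else hit ONTAP once."""
    cached = await conn.get(key)
    if cached is not None:
        return json.loads(cached)              # cache hit: no ONTAP call
    data = await fetch_from_ontap()            # cache miss: query ONTAP ...
    await conn.set(key, json.dumps(data), ex=300)  # ... and cache for 5 minutes
    return data
```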
concept.drawio.png (BIN, new file)

Binary file not shown. (added image, size: 71 KiB)
src/.env (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+cluster_inventory_path = ./config/inventory.yml
+redis_host = '172.16.0.208'
+redis_port = '6379'
```
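The config code later in this diff reads `ENV_INVENTORYPATH`, `ENV_REDISHOST`, and `ENV_REDISPORT`, so these settings are presumably loaded from the .env into the environment. A sketch of how that could look, assuming python-dotenv, which the diff itself does not show:

```python
import os
from dotenv import load_dotenv  # assumption: python-dotenv, not confirmed by the diff

load_dotenv("src/.env")  # parses the key = value pairs into os.environ

ENV_INVENTORYPATH = os.getenv("cluster_inventory_path")  # "./config/inventory.yml"
ENV_REDISHOST = os.getenv("redis_host")                  # "172.16.0.208"
ENV_REDISPORT = os.getenv("redis_port")                  # "6379"
```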
src/aggregate/aggregate_router.py

```diff
@@ -1,6 +1,6 @@
 # contains the router for the aggregates endpoint
 from fastapi import APIRouter, Query, Request
-from typing import List
+from typing import List, Dict
 from .aggregate_schema import AggregateSchema, MetricEnum
 from .aggregate_service import get_aggregates
 
@@ -13,4 +13,11 @@ async def aggregates_endpoint(
     request: Request,
     metric: MetricEnum = Query(MetricEnum.relative, description="Metric type"),
 ):
-    return await get_aggregates(request, metric)
+    # Extract tag parameters from query string
+    tags: Dict[str, str] = {}
+    for param_name, param_value in request.query_params.items():
+        if param_name.startswith("tag."):
+            tag_key = param_name[4:]
+            tags[tag_key] = param_value
+
+    return await get_aggregates(request, metric, tags)
```
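The new loop turns every `tag.*` query parameter into an entry of the `tags` dict. A standalone sketch of that extraction with an example call; the `/aggregates` path is an assumption, since the route decorator sits outside the hunk:

```python
from typing import Dict

def extract_tags(query_params) -> Dict[str, str]:
    """Collect tag.* query parameters, stripping the 'tag.' prefix."""
    tags: Dict[str, str] = {}
    for name, value in query_params.items():
        if name.startswith("tag."):
            tags[name[4:]] = value  # "tag." is 4 characters
    return tags

# e.g. GET /aggregates?metric=relative&tag.flash=true&tag.worm_compliance=true
params = {"metric": "relative", "tag.flash": "true", "tag.worm_compliance": "true"}
assert extract_tags(params) == {"flash": "true", "worm_compliance": "true"}
```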
src/aggregate/aggregate_schema.py

```diff
@@ -13,3 +13,11 @@ class AggregateSchema(BaseModel):
 class MetricEnum(str, Enum):
     relative = "relative"
     absolute = "absolute"
+
+TAG2REST = {
+    'worm_compliance': { 'snaplock_type': 'compliance' },
+    'worm_enterprise': { 'snaplock_type': 'enterprise' },
+    'flash': { 'block_storage.storage_type': 'ssd' },
+    'hdd': { 'block_storage.storage_type': 'hdd' },
+    'mcc': { 'block_storage.mirror.enabled': 'true' }
+}
```
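`TAG2REST` maps a user-facing tag to the ONTAP REST field/value pair that selects it. One possible way to fold a tags dict into a query string; the service below still has this step commented out, and `tags_to_query` is a hypothetical helper, not part of the diff:

```python
from urllib.parse import urlencode

TAG2REST = {
    'worm_compliance': {'snaplock_type': 'compliance'},
    'worm_enterprise': {'snaplock_type': 'enterprise'},
    'flash': {'block_storage.storage_type': 'ssd'},
    'hdd': {'block_storage.storage_type': 'hdd'},
    'mcc': {'block_storage.mirror.enabled': 'true'},
}

def tags_to_query(tags: dict) -> str:
    """Translate tag names into ONTAP REST query parameters."""
    params = {}
    for tag_key in tags:
        params.update(TAG2REST.get(tag_key, {}))  # unknown tags are ignored
    return urlencode(params)

# tags_to_query({"flash": "x", "mcc": "x"})
# -> "block_storage.storage_type=ssd&block_storage.mirror.enabled=true"
```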
src/aggregate/aggregate_service.py

```diff
@@ -1,7 +1,7 @@
 # contains the business logic for aggregates
 
-from typing import List
+from typing import List, Dict
+from pprint import pprint
 from fastapi import Request
 from src.aggregate.aggregate_schema import AggregateSchema, MetricEnum
 from logging import getLogger
@@ -10,16 +10,36 @@ from src.utils import round_bytes, get_data_from_ontap
 logger = getLogger("uvicorn")
 logger.setLevel("DEBUG")
 
+# TAG2REST = {
+#     'worm_compliance': { 'snaplock_type': 'compliance' },
+#     'worm_enterprise': { 'snaplock_type': 'enterprise' },
+#     'flash': { 'block_storage.storage_type': 'ssd' },
+#     'hdd': { 'block_storage.storage_type': 'hdd' },
+#     'mcc': { 'block_storage.mirror.enabled': 'true' }
+# }
+
+# {
+#     "flash": "production",
+#     "performance": "gold",
+#     "worm": "compliance"
+# }
 
-async def get_aggregates(request: Request, metric: str = "relative") -> List[AggregateSchema]:
+async def get_aggregates(request: Request, metric: str = "relative", tags: Dict[str, str] = None) -> List[AggregateSchema]:
     # Dummy data for demonstration
     # You can use the metric parameter to filter or modify results as needed
     # For now, just return the same data and show metric usage
     logger.debug(f"Metric used: {metric}")
-    client = request.app.requests_client
-    __aggregates = await get_data_from_ontap(client, logger, "172.16.57.2", "admin", "Netapp12", "storage/aggregates", "fields=name,uuid,space,node,home_node")
-    logger.debug(__aggregates)
-    __aggregates = __aggregates.get("records")
+    logger.debug(f"Tags used: {tags}")
+
+    # convert tags to ONTAP filter
+    # filter_str = ""
+    # if tags:
+    #     str_filter_parts = [f"tag.{key} eq '{value}'" for key, value in tags.items()]
+    #     param_str = "&".join([f"{TAG2REST[key]}" for key, value in tags.items()])
+
+    __aggregates = await get_data_from_ontap(request, logger, "storage/aggregates", "fields=*")
+    pprint(__aggregates)
     if metric == MetricEnum.relative:
         __aggregates = sorted(__aggregates, key=lambda r: r["space"]["block_storage"].get("used_percent"), reverse=True)
     elif metric == MetricEnum.absolute:
```
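The tag-to-filter conversion is left commented out here, so `tags` is logged but not yet applied to the ONTAP request. A hedged sketch of how the fetch could be completed once it is wired up, reusing the hypothetical `tags_to_query` helper sketched after the schema diff above:

```python
from typing import Dict, Optional

def build_query(tags: Optional[Dict[str, str]], fields: str = "fields=*") -> str:
    """Append tag-derived ONTAP filters to the field selection."""
    if not tags:
        return fields  # also covers the tags=None default in the new signature
    return fields + "&" + tags_to_query(tags)  # tags_to_query: sketch above

# usage inside get_aggregates (hypothetical):
# __aggregates = await get_data_from_ontap(request, logger, "storage/aggregates", build_query(tags))
```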
src/config_upload/config_router.py

```diff
@@ -3,8 +3,6 @@ import logging
 from fastapi import APIRouter
 
 from .schema import ConfigReturnSchema, ConfigSchema
-from src.database import get_config_from_db
-from src.main import shared_redis_conn
 
 logger = logging.getLogger("uvicorn")
 
@@ -30,7 +30,7 @@ def initialize_config():
         print(f"FATAL: Cannot read inventory file {ENV_INVENTORYPATH}. Err: {e}")
         return False
 
-    print(f"[INFO] Importing configuration to DB...")
+    log.info(f"Importing configuration to DB...")
     try:
         GLOBAL_INVENTORY_VALID = TypeAdapter(List[ConfigSchema]).validate_python(inv)
         redis_conn = setup_db_conn(ENV_REDISHOST, ENV_REDISPORT)
```
src/main.py

```diff
@@ -3,9 +3,6 @@ import logging
 import httpx
 from fastapi import FastAPI
 
-shared_redis_conn = None
-requests_client = None
-
 from src.aggregate import aggregate_router
 from src.config_upload import config_router
 
@@ -20,7 +17,6 @@ logger.setLevel("DEBUG")
 logger.info("Starting application")
 
 
-
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     """make loading it async"""
@@ -39,7 +35,7 @@ async def lifespan(app: FastAPI):
         log.error("Configuration initialization failed. Exiting...")
         # exit(1)
     requests_client = httpx.AsyncClient(verify=False)
-    yield
+    yield {"redis_conn": shared_redis_conn, "requests_client": requests_client}
    await requests_client.aclose()
     log.info("Shutting down FastAPI app...")
```
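The change from a bare `yield` to `yield {...}` uses Starlette's lifespan state, which FastAPI supports: the yielded dict is exposed on `request.state` for every request, which is how `src/utils.py` below can read `request.state.redis_conn` and `request.state.requests_client`. A self-contained sketch of the mechanism, with stand-in values:

```python
from contextlib import asynccontextmanager
from fastapi import FastAPI, Request

@asynccontextmanager
async def lifespan(app: FastAPI):
    # stand-ins for the real redis connection and httpx client
    yield {"redis_conn": "fake-redis", "requests_client": "fake-client"}

app = FastAPI(lifespan=lifespan)

@app.get("/demo")
async def demo(request: Request):
    # every request sees a shallow copy of the yielded state dict
    return {"redis_conn": request.state.redis_conn}
```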
src/utils.py (35 lines changed)

```diff
@@ -1,5 +1,8 @@
 import logging
+from fastapi import Request
 import httpx
+from src.database import get_config_from_db
+
 
 def round_bytes(size_in_bytes: int) -> str:
     # Helper function to convert bytes to a human-readable format
@@ -10,24 +13,32 @@ def round_bytes(size_in_bytes: int) -> str:
     return f"{size_in_bytes:.2f}EB"
 
 
-async def get_data_from_ontap(client, logger, hostname: str, username: str, password: str, endpoint: str, query_string: str = ""):
-    url = f"https://{hostname}/api/{endpoint}"
-    if query_string:
-        url += f"?{query_string}"
-    async with client as _client:
+async def get_data_from_ontap(request: Request, logger, endpoint: str, query_string: str = ""):
+    # get clusters from redis
+    redis_conn = request.state.redis_conn
+    config = get_config_from_db(redis_conn)
+    logger.debug("Got the config from REDIS: %s", config)
+
+    results = []
+    client = request.state.requests_client
+    for cluster in config:
+        print(f"\n\n looping, {cluster}")
+        url = f"https://{cluster.hostname}/api/{endpoint}"
+        if query_string:
+            url += f"?{query_string}"
         try:
             logger.debug(f"Fetching data from ONTAP: {url}")
-            response = await _client.get(url, auth=(username, password))
+            response = await client.get(url, auth=(cluster.username, cluster.password))
             response.raise_for_status()
-            return response.json()
+            results.extend(response.json()["records"])
         except httpx.HTTPError as e:
             logger.error(f"HTTP error occurred: {e}")
             return None
+    return results
 
 
 def setup_logging() -> None:
     """Configure logging for the application"""
-    logging.basicConfig(
-        level=logging.DEBUG,
-        format="[%(asctime)s] [%(levelname)5s] %(message)s"
-    )
-    print(f"Logger is initialized.")
+    logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s] [%(levelname)5s] %(message)s")
+    print("Logger is initialized.")
```
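Design note on the loop above: clusters are queried one after another, and a single failing cluster makes the whole call return None. A hedged alternative sketch that fans the requests out concurrently with asyncio.gather and skips failing clusters instead; auth is omitted for brevity, and this is not the repo's actual code:

```python
import asyncio
import httpx

async def fetch_records(client: httpx.AsyncClient, urls: list[str]) -> list[dict]:
    """Query all cluster URLs concurrently; drop the ones that error out."""
    responses = await asyncio.gather(
        *(client.get(url) for url in urls), return_exceptions=True
    )
    records: list[dict] = []
    for response in responses:
        if isinstance(response, Exception) or not response.is_success:
            continue  # skip unreachable or erroring clusters instead of failing the call
        records.extend(response.json()["records"])
    return records
```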