diff --git a/fastops/__init__.py b/fastops/__init__.py index 92f8e02..c7a2541 100644 --- a/fastops/__init__.py +++ b/fastops/__init__.py @@ -10,4 +10,9 @@ from .detect import * from .compliance import * from .secrets import * -from .ship import * \ No newline at end of file +from .resources import * +from .connect import * +from .teardown import * +from .infra import * +from .ci import * +from .ship import * diff --git a/fastops/ci.py b/fastops/ci.py new file mode 100644 index 0000000..617c856 --- /dev/null +++ b/fastops/ci.py @@ -0,0 +1,592 @@ +"""CI/CD pipeline generation: GitHub Actions, GitLab CI, and deploy-on-push workflows.""" + +# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/14_ci.ipynb. + +# %% auto #0 +__all__ = ['github_actions', 'gitlab_ci', 'deploy_workflow', 'test_workflow', 'multi_env_workflow'] + +# %% ../nbs/14_ci.ipynb +import os, json +from pathlib import Path + +# %% ../nbs/14_ci.ipynb +def _yaml_dump(data, path=None): + 'Try PyYAML, fallback to simple YAML-like string generation' + try: + import yaml + result = yaml.dump(data, default_flow_style=False, sort_keys=False, allow_unicode=True) + if path: + Path(path).parent.mkdir(parents=True, exist_ok=True) + Path(path).write_text(result) + return result + except ImportError: + # Fallback: simple YAML-like string generation + def _format(obj, indent=0): + prefix = ' ' * indent + if isinstance(obj, dict): + lines = [] + for k, v in obj.items(): + if v is None: + continue + elif isinstance(v, (dict, list)): + lines.append(f'{prefix}{k}:') + lines.append(_format(v, indent + 1)) + elif isinstance(v, bool): + lines.append(f'{prefix}{k}: {str(v).lower()}') + elif isinstance(v, str) and ('\n' in v or ':' in v): + lines.append(f'{prefix}{k}: |') + for line in v.split('\n'): + lines.append(f'{prefix} {line}') + else: + lines.append(f'{prefix}{k}: {v}') + return '\n'.join(lines) + elif isinstance(obj, list): + lines = [] + for item in obj: + if isinstance(item, dict): + first_key = list(item.keys())[0] 
if item else '' + lines.append(f'{prefix}- {first_key}: {item[first_key]}') + for k, v in list(item.items())[1:]: + if isinstance(v, (dict, list)): + lines.append(f'{prefix} {k}:') + lines.append(_format(v, indent + 2)) + else: + lines.append(f'{prefix} {k}: {v}') + else: + lines.append(f'{prefix}- {item}') + return '\n'.join(lines) + return str(obj) + + result = _format(data) + if path: + Path(path).parent.mkdir(parents=True, exist_ok=True) + Path(path).write_text(result) + return result + +# %% ../nbs/14_ci.ipynb +def github_actions(name='deploy', app_name='app', **kw): + 'Generate GitHub Actions workflow YAML and optionally save to .github/workflows/' + + # Extract parameters with defaults + trigger = kw.get('trigger', {'push': {'branches': ['main']}}) + python_version = kw.get('python_version', '3.12') + node_version = kw.get('node_version', None) + test_cmd = kw.get('test_cmd', 'python -m pytest') + build = kw.get('build', True) + registry = kw.get('registry', 'ghcr') + deploy_target = kw.get('deploy_target', None) + deploy_host = kw.get('deploy_host', None) + deploy_user = kw.get('deploy_user', 'deploy') + domain = kw.get('domain', None) + env_vars = kw.get('env_vars', {}) + services = kw.get('services', None) + cache = kw.get('cache', True) + lint = kw.get('lint', False) + save = kw.get('save', True) + + # Build workflow structure + workflow = { + 'name': name, + 'on': trigger, + 'jobs': {} + } + + # Job: test + if test_cmd is not None: + test_job = { + 'runs-on': 'ubuntu-latest', + 'steps': [ + {'name': 'Checkout code', 'uses': 'actions/checkout@v4'}, + { + 'name': 'Set up Python', + 'uses': 'actions/setup-python@v5', + 'with': {'python-version': python_version} + } + ] + } + + # Add Node setup if needed + if node_version: + test_job['steps'].append({ + 'name': 'Set up Node.js', + 'uses': 'actions/setup-node@v4', + 'with': {'node-version': node_version} + }) + + # Add cache + if cache: + test_job['steps'].append({ + 'name': 'Cache dependencies', + 'uses': 
'actions/cache@v4', + 'with': { + 'path': '~/.cache/pip', + 'key': "${{ runner.os }}-pip-${{ hashFiles('**/requirements*.txt', '**/pyproject.toml') }}", + 'restore-keys': '${{ runner.os }}-pip-' + } + }) + + # Install dependencies + test_job['steps'].append({ + 'name': 'Install dependencies', + 'run': "pip install -e '.[dev]'" + }) + + # Add lint step + if lint: + test_job['steps'].append({ + 'name': 'Lint code', + 'run': 'ruff check . || true' + }) + + # Run tests + test_job['steps'].append({ + 'name': 'Run tests', + 'run': test_cmd + }) + + # Add services if needed + if services: + test_job['services'] = {} + for svc in services: + for svc_name, svc_config in svc.items(): + test_job['services'][svc_name] = svc_config + + workflow['jobs']['test'] = test_job + + # Job: build + if build: + build_job = { + 'runs-on': 'ubuntu-latest', + 'steps': [ + {'name': 'Checkout code', 'uses': 'actions/checkout@v4'} + ] + } + + # Add test dependency + if test_cmd is not None: + build_job['needs'] = 'test' + + # Registry login + if registry == 'ghcr': + build_job['steps'].append({ + 'name': 'Login to GitHub Container Registry', + 'uses': 'docker/login-action@v3', + 'with': { + 'registry': 'ghcr.io', + 'username': '${{ github.actor }}', + 'password': '${{ secrets.GITHUB_TOKEN }}' + } + }) + image_prefix = 'ghcr.io/${{ github.repository_owner }}' + elif registry == 'dockerhub': + build_job['steps'].append({ + 'name': 'Login to Docker Hub', + 'uses': 'docker/login-action@v3', + 'with': { + 'username': '${{ secrets.DOCKERHUB_USERNAME }}', + 'password': '${{ secrets.DOCKERHUB_TOKEN }}' + } + }) + image_prefix = '${{ secrets.DOCKERHUB_USERNAME }}' + elif registry == 'ecr': + build_job['steps'].extend([ + { + 'name': 'Configure AWS credentials', + 'uses': 'aws-actions/configure-aws-credentials@v4', + 'with': { + 'aws-access-key-id': '${{ secrets.AWS_ACCESS_KEY_ID }}', + 'aws-secret-access-key': '${{ secrets.AWS_SECRET_ACCESS_KEY }}', + 'aws-region': '${{ secrets.AWS_REGION }}' + } + }, 
+ { + 'name': 'Login to Amazon ECR', + 'uses': 'aws-actions/amazon-ecr-login@v2' + } + ]) + image_prefix = '${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com' + elif registry == 'acr': + build_job['steps'].extend([ + { + 'name': 'Azure Login', + 'uses': 'azure/login@v2', + 'with': { + 'creds': '${{ secrets.AZURE_CREDENTIALS }}' + } + }, + { + 'name': 'Login to Azure Container Registry', + 'uses': 'azure/docker-login@v2', + 'with': { + 'login-server': '${{ secrets.ACR_LOGIN_SERVER }}', + 'username': '${{ secrets.ACR_USERNAME }}', + 'password': '${{ secrets.ACR_PASSWORD }}' + } + } + ]) + image_prefix = '${{ secrets.ACR_LOGIN_SERVER }}' + + # Build and push + build_job['steps'].append({ + 'name': 'Build and push Docker image', + 'run': f''' +docker build -t {image_prefix}/{app_name}:${{{{ github.sha }}}} . +docker tag {image_prefix}/{app_name}:${{{{ github.sha }}}} {image_prefix}/{app_name}:latest +docker push {image_prefix}/{app_name}:${{{{ github.sha }}}} +docker push {image_prefix}/{app_name}:latest + '''.strip() + }) + + workflow['jobs']['build'] = build_job + + # Job: deploy + if deploy_target is not None: + deploy_job = { + 'runs-on': 'ubuntu-latest', + 'needs': 'build' if build else ('test' if test_cmd is not None else None), + 'steps': [ + {'name': 'Checkout code', 'uses': 'actions/checkout@v4'} + ] + } + + if deploy_target in ('docker', 'vps', 'hetzner'): + # SSH-based deployment + deploy_job['steps'].extend([ + { + 'name': 'Setup SSH', + 'run': f''' +mkdir -p ~/.ssh +echo "${{{{ secrets.SSH_PRIVATE_KEY }}}}" > ~/.ssh/id_rsa +chmod 600 ~/.ssh/id_rsa +ssh-keyscan -H {deploy_host} >> ~/.ssh/known_hosts + '''.strip() + }, + { + 'name': 'Deploy to server', + 'run': f''' +ssh {deploy_user}@{deploy_host} "cd /srv/app && docker compose pull && docker compose up -d" + '''.strip() + } + ]) + elif deploy_target == 'azure': + # Azure deployment + deploy_job['steps'].extend([ + { + 'name': 'Azure Login', + 'uses': 'azure/login@v2', + 'with': 
{ + 'creds': '${{ secrets.AZURE_CREDENTIALS }}' + } + }, + { + 'name': 'Deploy to Azure Web App', + 'uses': 'azure/webapps-deploy@v3', + 'with': { + 'app-name': app_name, + 'images': f'{image_prefix}/{app_name}:${{{{ github.sha }}}}' + } + } + ]) + elif deploy_target == 'aws': + # AWS ECS deployment + deploy_job['steps'].extend([ + { + 'name': 'Configure AWS credentials', + 'uses': 'aws-actions/configure-aws-credentials@v4', + 'with': { + 'aws-access-key-id': '${{ secrets.AWS_ACCESS_KEY_ID }}', + 'aws-secret-access-key': '${{ secrets.AWS_SECRET_ACCESS_KEY }}', + 'aws-region': '${{ secrets.AWS_REGION }}' + } + }, + { + 'name': 'Deploy to ECS', + 'run': f''' +aws ecs update-service --cluster {app_name}-cluster --service {app_name}-service --force-new-deployment + '''.strip() + } + ]) + + # Add environment variables + if env_vars: + deploy_job['env'] = {} + for var_name, secret_name in env_vars.items(): + deploy_job['env'][var_name] = f'${{{{ secrets.{secret_name} }}}}' + + if deploy_job['needs'] is not None: + workflow['jobs']['deploy'] = deploy_job + + # Save to file + if save: + output_path = f'.github/workflows/{name}.yml' + _yaml_dump(workflow, output_path) + + return workflow + +# %% ../nbs/14_ci.ipynb +def gitlab_ci(name='deploy', app_name='app', **kw): + 'Generate GitLab CI configuration and optionally save to .gitlab-ci.yml' + + # Extract parameters with defaults + trigger = kw.get('trigger', {'branches': ['main']}) + python_version = kw.get('python_version', '3.12') + test_cmd = kw.get('test_cmd', 'python -m pytest') + build = kw.get('build', True) + registry = kw.get('registry', 'gitlab') + deploy_target = kw.get('deploy_target', None) + deploy_host = kw.get('deploy_host', None) + deploy_user = kw.get('deploy_user', 'deploy') + services = kw.get('services', None) + lint = kw.get('lint', False) + save = kw.get('save', True) + + # Build GitLab CI structure + config = { + 'stages': [] + } + + # Test stage + if test_cmd is not None: + 
config['stages'].append('test') + + test_job = { + 'stage': 'test', + 'image': f'python:{python_version}', + 'script': [ + "pip install -e '.[dev]'" + ] + } + + if lint: + test_job['script'].append('ruff check . || true') + + test_job['script'].append(test_cmd) + + # Add services + if services: + test_job['services'] = [] + for svc in services: + for svc_name, svc_config in svc.items(): + test_job['services'].append(svc_config['image']) + + # Add only rule + if 'branches' in trigger: + test_job['only'] = trigger['branches'] + + config['test'] = test_job + + # Build stage + if build: + config['stages'].append('build') + + image_name = f'$CI_REGISTRY_IMAGE/{app_name}' if registry == 'gitlab' else f'{app_name}' + + build_job = { + 'stage': 'build', + 'image': 'docker:latest', + 'services': ['docker:dind'], + 'script': [ + 'docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY', + f'docker build -t {image_name}:$CI_COMMIT_SHA .', + f'docker tag {image_name}:$CI_COMMIT_SHA {image_name}:latest', + f'docker push {image_name}:$CI_COMMIT_SHA', + f'docker push {image_name}:latest' + ] + } + + if 'branches' in trigger: + build_job['only'] = trigger['branches'] + + config['build'] = build_job + + # Deploy stage + if deploy_target is not None: + config['stages'].append('deploy') + + deploy_job = { + 'stage': 'deploy', + 'image': 'alpine:latest', + 'script': [] + } + + if deploy_target in ('docker', 'vps', 'hetzner'): + deploy_job['script'] = [ + 'apk add --no-cache openssh-client', + 'mkdir -p ~/.ssh', + 'echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa', + 'chmod 600 ~/.ssh/id_rsa', + f'ssh-keyscan -H {deploy_host} >> ~/.ssh/known_hosts', + f'ssh {deploy_user}@{deploy_host} "cd /srv/app && docker compose pull && docker compose up -d"' + ] + + if 'branches' in trigger: + deploy_job['only'] = trigger['branches'] + + config['deploy'] = deploy_job + + # Save to file + if save: + output_path = '.gitlab-ci.yml' + _yaml_dump(config, output_path) + + return config + +# %% 
../nbs/14_ci.ipynb +def deploy_workflow(app_name='app', target='docker', **kw): + 'Convenience wrapper for deploy-focused GitHub Actions workflow' + return github_actions( + name='deploy', + app_name=app_name, + build=True, + deploy_target=target, + trigger={'push': {'branches': ['main']}, 'workflow_dispatch': {}}, + **kw + ) + +# %% ../nbs/14_ci.ipynb +def test_workflow(app_name='app', **kw): + 'Convenience wrapper for test-only GitHub Actions workflow' + return github_actions( + name='test', + app_name=app_name, + build=False, + deploy_target=None, + trigger={'push': {'branches': ['main', 'develop']}, 'pull_request': {'branches': ['main']}}, + lint=True, + **kw + ) + +# %% ../nbs/14_ci.ipynb +def multi_env_workflow(app_name='app', environments=None, **kw): + 'Generate a workflow with staging → production promotion' + + # Default environments + if environments is None: + environments = { + 'staging': {'branch': 'develop', 'domain': f'staging.{app_name}.com'}, + 'production': {'branch': 'main', 'domain': f'{app_name}.com', 'approval': True} + } + + # Extract common parameters + python_version = kw.get('python_version', '3.12') + test_cmd = kw.get('test_cmd', 'python -m pytest') + registry = kw.get('registry', 'ghcr') + deploy_target = kw.get('deploy_target', 'docker') + save = kw.get('save', True) + + # Build workflow + workflow = { + 'name': 'Multi-Environment Deploy', + 'on': { + 'push': { + 'branches': [env['branch'] for env in environments.values()] + }, + 'workflow_dispatch': {} + }, + 'jobs': {} + } + + # Test job (runs on all branches) + workflow['jobs']['test'] = { + 'runs-on': 'ubuntu-latest', + 'steps': [ + {'name': 'Checkout code', 'uses': 'actions/checkout@v4'}, + { + 'name': 'Set up Python', + 'uses': 'actions/setup-python@v5', + 'with': {'python-version': python_version} + }, + { + 'name': 'Install dependencies', + 'run': "pip install -e '.[dev]'" + }, + { + 'name': 'Run tests', + 'run': test_cmd + } + ] + } + + # Build and deploy jobs per environment 
+ for env_name, env_config in environments.items(): + branch = env_config['branch'] + domain = env_config.get('domain') + needs_approval = env_config.get('approval', False) + + # Build job + image_prefix = 'ghcr.io/${{ github.repository_owner }}' if registry == 'ghcr' else registry + + build_job_name = f'build-{env_name}' + workflow['jobs'][build_job_name] = { + 'runs-on': 'ubuntu-latest', + 'needs': 'test', + 'if': f"github.ref == 'refs/heads/{branch}'", + 'steps': [ + {'name': 'Checkout code', 'uses': 'actions/checkout@v4'}, + { + 'name': 'Login to GitHub Container Registry', + 'uses': 'docker/login-action@v3', + 'with': { + 'registry': 'ghcr.io', + 'username': '${{ github.actor }}', + 'password': '${{ secrets.GITHUB_TOKEN }}' + } + }, + { + 'name': 'Build and push Docker image', + 'run': f''' +docker build -t {image_prefix}/{app_name}:{env_name}-${{{{ github.sha }}}} . +docker tag {image_prefix}/{app_name}:{env_name}-${{{{ github.sha }}}} {image_prefix}/{app_name}:{env_name} +docker push {image_prefix}/{app_name}:{env_name}-${{{{ github.sha }}}} +docker push {image_prefix}/{app_name}:{env_name} + '''.strip() + } + ] + } + + # Deploy job + deploy_job_name = f'deploy-{env_name}' + deploy_job = { + 'runs-on': 'ubuntu-latest', + 'needs': build_job_name, + 'if': f"github.ref == 'refs/heads/{branch}'", + 'steps': [ + {'name': 'Checkout code', 'uses': 'actions/checkout@v4'} + ] + } + + # Add environment with approval if needed + if needs_approval: + deploy_job['environment'] = { + 'name': env_name, + 'url': f'https://{domain}' if domain else None + } + + # Add deploy steps based on target + if deploy_target in ('docker', 'vps', 'hetzner'): + deploy_job['steps'].extend([ + { + 'name': 'Setup SSH', + 'run': f''' +mkdir -p ~/.ssh +echo "${{{{ secrets.SSH_PRIVATE_KEY_{env_name.upper()} }}}}" > ~/.ssh/id_rsa +chmod 600 ~/.ssh/id_rsa +ssh-keyscan -H ${{{{ secrets.DEPLOY_HOST_{env_name.upper()} }}}} >> ~/.ssh/known_hosts + '''.strip() + }, + { + 'name': f'Deploy to 
{env_name}', + 'run': f''' +ssh ${{{{ secrets.DEPLOY_USER_{env_name.upper()} }}}}@${{{{ secrets.DEPLOY_HOST_{env_name.upper()} }}}} "cd /srv/app && docker compose pull && docker compose up -d" + '''.strip() + } + ]) + + workflow['jobs'][deploy_job_name] = deploy_job + + # Save to file + if save: + output_path = '.github/workflows/deploy.yml' + _yaml_dump(workflow, output_path) + + return workflow diff --git a/fastops/connect.py b/fastops/connect.py new file mode 100644 index 0000000..0ec2d06 --- /dev/null +++ b/fastops/connect.py @@ -0,0 +1,358 @@ +"""Resource config export and Pythonic client wrappers. Turns env dicts into saveable configs and ready-to-use Python clients.""" + +__all__ = ['ResourceConfig'] + +import json +from pathlib import Path + + +class ResourceConfig: + 'A config object holding all resource connection details' + + def __init__(self, resources=None): + self._resources = dict(resources or {}) + + @classmethod + def from_env(cls, env_dict): + 'Build config from the merged env dict returned by stack()' + resources = _detect_resource_groups(env_dict) + return cls(resources) + + @classmethod + def load(cls, path='resources.json'): + 'Load config from JSON file' + return cls(json.loads(Path(path).read_text())) + + def save(self, path='resources.json'): + 'Save config to JSON file' + Path(path).write_text(json.dumps(self._resources, indent=2)) + return path + + def to_env(self): + 'Flatten back to a dict of env vars (skip keys starting with _)' + result = {} + for name, group in self._resources.items(): + for key, value in group.items(): + if not key.startswith('_'): + result[key] = value + return result + + def to_dotenv(self, path='.env'): + 'Write a .env file' + env = self.to_env() + lines = [f'{key}={value}' for key, value in env.items()] + Path(path).write_text('\n'.join(lines)) + return path + + def connect(self, resource_name): + 'Return a ready-to-use Python client for the named resource' + if resource_name not in self._resources: + available 
= ', '.join(self.names) + raise ValueError(f'Resource "{resource_name}" not found. Available: {available}') + + group = self._resources[resource_name] + resource_type = group.get('_type') + + if resource_type in ('postgres', 'mysql', 'sqlite'): + return _connect_database(group) + elif resource_type == 'mongo': + return _connect_mongo(group) + elif resource_type == 'redis': + return _connect_redis(group) + elif resource_type in ('minio', 's3', 'azure_blob', 'gcs'): + return _connect_storage(group) + elif resource_type in ('rabbitmq', 'sqs', 'servicebus', 'pubsub'): + return _connect_queue(group) + elif resource_type in ('elasticsearch', 'opensearch', 'azure_search'): + return _connect_search(group) + elif resource_type in ('openai', 'azure_openai', 'ollama', 'bedrock'): + return _connect_llm(group) + else: + available = ', '.join(self.names) + raise ValueError(f'Unknown resource type "{resource_type}". Available resources: {available}') + + def __getitem__(self, key): + return self._resources[key] + + def __contains__(self, key): + return key in self._resources + + def __repr__(self): + parts = [f'{name}({group.get("_type", "unknown")})' for name, group in self._resources.items()] + return f'ResourceConfig({", ".join(parts)})' + + @property + def names(self): + return list(self._resources.keys()) + + +def _detect_resource_groups(env_dict): + 'Parse env dict and return {name: {_type, ...env_vars...}}' + resources = {} + + # Database detection + if 'DATABASE_URL' in env_dict: + url = env_dict['DATABASE_URL'] + if url.startswith('postgresql'): + db_type = 'postgres' + elif url.startswith('mysql'): + db_type = 'mysql' + elif url.startswith('mongodb'): + db_type = 'mongo' + elif url.startswith('sqlite'): + db_type = 'sqlite' + else: + db_type = 'postgres' # default + + resources['db'] = { + '_type': db_type, + 'DATABASE_URL': url + } + if 'DB_PROVIDER' in env_dict: + resources['db']['DB_PROVIDER'] = env_dict['DB_PROVIDER'] + + # Redis cache detection + if 'REDIS_URL' in 
env_dict: + resources['cache'] = { + '_type': 'redis', + 'REDIS_URL': env_dict['REDIS_URL'] + } + if 'CACHE_PROVIDER' in env_dict: + resources['cache']['CACHE_PROVIDER'] = env_dict['CACHE_PROVIDER'] + + # Queue detection + if 'QUEUE_URL' in env_dict or 'QUEUE_TOPIC' in env_dict: + queue_provider = env_dict.get('QUEUE_PROVIDER', 'rabbitmq') + resources['queue'] = { + '_type': queue_provider, + } + if 'QUEUE_URL' in env_dict: + resources['queue']['QUEUE_URL'] = env_dict['QUEUE_URL'] + if 'QUEUE_TOPIC' in env_dict: + resources['queue']['QUEUE_TOPIC'] = env_dict['QUEUE_TOPIC'] + if 'QUEUE_NAME' in env_dict: + resources['queue']['QUEUE_NAME'] = env_dict['QUEUE_NAME'] + if 'QUEUE_SUBSCRIPTION' in env_dict: + resources['queue']['QUEUE_SUBSCRIPTION'] = env_dict['QUEUE_SUBSCRIPTION'] + + # Storage detection + storage_keys = ['S3_ENDPOINT', 'S3_BUCKET', 'AZURE_STORAGE_CONNECTION_STRING', 'GCS_BUCKET'] + if any(key in env_dict for key in storage_keys): + provider = env_dict.get('STORAGE_PROVIDER', 'docker') + storage_type_map = { + 'docker': 'minio', + 'aws': 's3', + 'azure': 'azure_blob', + 'gcp': 'gcs' + } + storage_type = storage_type_map.get(provider, 'minio') + + resources['storage'] = {'_type': storage_type} + for key in ['S3_ENDPOINT', 'S3_BUCKET', 'S3_ACCESS_KEY', 'S3_SECRET_KEY', + 'AZURE_STORAGE_CONNECTION_STRING', 'AZURE_STORAGE_CONTAINER', + 'GCS_BUCKET', 'S3_REGION', 'STORAGE_PROVIDER']: + if key in env_dict: + resources['storage'][key] = env_dict[key] + + # LLM detection + if 'LLM_ENDPOINT' in env_dict or 'LLM_MODEL' in env_dict: + llm_provider = env_dict.get('LLM_PROVIDER', 'openai') + resources['llm'] = {'_type': llm_provider} + for key in ['LLM_ENDPOINT', 'LLM_MODEL', 'LLM_PROVIDER', 'OPENAI_API_KEY', + 'AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_DEPLOYMENT', 'AWS_REGION']: + if key in env_dict: + resources['llm'][key] = env_dict[key] + + # Search detection + if 'SEARCH_URL' in env_dict: + search_provider = env_dict.get('SEARCH_PROVIDER', 'elasticsearch') + 
resources['search'] = { + '_type': search_provider, + 'SEARCH_URL': env_dict['SEARCH_URL'] + } + if 'SEARCH_PROVIDER' in env_dict: + resources['search']['SEARCH_PROVIDER'] = env_dict['SEARCH_PROVIDER'] + if 'SEARCH_API_KEY' in env_dict: + resources['search']['SEARCH_API_KEY'] = env_dict['SEARCH_API_KEY'] + + return resources + + +def _connect_database(group): + 'Connect to SQL database using fastsql or sqlalchemy' + url = group.get('DATABASE_URL') + + try: + from fastsql import database + return database(url) + except ImportError: + pass + + try: + import sqlalchemy + return sqlalchemy.create_engine(url).connect() + except ImportError: + raise ImportError('Install fastsql (pip install fastsql) or sqlalchemy to connect to databases.') + + +def _connect_mongo(group): + 'Connect to MongoDB' + url = group.get('DATABASE_URL') + + try: + from pymongo import MongoClient + return MongoClient(url) + except ImportError: + raise ImportError('Install pymongo (pip install pymongo) to connect to MongoDB.') + + +def _connect_redis(group): + 'Connect to Redis' + url = group.get('REDIS_URL') + + try: + import redis + return redis.Redis.from_url(url) + except ImportError: + raise ImportError('Install redis (pip install redis) to connect to Redis.') + + +def _connect_storage(group): + 'Connect to object storage using fsspec' + storage_type = group.get('_type') + + try: + import fsspec + except ImportError: + raise ImportError('Install fsspec (pip install fsspec s3fs adlfs) to connect to storage.') + + if storage_type == 'minio': + # MinIO with S3 protocol + endpoint = group.get('S3_ENDPOINT') + key = group.get('S3_ACCESS_KEY') + secret = group.get('S3_SECRET_KEY') + return fsspec.filesystem('s3', key=key, secret=secret, + client_kwargs={'endpoint_url': endpoint}) + elif storage_type == 's3': + # AWS S3 (uses default credentials) + return fsspec.filesystem('s3') + elif storage_type == 'azure_blob': + # Azure Blob Storage + connection_string = 
group.get('AZURE_STORAGE_CONNECTION_STRING') + return fsspec.filesystem('abfs', connection_string=connection_string) + elif storage_type == 'gcs': + # Google Cloud Storage (uses default credentials) + return fsspec.filesystem('gcs') + else: + raise ValueError(f'Unknown storage type: {storage_type}') + + +def _connect_queue(group): + 'Connect to message queue' + queue_type = group.get('_type') + url = group.get('QUEUE_URL') + + if queue_type == 'rabbitmq': + try: + import pika + return pika.BlockingConnection(pika.URLParameters(url)).channel() + except ImportError: + raise ImportError('Install pika (pip install pika) to connect to RabbitMQ.') + elif queue_type == 'sqs': + try: + import boto3 + return boto3.client('sqs') + except ImportError: + raise ImportError('Install boto3 (pip install boto3) to connect to AWS SQS.') + elif queue_type == 'servicebus': + try: + from azure.servicebus import ServiceBusClient + return ServiceBusClient.from_connection_string(url) + except ImportError: + raise ImportError('Install azure-servicebus (pip install azure-servicebus) to connect to Azure Service Bus.') + elif queue_type == 'pubsub': + try: + from google.cloud import pubsub_v1 + return pubsub_v1.PublisherClient() + except ImportError: + raise ImportError('Install google-cloud-pubsub (pip install google-cloud-pubsub) to connect to Google Pub/Sub.') + else: + raise ValueError(f'Unknown queue type: {queue_type}') + + +def _connect_search(group): + 'Connect to search engine' + search_type = group.get('_type') + url = group.get('SEARCH_URL') + + if search_type == 'elasticsearch': + try: + from elasticsearch import Elasticsearch + return Elasticsearch(url) + except ImportError: + raise ImportError('Install elasticsearch (pip install elasticsearch) to connect to Elasticsearch.') + elif search_type == 'opensearch': + try: + from opensearchpy import OpenSearch + return OpenSearch(hosts=[url]) + except ImportError: + raise ImportError('Install opensearch-py (pip install opensearch-py) 
to connect to OpenSearch.') + elif search_type == 'azure_search': + try: + from azure.search.documents import SearchClient + from azure.core.credentials import AzureKeyCredential + api_key = group.get('SEARCH_API_KEY') + # Parse endpoint and index name from URL + # URL format: https://{name}.search.windows.net + return SearchClient(endpoint=url, index_name='*', credential=AzureKeyCredential(api_key)) + except ImportError: + raise ImportError('Install azure-search-documents (pip install azure-search-documents) to connect to Azure Search.') + else: + raise ValueError(f'Unknown search type: {search_type}') + + +def _connect_llm(group): + 'Connect to LLM endpoint' + llm_type = group.get('_type') + + # Try lisette first (AnswerDotAI's litellm wrapper) + if llm_type in ('openai', 'azure_openai', 'ollama'): + model = group.get('LLM_MODEL', 'gpt-4o') + + try: + from lisette import Chat + return Chat(model) + except ImportError: + pass + + # Fallback to raw openai + try: + import openai + + if llm_type == 'openai': + api_key = group.get('OPENAI_API_KEY') + return openai.OpenAI(api_key=api_key) + elif llm_type == 'azure_openai': + endpoint = group.get('LLM_ENDPOINT') + api_key = group.get('AZURE_OPENAI_API_KEY') + return openai.AzureOpenAI( + azure_endpoint=endpoint, + api_key=api_key, + api_version='2024-02-01' + ) + elif llm_type == 'ollama': + endpoint = group.get('LLM_ENDPOINT') + return openai.OpenAI(base_url=f'{endpoint}/v1', api_key='ollama') + except ImportError: + raise ImportError('Install lisette (pip install lisette) or openai (pip install openai) to connect to LLM services.') + + elif llm_type == 'bedrock': + try: + import boto3 + region = group.get('AWS_REGION', 'us-east-1') + return boto3.client('bedrock-runtime', region_name=region) + except ImportError: + raise ImportError('Install boto3 (pip install boto3) to connect to AWS Bedrock.') + + else: + raise ValueError(f'Unknown LLM type: {llm_type}') diff --git a/fastops/gcp.py b/fastops/gcp.py new file mode 
100644 index 0000000..62bdfc9 --- /dev/null +++ b/fastops/gcp.py @@ -0,0 +1,122 @@ +"""Google Cloud Platform CLI wrapper and opinionated resource builders""" + +# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/14_gcp.ipynb. + +# %% auto #0 +__all__ = ['Gcp', 'gcloud', 'gcp_stack'] + +# %% ../nbs/14_gcp.ipynb +import os, json, subprocess +from .core import Cli + +# %% ../nbs/14_gcp.ipynb +def callgcloud(*args): + 'Run a gcloud command and return parsed JSON output' + cmd = ['gcloud'] + list(args) + ['--format=json'] + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + raise RuntimeError(f'gcloud error: {result.stderr}') + return json.loads(result.stdout) if result.stdout.strip() else {} + +class Gcp(Cli): + 'Google Cloud CLI wrapper' + def _run(self, cmd, *args): return callgcloud(cmd, *args) + +gcloud = Gcp() + +# %% ../nbs/14_gcp.ipynb +def gcp_stack(name, *, image=None, port=8080, region='us-central1', project=None, + postgres=False, redis=False, domain=None, min_instances=0, max_instances=10, + memory='512Mi', cpu='1', env=None, service_account=None): + 'Deploy a containerized app to GCP Cloud Run with optional managed services' + result = {'name': name, 'region': region, 'services': []} + + proj = project or os.environ.get('GCLOUD_PROJECT') or os.environ.get('GOOGLE_CLOUD_PROJECT', '') + proj_args = ['--project', proj] if proj else [] + + app_env = dict(env or {}) + + # Optional: Cloud SQL (PostgreSQL) + if postgres: + db_instance = f'{name}-db' + db_password = os.environ.get('DB_PASSWORD', 'changeme') + try: + callgcloud('sql', 'instances', 'create', db_instance, + '--database-version=POSTGRES_16', + '--tier=db-f1-micro', + f'--region={region}', + '--root-password=' + db_password, + *proj_args) + except RuntimeError as e: + if 'already exists' not in str(e).lower(): raise + + callgcloud('sql', 'databases', 'create', name, + f'--instance={db_instance}', *proj_args) + + app_env['DATABASE_URL'] = 
f'postgresql://postgres:{db_password}@/{name}?host=/cloudsql/{proj}:{region}:{db_instance}' + result['services'].append({'type': 'cloud-sql', 'instance': db_instance}) + + # Optional: Memorystore (Redis) + if redis: + redis_instance = f'{name}-redis' + try: + callgcloud('redis', 'instances', 'create', redis_instance, + f'--region={region}', + '--size=1', + '--tier=basic', + *proj_args) + except RuntimeError as e: + if 'already exists' not in str(e).lower(): raise + + redis_info = callgcloud('redis', 'instances', 'describe', redis_instance, + f'--region={region}', *proj_args) + redis_host = redis_info.get('host', 'localhost') + redis_port = redis_info.get('port', 6379) + app_env['REDIS_URL'] = f'redis://{redis_host}:{redis_port}' + result['services'].append({'type': 'memorystore', 'instance': redis_instance}) + + # Deploy to Cloud Run + if image: + deploy_args = [ + 'run', 'deploy', name, + f'--image={image}', + f'--port={port}', + f'--region={region}', + f'--memory={memory}', + f'--cpu={cpu}', + f'--min-instances={min_instances}', + f'--max-instances={max_instances}', + '--allow-unauthenticated', + *proj_args, + ] + + if service_account: + deploy_args.append(f'--service-account={service_account}') + + if app_env: + env_str = ','.join(f'{k}={v}' for k, v in app_env.items()) + deploy_args.append(f'--set-env-vars={env_str}') + + if postgres: + deploy_args.append(f'--add-cloudsql-instances={proj}:{region}:{name}-db') + + deploy_result = callgcloud(*deploy_args) + + url = deploy_result.get('status', {}).get('url', f'https://{name}-{region}.run.app') + result['url'] = url + result['services'].append({'type': 'cloud-run', 'name': name, 'url': url}) + + # Custom domain mapping + if domain: + try: + callgcloud('run', 'domain-mappings', 'create', + f'--service={name}', + f'--domain={domain}', + f'--region={region}', + *proj_args) + except RuntimeError as e: + if 'already exists' not in str(e).lower(): raise + result['domain'] = domain + + result['status'] = 'deployed' + 
return result diff --git a/fastops/infra.py b/fastops/infra.py new file mode 100644 index 0000000..270af9d --- /dev/null +++ b/fastops/infra.py @@ -0,0 +1,389 @@ +"""Idempotent server provisioning via pyinfra. Install: pip install fastops[infra]""" + +# %% auto #0 +__all__ = ['provision', 'deploy_infra', 'harden_server', 'install_docker', 'setup_caddy', 'setup_compose_stack', 'server_status'] + +# %% imports +import os, subprocess, shutil +from pathlib import Path + +# %% helper +def _require_pyinfra(): + 'Check pyinfra is installed, raise helpful error if not' + try: + import pyinfra + return pyinfra + except ImportError: + raise ImportError( + 'pyinfra is required for idempotent provisioning.\n' + 'Install it: pip install fastops[infra]\n' + 'Or: pip install pyinfra>=3.0' + ) + +# %% install_docker +def install_docker(user='deploy'): + 'Idempotently install Docker Engine and add user to docker group' + _require_pyinfra() + from pyinfra.operations import apt, server, files + + results = [] + + # Install prerequisites + results.append(apt.packages( + name='Install Docker prerequisites', + packages=['ca-certificates', 'curl', 'gnupg', 'lsb-release'], + update=True, + )) + + # Add Docker GPG key + results.append(server.shell( + name='Add Docker GPG key', + commands=['curl -fsSL https://get.docker.com | sh'], + )) + + # Add user to docker group + results.append(server.shell( + name=f'Add {user} to docker group', + commands=[f'usermod -aG docker {user}'], + )) + + # Enable Docker service + from pyinfra.operations import systemd + results.append(systemd.service( + name='Enable and start Docker', + service='docker', + running=True, + enabled=True, + )) + + return results + +# %% harden_server +def harden_server(ssh_port=22, allowed_ports=None): + 'Idempotent server hardening: UFW, fail2ban, sysctl security tweaks' + _require_pyinfra() + from pyinfra.operations import apt, server, files + + allowed = allowed_ports or [22, 80, 443] + results = [] + + # Install security 
# ...packages   (tail of the "# Install security packages" comment, split at the chunk seam)
    results.append(apt.packages(
        name='Install security packages',
        packages=['ufw', 'fail2ban', 'unattended-upgrades'],
        update=True,  # run apt-get update before installing
    ))

    # Configure UFW: deny all inbound by default, allow all outbound
    results.append(server.shell(
        name='Configure UFW defaults',
        commands=[
            'ufw default deny incoming',
            'ufw default allow outgoing',
        ],
    ))

    # Open each allowed port individually (ufw allow is idempotent)
    for port in allowed:
        results.append(server.shell(
            name=f'Allow port {port}',
            commands=[f'ufw allow {port}'],
        ))

    # 'ufw enable' prompts for confirmation; pipe "y" to answer non-interactively
    results.append(server.shell(
        name='Enable UFW',
        commands=['echo "y" | ufw enable'],
    ))

    # Configure fail2ban: ban IPs for 1h after 5 failed SSH logins
    jail_conf = '''[sshd]
enabled = true
port = {ssh_port}
filter = sshd
logpath = /var/log/auth.log
maxretry = 5
bantime = 3600
'''.format(ssh_port=ssh_port)

    # NOTE(review): pyinfra's files.put documented signature is put(src, dest, ...)
    # where src is a path or file-like object (e.g. io.StringIO); a 'contents'
    # kwarg is not part of the documented API — confirm against the installed
    # pyinfra version before relying on this.
    results.append(files.put(
        name='Configure fail2ban jail',
        src=None,  # Use content via StringIO
        dest='/etc/fail2ban/jail.local',
        contents=jail_conf,
    ))

    # Kernel network hardening: reverse-path filtering, ignore broadcast pings,
    # reject ICMP redirects / source-routed packets, disable SysRq
    sysctl_conf = '''# Hardening
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv6.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv6.conf.all.accept_source_route = 0
kernel.sysrq = 0
'''
    results.append(files.put(
        name='Apply sysctl hardening',
        src=None,
        dest='/etc/sysctl.d/99-hardening.conf',
        contents=sysctl_conf,
    ))

    # Apply all sysctl drop-in files immediately (no reboot needed)
    results.append(server.shell(
        name='Reload sysctl',
        commands=['sysctl --system'],
    ))

    return results

# %% setup_caddy
def setup_caddy(domain, app='app', port=5001, email=None):
    'Install Caddy and configure as reverse proxy — idempotent'
    _require_pyinfra()
    from pyinfra.operations import apt, server, files, systemd

    results = []

    # Install Caddy from the official Cloudsmith repo (not Debian's, which lags)
    results.append(server.shell(
        name='Add Caddy repository',
        commands=[
            'apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl',
            'curl
-1sLf "https://dl.cloudsmith.io/public/caddy/stable/gpg.key" | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg 2>/dev/null || true',
            'curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt" | tee /etc/apt/sources.list.d/caddy-stable.list',
        ],
    ))

    results.append(apt.packages(
        name='Install Caddy',
        packages=['caddy'],
        update=True,  # refresh indexes so the new Caddy repo is visible
    ))

    # Generate Caddyfile using fastops.proxy
    from .proxy import Caddyfile
    cf = Caddyfile(domain, app, port).production()
    if email:
        # email is used for ACME/Let's Encrypt registration on the Caddyfile builder
        cf = cf.email(email)

    # NOTE(review): as in harden_server, 'contents=' is not a documented
    # files.put kwarg (src expects a path/file-like) — confirm against pyinfra.
    results.append(files.put(
        name='Deploy Caddyfile',
        dest='/etc/caddy/Caddyfile',
        contents=str(cf),
    ))

    results.append(systemd.service(
        name='Enable and start Caddy',
        service='caddy',
        running=True,
        enabled=True,
        restarted=True,  # Restart to pick up new config
    ))

    return results

# %% setup_compose_stack
def setup_compose_stack(compose, path='/srv/app', env=None):
    'Deploy a Compose stack idempotently — only changes what differs'
    _require_pyinfra()
    from pyinfra.operations import server, files

    results = []

    # Ensure the deployment directory exists
    results.append(files.directory(
        name=f'Create {path}',
        path=path,
        present=True,
    ))

    # Write compose file — pyinfra will only update if content changed.
    # NOTE(review): hasattr(compose, '__str__') is True for every Python object,
    # so the else-branch is dead; this is effectively always str(compose).
    compose_str = str(compose) if hasattr(compose, '__str__') else compose
    results.append(files.put(
        name='Deploy docker-compose.yml',
        dest=f'{path}/docker-compose.yml',
        contents=compose_str,
    ))

    # Write .env file if provided (secrets land here, hence mode 600)
    if env:
        env_content = '\n'.join(f'{k}={v}' for k, v in env.items())
        results.append(files.put(
            name='Deploy .env file',
            dest=f'{path}/.env',
            contents=env_content,
            mode='600',  # Restrictive permissions for secrets
        ))

    # Pull images (best-effort: failures are swallowed via `|| true`),
    # then bring the stack up, removing containers no longer in the file
    results.append(server.shell(
        name='Pull and deploy compose stack',
        commands=[
            f'cd {path} && docker compose pull --quiet 2>/dev/null || true',
            f'cd {path} && docker compose up -d --remove-orphans',
        ],
    ))

    return results

# %% provision
def provision(host, *, user='deploy', key=None, port=22,
              docker=True, harden=True, compose=None, domain=None,
              caddy_port=5001, env=None, packages=None, **kw):
    '''Idempotent VPS provisioning — safe to run repeatedly.

    Uses pyinfra for drift detection and state management.
    Falls back to fastops.vps (SSH/rsync) if pyinfra is not installed.

    Args:
        host: IP or hostname of the server
        user: SSH user (default: deploy)
        key: Path to SSH private key
        port: SSH port (default: 22)
        docker: Install Docker (default: True)
        harden: Apply security hardening (default: True)
        compose: Compose object or YAML string to deploy
        domain: Domain for Caddy reverse proxy
        caddy_port: App port for Caddy to proxy to (default: 5001)
        env: Dict of environment variables for the app
        packages: Additional apt packages to install

    Returns dict with status and details.
    '''
    result = {'host': host, 'user': user, 'method': 'unknown'}

    try:
        _require_pyinfra()
        result['method'] = 'pyinfra'

        # NOTE(review): State/Inventory/connect_all/run_ops are imported but
        # never called below, and inventory_data is built but never handed to
        # pyinfra. The operation helpers (apt.packages, harden_server, ...)
        # are invoked outside any pyinfra deploy/State context, so it is
        # unclear they ever execute against `host` — verify end-to-end.
        from pyinfra import state
        from pyinfra.api import Config, Inventory, State
        from pyinfra.api.connect import connect_all
        from pyinfra.api.operations import run_ops
        from pyinfra.operations import apt

        # Build inventory (host connection data for pyinfra)
        ssh_key_path = os.path.expanduser(key) if key else None
        inventory_data = {
            host: {
                'ssh_user': user,
                'ssh_port': port,
            }
        }
        if ssh_key_path:
            inventory_data[host]['ssh_key'] = ssh_key_path

        # Install additional packages
        if packages:
            apt.packages(
                name='Install additional packages',
                packages=list(packages),
                update=True,
            )

        # Harden server (UFW + fail2ban + sysctl); keeps the SSH port open
        if harden:
            harden_server(ssh_port=port)

        # Install Docker and add `user` to the docker group
        if docker:
            install_docker(user=user)

        # Setup Caddy reverse proxy if a domain was provided
        if domain:
            setup_caddy(domain, port=caddy_port, email=kw.get('email'))

        # Deploy compose stack to kw['deploy_path'] (default /srv/app)
        if compose:
            deploy_path = kw.get('deploy_path', '/srv/app')
            setup_compose_stack(compose,
                                path=deploy_path, env=env)

        result['status'] = 'provisioned'
        result['docker'] = docker
        result['hardened'] = harden
        result['domain'] = domain

    except ImportError:
        # Fallback to SSH-based approach when pyinfra is absent
        result['method'] = 'ssh'
        from .vps import run_ssh, deploy as ssh_deploy

        print('pyinfra not installed, using SSH fallback (not idempotent)')
        print('Install pyinfra for idempotent provisioning: pip install fastops[infra]')

        if compose:
            deploy_path = kw.get('deploy_path', '/srv/app')
            ssh_deploy(compose, host, user=user, key=key, path=deploy_path)

        # 'deployed' (not 'provisioned') signals the weaker SSH path ran;
        # hardening is never attempted in this fallback
        result['status'] = 'deployed'
        result['docker'] = docker
        result['hardened'] = False
        result['domain'] = domain

    return result

# %% deploy_infra
def deploy_infra(host, compose, *, user='deploy', key=None, path='/srv/app', env=None, pull=True):
    'Deploy/update a Compose stack on a remote server — idempotent if pyinfra is available'
    # NOTE(review): in the pyinfra branch the host/user/key/pull arguments are
    # never used — setup_compose_stack receives no connection info. Verify the
    # pyinfra path actually targets `host`.
    try:
        _require_pyinfra()
        return setup_compose_stack(compose, path=path, env=env)
    except ImportError:
        from .vps import deploy as ssh_deploy
        return ssh_deploy(compose, host, user=user, key=key, path=path, pull=pull)

# %% server_status
def server_status(host, *, user='deploy', key=None, port=22):
    'Check server status: Docker, Caddy, containers, disk, memory'
    from .vps import run_ssh

    status = {'host': host}

    # Each probe is independent and best-effort: a failure records a
    # falsy/empty placeholder instead of aborting the whole status check.
    # NOTE(review): the bare `except:` clauses below also swallow
    # KeyboardInterrupt/SystemExit — `except Exception:` would be safer.
    try:
        # Docker status: systemctl prints "active" when running, else we echo "stopped"
        status['docker'] = 'running' in run_ssh(host, 'systemctl is-active docker 2>/dev/null || echo stopped', user=user, key=key)
    except:
        status['docker'] = False

    try:
        # Running containers as "name: status" strings; "none" sentinel filtered out
        containers = run_ssh(host, 'docker ps --format "{{.Names}}: {{.Status}}" 2>/dev/null || echo none', user=user, key=key)
        status['containers'] = [c.strip() for c in containers.split('\n') if c.strip() and c.strip() != 'none']
    except:
        status['containers'] = []

    try:
        # Caddy service status, same pattern as the Docker probe
        status['caddy'] = 'running' in run_ssh(host, 'systemctl is-active caddy 2>/dev/null || echo stopped', user=user, key=key)
    except:
status['caddy'] = False + + try: + # Disk usage + disk = run_ssh(host, "df -h / | tail -1 | awk '{print $5}'", user=user, key=key) + status['disk_usage'] = disk.strip() + except: + status['disk_usage'] = 'unknown' + + try: + # Memory usage + mem = run_ssh(host, "free -m | awk 'NR==2{printf \"%s/%sMB (%.0f%%)\", $3, $2, $3*100/$2}'", user=user, key=key) + status['memory'] = mem.strip() + except: + status['memory'] = 'unknown' + + try: + # Uptime + uptime = run_ssh(host, 'uptime -p', user=user, key=key) + status['uptime'] = uptime.strip() + except: + status['uptime'] = 'unknown' + + try: + # UFW status + status['ufw'] = 'active' in run_ssh(host, 'ufw status 2>/dev/null || echo inactive', user=user, key=key) + except: + status['ufw'] = False + + return status diff --git a/fastops/resources.py b/fastops/resources.py new file mode 100644 index 0000000..bbd5fae --- /dev/null +++ b/fastops/resources.py @@ -0,0 +1,872 @@ +"""Cloud-agnostic resource provisioning: databases, caches, queues, storage, LLM endpoints, and serverless functions.""" + +__all__ = ['database', 'cache', 'queue', 'bucket', 'llm', 'function', 'search', 'stack'] + +import os +import json +import subprocess +import shutil +from pathlib import Path + + +DEFAULTS = { + 'db': lambda: database(), + 'cache': lambda: cache(), +} + + +def _check_cli(cli_name): + 'Check if a CLI tool is installed and give a helpful error if not' + if shutil.which(cli_name) is None: + raise EnvironmentError( + f'{cli_name} CLI not found. 
Install it:\n'
            f'  aws    → pip install awscli OR https://aws.amazon.com/cli/\n'
            f'  az     → https://learn.microsoft.com/en-us/cli/azure/install-azure-cli\n'
            f'  gcloud → https://cloud.google.com/sdk/docs/install'
        )

def database(name='db', engine='postgres', provider='docker', **kw):
    'Provision a database: postgres, mysql, or mongo'
    # NOTE(review): default password 'secret' is a weak fallback — fine for
    # local docker dev, dangerous if it leaks into a cloud provider branch.
    password = kw.get('password', os.environ.get('DB_PASSWORD', 'secret'))

    if provider == 'docker':
        # Each docker branch returns (env_vars_for_app, compose_service_dict)
        if engine == 'postgres':
            version = kw.get('version', '16')
            env_dict = {
                'DATABASE_URL': f'postgresql://postgres:{password}@db:5432/{name}',
                'DB_PROVIDER': 'docker'
            }
            svc = {
                'image': f'postgres:{version}',
                'env': {
                    'POSTGRES_PASSWORD': password,
                    'POSTGRES_DB': name
                },
                'ports': {'5432': '5432'},
                'volumes': {'pgdata': '/var/lib/postgresql/data'},
                'restart': 'unless-stopped',
                'healthcheck': {
                    'test': ['CMD-SHELL', 'pg_isready -U postgres'],
                    'interval': '10s',
                    'timeout': '5s',
                    'retries': 5
                }
            }
            return (env_dict, svc)

        elif engine == 'mysql':
            version = kw.get('version', '8')
            env_dict = {
                'DATABASE_URL': f'mysql://root:{password}@db:3306/{name}',
                'DB_PROVIDER': 'docker'
            }
            svc = {
                'image': f'mysql:{version}',
                'env': {
                    'MYSQL_ROOT_PASSWORD': password,
                    'MYSQL_DATABASE': name
                },
                'ports': {'3306': '3306'},
                'volumes': {'mysqldata': '/var/lib/mysql'},
                'restart': 'unless-stopped',
                'healthcheck': {
                    'test': ['CMD', 'mysqladmin', 'ping', '-h', 'localhost'],
                    'interval': '10s',
                    'timeout': '5s',
                    'retries': 5
                }
            }
            return (env_dict, svc)

        elif engine == 'mongo':
            version = kw.get('version', '7')
            env_dict = {
                'DATABASE_URL': f'mongodb://admin:{password}@db:27017/{name}?authSource=admin',
                'DB_PROVIDER': 'docker'
            }
            svc = {
                'image': f'mongo:{version}',
                'env': {
                    'MONGO_INITDB_ROOT_USERNAME': 'admin',
                    'MONGO_INITDB_ROOT_PASSWORD': password
                },
                'ports': {'27017': '27017'},
                'volumes': {'mongodata': '/data/db'},
                'restart': 'unless-stopped',
                'healthcheck': {
                    'test': ['CMD', 'mongosh', '--eval', 'db.adminCommand("ping")'],
                    'interval': '10s',
                    'timeout': '5s',
                    'retries': 5
                }
            }
            return (env_dict, svc)

    elif provider == 'aws':
        _check_cli('aws')
        from .aws import callaws
        instance_class = kw.get('instance_class', 'db.t3.micro')
        username = kw.get('username', 'appadmin')
        storage = kw.get('storage', 20)

        try:
            # Private, encrypted RDS instance; `engine` is passed through to RDS
            result = callaws('rds', 'create-db-instance',
                             '--db-instance-identifier', name,
                             '--engine', engine,
                             '--db-instance-class', instance_class,
                             '--master-username', username,
                             '--master-user-password', password,
                             '--allocated-storage', str(storage),
                             '--no-publicly-accessible',
                             '--storage-encrypted')

            endpoint = result['DBInstance']['Endpoint']['Address']
            port = result['DBInstance']['Endpoint']['Port']
        except Exception as e:
            if 'DBInstanceAlreadyExists' in str(e):
                # Idempotency: describe the existing instance instead of failing
                result = callaws('rds', 'describe-db-instances',
                                 '--db-instance-identifier', name)
                inst = result['DBInstances'][0]
                endpoint = inst['Endpoint']['Address']
                port = inst['Endpoint']['Port']
            else:
                raise

        # NOTE(review): the URL scheme is hardcoded to postgresql:// even though
        # this branch accepts engine='mysql'/'mongo' above — the connection
        # string will be wrong for non-postgres RDS engines.
        env_dict = {
            'DATABASE_URL': f'postgresql://{username}:{password}@{endpoint}:{port}/{name}',
            'DB_PROVIDER': 'rds'
        }
        return (env_dict, None)

    elif provider == 'azure':
        _check_cli('az')
        from .azure import callaz
        # NOTE(review): rg defaults to None; the az CLI call will fail without
        # a resource_group kwarg — presumably intentional, but undocumented.
        rg = kw.get('resource_group')
        sku = kw.get('sku', 'Standard_B1ms')
        version = kw.get('version', '16')
        storage_size = kw.get('storage_size', 32)
        admin_user = kw.get('admin_user', 'appadmin')

        try:
            # Azure Flexible Server for PostgreSQL, no public network access
            result = callaz('postgres', 'flexible-server', 'create',
                            '--name', name,
                            '--resource-group', rg,
                            '--sku-name', sku,
                            '--version', str(version),
                            '--storage-size', str(storage_size),
                            '--admin-user', admin_user,
                            '--admin-password', password,
                            '--public-access', 'None')

            host = result.get('fullyQualifiedDomainName', f'{name}.postgres.database.azure.com')
        except Exception as e:
            if 'ResourceAlreadyExists' in
str(e) or 'already exists' in str(e).lower(): + # Get existing server details + result = callaz('postgres', 'flexible-server', 'show', + '--name', name, + '--resource-group', rg) + host = result.get('fullyQualifiedDomainName', f'{name}.postgres.database.azure.com') + else: + raise + + env_dict = { + 'DATABASE_URL': f'postgresql://{admin_user}:{password}@{host}:5432/{name}', + 'DB_PROVIDER': 'azure_postgres' + } + return (env_dict, None) + + return ({}, None) + + +def cache(name='redis', provider='docker', **kw): + 'Provision a Redis cache' + if provider == 'docker': + password = kw.get('password', '') + env_dict = { + 'REDIS_URL': 'redis://redis:6379', + 'CACHE_PROVIDER': 'redis' + } + svc = { + 'image': 'redis:7-alpine', + 'command': 'redis-server --appendonly yes', + 'ports': {'6379': '6379'}, + 'volumes': {'redis-data': '/data'}, + 'restart': 'unless-stopped', + 'healthcheck': { + 'test': ['CMD', 'redis-cli', 'ping'], + 'interval': '10s', + 'timeout': '5s', + 'retries': 5 + } + } + return (env_dict, svc) + + elif provider == 'aws': + _check_cli('aws') + from .aws import callaws + node_type = kw.get('node_type', 'cache.t3.micro') + + try: + result = callaws('elasticache', 'create-cache-cluster', + '--cache-cluster-id', name, + '--cache-node-type', node_type, + '--engine', 'redis', + '--num-cache-nodes', '1') + + endpoint = result['CacheCluster']['CacheNodes'][0]['Endpoint']['Address'] + port = result['CacheCluster']['CacheNodes'][0]['Endpoint']['Port'] + except Exception as e: + if 'CacheClusterAlreadyExists' in str(e): + # Describe existing cluster + result = callaws('elasticache', 'describe-cache-clusters', + '--cache-cluster-id', name, + '--show-cache-node-info') + cluster = result['CacheClusters'][0] + endpoint = cluster['CacheNodes'][0]['Endpoint']['Address'] + port = cluster['CacheNodes'][0]['Endpoint']['Port'] + else: + raise + + env_dict = { + 'REDIS_URL': f'redis://{endpoint}:{port}', + 'CACHE_PROVIDER': 'elasticache' + } + return (env_dict, None) + + 
elif provider == 'azure': + _check_cli('az') + from .azure import callaz + rg = kw.get('resource_group') + sku = kw.get('sku', 'Basic') + vm_size = kw.get('vm_size', 'C0') + + try: + result = callaz('redis', 'create', + '--name', name, + '--resource-group', rg, + '--sku', sku, + '--vm-size', vm_size) + + host = result.get('hostName', f'{name}.redis.cache.windows.net') + except Exception as e: + if 'ResourceAlreadyExists' in str(e) or 'already exists' in str(e).lower(): + # Get existing cache + result = callaz('redis', 'show', + '--name', name, + '--resource-group', rg) + host = result.get('hostName', f'{name}.redis.cache.windows.net') + else: + raise + + # Get access key + keys = callaz('redis', 'list-keys', + '--name', name, + '--resource-group', rg) + key = keys.get('primaryKey', '') + + env_dict = { + 'REDIS_URL': f'rediss://:{key}@{host}:6380', + 'CACHE_PROVIDER': 'azure_redis' + } + return (env_dict, None) + + return ({}, None) + + +def queue(name='tasks', provider='docker', **kw): + 'Provision a message queue' + if provider == 'docker': + password = kw.get('password', 'guest') + env_dict = { + 'QUEUE_URL': f'amqp://guest:{password}@rabbitmq:5672/', + 'QUEUE_NAME': name + } + svc = { + 'image': 'rabbitmq:3-management', + 'env': { + 'RABBITMQ_DEFAULT_USER': 'guest', + 'RABBITMQ_DEFAULT_PASS': password + }, + 'ports': {'5672': '5672', '15672': '15672'}, + 'volumes': {'rabbitmq-data': '/var/lib/rabbitmq'}, + 'restart': 'unless-stopped', + 'healthcheck': { + 'test': ['CMD', 'rabbitmq-diagnostics', '-q', 'ping'], + 'interval': '10s', + 'timeout': '5s', + 'retries': 5 + } + } + return (env_dict, svc) + + elif provider == 'aws': + _check_cli('aws') + from .aws import callaws + + result = callaws('sqs', 'create-queue', + '--queue-name', name, + '--attributes', json.dumps({ + 'VisibilityTimeout': '30', + 'MessageRetentionPeriod': '345600' + })) + + env_dict = { + 'QUEUE_URL': result['QueueUrl'], + 'QUEUE_NAME': name, + 'QUEUE_PROVIDER': 'sqs' + } + return (env_dict, 
None) + + elif provider == 'azure': + _check_cli('az') + from .azure import callaz + rg = kw.get('resource_group') + namespace = kw.get('namespace', f'{name}-ns') + + try: + # Create namespace + callaz('servicebus', 'namespace', 'create', + '--name', namespace, + '--resource-group', rg) + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + try: + # Create queue + callaz('servicebus', 'queue', 'create', + '--name', name, + '--namespace-name', namespace, + '--resource-group', rg) + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + # Get connection string + keys = callaz('servicebus', 'namespace', 'authorization-rule', 'keys', 'list', + '--name', 'RootManageSharedAccessKey', + '--namespace-name', namespace, + '--resource-group', rg) + + env_dict = { + 'QUEUE_URL': keys.get('primaryConnectionString', ''), + 'QUEUE_NAME': name, + 'QUEUE_PROVIDER': 'servicebus' + } + return (env_dict, None) + + elif provider == 'gcp': + _check_cli('gcloud') + try: + # Create topic + subprocess.run(['gcloud', 'pubsub', 'topics', 'create', name, '--quiet'], + capture_output=True, text=True, check=True) + except subprocess.CalledProcessError as e: + if 'already exists' not in e.stderr.lower(): + raise + + try: + # Create subscription + sub_name = f'{name}-sub' + subprocess.run(['gcloud', 'pubsub', 'subscriptions', 'create', sub_name, + '--topic', name, '--quiet'], + capture_output=True, text=True, check=True) + except subprocess.CalledProcessError as e: + if 'already exists' not in e.stderr.lower(): + raise + + env_dict = { + 'QUEUE_TOPIC': name, + 'QUEUE_SUBSCRIPTION': sub_name, + 'QUEUE_PROVIDER': 'pubsub' + } + return (env_dict, None) + + return ({}, None) + + +def bucket(name='data', provider='docker', **kw): + 'Provision object storage' + if provider == 'docker': + access_key = kw.get('access_key', 'minioadmin') + secret_key = 
kw.get('secret_key', 'minioadmin') + + env_dict = { + 'S3_ENDPOINT': 'http://minio:9000', + 'S3_BUCKET': name, + 'S3_ACCESS_KEY': access_key, + 'S3_SECRET_KEY': secret_key + } + svc = { + 'image': 'minio/minio:latest', + 'command': 'server /data --console-address ":9001"', + 'env': { + 'MINIO_ROOT_USER': access_key, + 'MINIO_ROOT_PASSWORD': secret_key + }, + 'ports': {'9000': '9000', '9001': '9001'}, + 'volumes': {'minio-data': '/data'}, + 'restart': 'unless-stopped', + 'healthcheck': { + 'test': ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live'], + 'interval': '10s', + 'timeout': '5s', + 'retries': 5 + } + } + return (env_dict, svc) + + elif provider == 'aws': + _check_cli('aws') + from .aws import callaws + region = kw.get('region', 'us-east-1') + + try: + # Create bucket + if region == 'us-east-1': + callaws('s3api', 'create-bucket', '--bucket', name) + else: + callaws('s3api', 'create-bucket', '--bucket', name, + '--create-bucket-configuration', f'LocationConstraint={region}') + except Exception as e: + if 'BucketAlreadyOwnedByYou' not in str(e) and 'BucketAlreadyExists' not in str(e): + raise + + # Enable encryption + callaws('s3api', 'put-bucket-encryption', + '--bucket', name, + '--server-side-encryption-configuration', json.dumps({ + 'Rules': [{ + 'ApplyServerSideEncryptionByDefault': { + 'SSEAlgorithm': 'AES256' + } + }] + })) + + # Block public access + callaws('s3api', 'put-public-access-block', + '--bucket', name, + '--public-access-block-configuration', + 'BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true') + + env_dict = { + 'S3_BUCKET': name, + 'S3_REGION': region, + 'STORAGE_PROVIDER': 'aws' + } + return (env_dict, None) + + elif provider == 'azure': + _check_cli('az') + from .azure import callaz + rg = kw.get('resource_group') + account_name = kw.get('account_name', name.replace('-', '').replace('_', '')[:24]) + + try: + # Create storage account + callaz('storage', 'account', 'create', + 
'--name', account_name, + '--resource-group', rg, + '--encryption-services', 'blob', + '--min-tls-version', 'TLS1_2') + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + try: + # Create container + callaz('storage', 'container', 'create', + '--name', name, + '--account-name', account_name) + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + # Get connection string + keys = callaz('storage', 'account', 'show-connection-string', + '--name', account_name, + '--resource-group', rg) + + env_dict = { + 'AZURE_STORAGE_CONNECTION_STRING': keys.get('connectionString', ''), + 'AZURE_STORAGE_CONTAINER': name, + 'STORAGE_PROVIDER': 'azure' + } + return (env_dict, None) + + elif provider == 'gcp': + _check_cli('gcloud') + location = kw.get('location', 'us') + + try: + # Create bucket + subprocess.run(['gcloud', 'storage', 'buckets', 'create', + f'gs://{name}', + '--location', location, + '--uniform-bucket-level-access', + '--quiet'], + capture_output=True, text=True, check=True) + except subprocess.CalledProcessError as e: + if 'already exists' not in e.stderr.lower(): + raise + + env_dict = { + 'GCS_BUCKET': name, + 'STORAGE_PROVIDER': 'gcp' + } + return (env_dict, None) + + return ({}, None) + + +def llm(name='gpt-4o', provider='openai', **kw): + 'Provision LLM endpoint' + if provider == 'docker': + # Use a small model that runs anywhere for local dev + model_name = 'llama3.2' if name == 'gpt-4o' else name + env_dict = { + 'LLM_ENDPOINT': 'http://ollama:11434', + 'LLM_MODEL': model_name, + 'LLM_PROVIDER': 'ollama' + } + svc = { + 'image': 'ollama/ollama:latest', + 'ports': {'11434': '11434'}, + 'volumes': {'ollama-data': '/root/.ollama'}, + 'restart': 'unless-stopped' + } + + if kw.get('gpu'): + svc['deploy'] = { + 'resources': { + 'reservations': { + 'devices': [{ + 'driver': 'nvidia', + 'count': 1, + 'capabilities': ['gpu'] + }] + } 
+ } + } + + return (env_dict, svc) + + elif provider == 'openai': + api_key = os.environ.get('OPENAI_API_KEY', '${OPENAI_API_KEY}') + env_dict = { + 'LLM_ENDPOINT': 'https://api.openai.com/v1', + 'LLM_MODEL': name, + 'LLM_PROVIDER': 'openai', + 'OPENAI_API_KEY': api_key + } + return (env_dict, None) + + elif provider == 'azure': + _check_cli('az') + from .azure import callaz + rg = kw.get('resource_group') + location = kw.get('location', 'eastus') + + try: + # Create Azure OpenAI resource + callaz('cognitiveservices', 'account', 'create', + '--name', name, + '--resource-group', rg, + '--kind', 'OpenAI', + '--sku', 'S0', + '--location', location) + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + try: + # Deploy model + deployment_name = kw.get('deployment', f'{name}-deployment') + model_name = kw.get('model_name', 'gpt-4') + callaz('cognitiveservices', 'account', 'deployment', 'create', + '--name', name, + '--resource-group', rg, + '--deployment-name', deployment_name, + '--model-name', model_name, + '--model-version', kw.get('model_version', '0613'), + '--model-format', 'OpenAI', + '--sku-capacity', str(kw.get('capacity', 1)), + '--sku-name', 'Standard') + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + # Get endpoint and key + account = callaz('cognitiveservices', 'account', 'show', + '--name', name, + '--resource-group', rg) + endpoint = account.get('properties', {}).get('endpoint', '') + + keys = callaz('cognitiveservices', 'account', 'keys', 'list', + '--name', name, + '--resource-group', rg) + + env_dict = { + 'LLM_ENDPOINT': endpoint, + 'LLM_MODEL': model_name, + 'LLM_PROVIDER': 'azure_openai', + 'AZURE_OPENAI_API_KEY': keys.get('key1', ''), + 'AZURE_OPENAI_DEPLOYMENT': deployment_name + } + return (env_dict, None) + + elif provider == 'aws': + model_id = kw.get('model_id', f'anthropic.{name}') + region = 
kw.get('region', 'us-east-1') + + env_dict = { + 'LLM_MODEL': model_id, + 'LLM_PROVIDER': 'bedrock', + 'AWS_REGION': region + } + return (env_dict, None) + + return ({}, None) + + +def function(name, runtime='python3.12', handler='main.handler', provider='aws', **kw): + 'Provision serverless function' + if provider == 'aws': + _check_cli('aws') + from .aws import callaws + role = kw.get('role') or os.environ.get('LAMBDA_ROLE_ARN') + zip_path = kw.get('zip_path', 'function.zip') + timeout = kw.get('timeout', 30) + memory = kw.get('memory', 256) + + try: + result = callaws('lambda', 'create-function', + '--function-name', name, + '--runtime', runtime, + '--handler', handler, + '--role', role, + '--zip-file', f'fileb://{zip_path}', + '--timeout', str(timeout), + '--memory-size', str(memory)) + + env_dict = { + 'FUNCTION_ARN': result['FunctionArn'], + 'FUNCTION_NAME': name, + 'FUNCTION_PROVIDER': 'lambda' + } + except Exception as e: + if 'ResourceConflictException' in str(e): + # Get existing function + result = callaws('lambda', 'get-function', '--function-name', name) + env_dict = { + 'FUNCTION_ARN': result['Configuration']['FunctionArn'], + 'FUNCTION_NAME': name, + 'FUNCTION_PROVIDER': 'lambda' + } + else: + raise + + return (env_dict, None) + + elif provider == 'azure': + _check_cli('az') + from .azure import callaz + rg = kw.get('resource_group') + location = kw.get('location', 'eastus') + storage_account = kw.get('storage_account', f'{name}storage') + + try: + # Create storage account + callaz('storage', 'account', 'create', + '--name', storage_account, + '--resource-group', rg, + '--location', location) + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + try: + # Create function app + callaz('functionapp', 'create', + '--name', name, + '--resource-group', rg, + '--consumption-plan-location', location, + '--runtime', 'python', + '--runtime-version', runtime.replace('python', ''), + 
                   '--storage-account', storage_account,
                   '--os-type', 'Linux')
        except Exception as e:
            # Idempotency: tolerate an already-existing function app
            if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower():
                raise

        env_dict = {
            'FUNCTION_URL': f'https://{name}.azurewebsites.net',
            'FUNCTION_NAME': name,
            'FUNCTION_PROVIDER': 'azure_functions'
        }
        return (env_dict, None)

    elif provider == 'gcp':
        _check_cli('gcloud')
        region = kw.get('region', 'us-central1')
        # e.g. handler='main.handler' → entry_point 'handler'
        entry_point = kw.get('entry_point', handler.split('.')[-1])

        try:
            result = subprocess.run(['gcloud', 'functions', 'deploy', name,
                                     '--runtime', runtime,
                                     '--trigger-http',
                                     '--allow-unauthenticated',
                                     '--entry-point', entry_point,
                                     '--region', region,
                                     '--quiet'],
                                    capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as e:
            if 'already exists' not in e.stderr.lower():
                raise
            # Function already deployed — fetch its description instead
            result = subprocess.run(['gcloud', 'functions', 'describe', name,
                                     '--region', region,
                                     '--format', 'json'],
                                    capture_output=True, text=True, check=True)

        # Parse output for URL
        # NOTE(review): the describe path uses --format json, whose lines look
        # like '"url": "https://..."' — the '"url":' token never matches the
        # 'url:' substring test below, so `url` stays '' on that path. The
        # line-oriented scan only suits the (yaml-like) deploy output. Also,
        # a later non-matching iteration never resets url, but a later
        # MATCHING line overwrites it — verify which line wins is intended.
        output = result.stdout
        url = ''
        for line in output.split('\n'):
            if 'url:' in line.lower() or 'httpsTrigger' in line:
                url = line.split(':', 1)[1].strip() if ':' in line else ''

        env_dict = {
            'FUNCTION_URL': url,
            'FUNCTION_NAME': name,
            'FUNCTION_PROVIDER': 'gcp_functions'
        }
        return (env_dict, None)

    # Unknown provider: no env vars, no compose service
    return ({}, None)


def search(name='search', provider='docker', **kw):
    'Provision search engine'
    if provider == 'docker':
        env_dict = {
            'SEARCH_URL': 'http://elasticsearch:9200',
            'SEARCH_PROVIDER': 'elasticsearch'
        }
        # Single-node Elasticsearch with security off — dev/local use only
        svc = {
            'image': 'elasticsearch:8.12.0',
            'env': {
                'discovery.type': 'single-node',
                'xpack.security.enabled': 'false',
                'ES_JAVA_OPTS': '-Xms512m -Xmx512m'
            },
            'ports': {'9200': '9200'},
            'volumes': {'es-data': '/usr/share/elasticsearch/data'},
            'restart': 'unless-stopped',
            'healthcheck': {
                'test': ['CMD-SHELL', 'curl -s
http://localhost:9200/_cluster/health || exit 1'], + 'interval': '15s', + 'timeout': '10s', + 'retries': 5 + } + } + return (env_dict, svc) + + elif provider == 'aws': + _check_cli('aws') + from .aws import callaws + instance_type = kw.get('instance_type', 't3.small.search') + volume_size = kw.get('volume_size', 20) + + try: + result = callaws('opensearch', 'create-domain', + '--domain-name', name, + '--engine-version', 'OpenSearch_2.11', + '--cluster-config', json.dumps({ + 'InstanceType': instance_type, + 'InstanceCount': 1 + }), + '--ebs-options', json.dumps({ + 'EBSEnabled': True, + 'VolumeType': 'gp3', + 'VolumeSize': volume_size + })) + + endpoint = result['DomainStatus']['Endpoint'] + except Exception as e: + if 'ResourceAlreadyExistsException' in str(e): + # Describe existing domain + result = callaws('opensearch', 'describe-domain', '--domain-name', name) + endpoint = result['DomainStatus']['Endpoint'] + else: + raise + + env_dict = { + 'SEARCH_URL': f'https://{endpoint}', + 'SEARCH_PROVIDER': 'opensearch' + } + return (env_dict, None) + + elif provider == 'azure': + _check_cli('az') + from .azure import callaz + rg = kw.get('resource_group') + sku = kw.get('sku', 'basic') + + try: + # Create search service + callaz('search', 'service', 'create', + '--name', name, + '--resource-group', rg, + '--sku', sku) + except Exception as e: + if 'ResourceAlreadyExists' not in str(e) and 'already exists' not in str(e).lower(): + raise + + # Get admin key + keys = callaz('search', 'admin-key', 'show', + '--service-name', name, + '--resource-group', rg) + + env_dict = { + 'SEARCH_URL': f'https://{name}.search.windows.net', + 'SEARCH_API_KEY': keys.get('primaryKey', ''), + 'SEARCH_PROVIDER': 'azure_search' + } + return (env_dict, None) + + return ({}, None) + + +def stack(resources=None, provider='docker'): + 'Compose multiple resources into a unified stack' + from .compose import Compose + + # Use DEFAULTS if no resources provided + if resources is None: + resources = 
DEFAULTS + + merged_env = {} + dc = Compose() + volumes = [] + + for name, resource_fn in resources.items(): + env, svc = resource_fn() + merged_env.update(env) + + if svc is not None: + dc = dc.svc(name, **svc) + + # Collect volume names + if 'volumes' in svc and isinstance(svc['volumes'], dict): + for vol_key in svc['volumes'].keys(): + # Skip bind mounts (paths starting with . or /) + if not str(vol_key).startswith('.') and not str(vol_key).startswith('/'): + volumes.append(vol_key) + + # Add unique volumes to Compose + for vol in set(volumes): + dc = dc.volume(vol) + + return (merged_env, dc, list(set(volumes))) diff --git a/fastops/ship.py b/fastops/ship.py index 0055fea..7a3bbbf 100644 --- a/fastops/ship.py +++ b/fastops/ship.py @@ -27,7 +27,7 @@ # %% ../nbs/13_ship.ipynb def ship(path='.', *, to='docker', domain=None, port=None, proxy='caddy', preset='production', tls=True, tunnel=False, security=False, - compliance=None, host=None, user='deploy', key=None, cloud=None): + compliance=None, host=None, user='deploy', key=None, cloud=None, resources=None, **kw): 'Main orchestrator: detect → build → proxy → deploy' result = { @@ -75,13 +75,47 @@ def ship(path='.', *, to='docker', domain=None, port=None, proxy='caddy', print('Building docker-compose configuration...') compose = Compose() - # App service + # Process resources first if provided + resource_env = {} + if resources: + print('Provisioning resources...') + from .resources import stack + + # Determine effective provider based on deployment target + resource_provider = 'docker' + if to in ('azure', 'aws', 'gcp'): + resource_provider = to + + # Build resource stack with the appropriate provider + # Wrap each resource function to apply provider + provider_resources = {} + for res_name, res_fn in resources.items(): + # Create a wrapper that calls the function and applies the provider + def wrapper(fn=res_fn, prov=resource_provider): + return fn() if callable(fn) else fn + provider_resources[res_name] = 
wrapper + + # Get resources environment, compose services, and volumes + res_env, res_dc, res_vols = stack(provider_resources, provider=resource_provider) + + # Merge resource environment variables + resource_env.update(res_env) + + # Merge resource services into main compose + for item in res_dc: + compose = compose._add(item) + + # App service with resource environment app_name = Path(path).name or 'app' app_svc = service( build='.', ports={app_port: app_port} ) + # Add resource environment to app service + if resource_env: + app_svc['environment'] = resource_env + # Load compliance defaults if specified compliance_config = {} if compliance == 'soc2': @@ -184,21 +218,15 @@ def ship(path='.', *, to='docker', domain=None, port=None, proxy='caddy', result['target'] = 'docker' result['url'] = f'http://localhost:{app_port}' - elif to == 'vps': + elif to == 'vps' or (to == 'hetzner' and host): if not host: - raise ValueError('host parameter required for VPS deployment') + raise ValueError('host parameter required for VPS deployment. 
Use to="hetzner" to auto-provision.') print(f'Deploying to VPS {host}...') - from .vps import deploy + from .vps import deploy as vps_deploy - # Deploy using existing vps.py deploy function - deploy_result = deploy( - host=host, - user=user, - key=key, - path=path, - compliance=compliance - ) + deploy_path = kw.get('deploy_path', f'/srv/{app_name}') + vps_deploy(compose, host, user=user, key=key, path=deploy_path) result['status'] = 'deployed' result['target'] = 'vps' result['host'] = host @@ -243,6 +271,144 @@ def ship(path='.', *, to='docker', domain=None, port=None, proxy='caddy', result['target'] = 'aws' result['aws'] = aws_result + elif to == 'hetzner': + print('Deploying to Hetzner...') + from .vps import vps_init, create, deploy as vps_deploy, server_ip, servers, hcloud_auth + + # Server name from app name + server_name = kw.get('server_name', app_name) + server_type = kw.get('server_type', 'cx22') # €4/mo default — cheapest usable + location = kw.get('location', 'nbg1') # Nuremberg, Germany — good EU default + image = kw.get('image', 'ubuntu-24.04') # Latest LTS + ssh_keys = kw.get('ssh_keys', []) + pub_keys = kw.get('pub_keys', '') + + # Read SSH public key from default location if not provided + if not pub_keys: + import os + for key_path in ['~/.ssh/id_ed25519.pub', '~/.ssh/id_rsa.pub']: + expanded = os.path.expanduser(key_path) + if os.path.exists(expanded): + pub_keys = open(expanded).read().strip() + break + + # Check if server already exists + existing = None + try: + existing_servers = servers() + for s in existing_servers: + if s['name'] == server_name: + existing = s + break + except Exception: + pass # hcloud CLI might not be configured yet + + if existing: + print(f'Server {server_name} already exists at {existing["ip"]}') + ip = existing['ip'] + else: + # Generate cloud-init + print(f'Provisioning Hetzner {server_type} in {location}...') + + # Build cloud-init packages list + init_packages = ['git', 'htop', 'curl'] + + # Generate cloud-init 
YAML + cloud_init_yaml = vps_init( + server_name, + pub_keys=pub_keys, + username=user, + docker=True, + packages=init_packages, + cf_token=kw.get('cf_token'), + ) + + # Create the server + ip = create( + server_name, + image=image, + server_type=server_type, + location=location, + cloud_init=cloud_init_yaml, + ssh_keys=ssh_keys, + ) + + # Wait for server to be ready (cloud-init takes ~60-90s) + print(f'Server created at {ip}. Waiting for cloud-init to complete...') + import time + max_wait = kw.get('wait_timeout', 180) # 3 minutes default + waited = 0 + interval = 10 + ready = False + while waited < max_wait: + time.sleep(interval) + waited += interval + try: + from .vps import run_ssh + result_cmd = run_ssh(ip, 'cloud-init status --wait 2>/dev/null || echo done', + user=user, key=key) + if 'done' in result_cmd or 'status: done' in result_cmd: + ready = True + break + except Exception: + pass # SSH not ready yet + print(f' Waiting... ({waited}s)') + + if not ready: + print(f' Warning: cloud-init may not have completed after {max_wait}s. Proceeding anyway.') + + # Configure DNS if domain provided + if domain: + try: + from .cloudflare import dns_record + print(f'Configuring DNS: {domain} → {ip}') + dns_record(domain.split('.')[-2] + '.' + domain.split('.')[-1], + domain.split('.')[0] if '.' 
in domain and len(domain.split('.')) > 2 else '@', + ip, proxied=kw.get('proxied', False)) + except Exception as e: + print(f' DNS configuration skipped: {e}') + + # Deploy the compose stack + print(f'Deploying to {server_name} ({ip})...') + from .vps import deploy as vps_deploy + deploy_path = kw.get('deploy_path', f'/srv/{app_name}') + vps_deploy(compose, ip, user=user, key=key, path=deploy_path) + + result['status'] = 'deployed' + result['target'] = 'hetzner' + result['host'] = ip + result['server_name'] = server_name + result['server_type'] = server_type + result['location'] = location + result['deploy_path'] = deploy_path + result['url'] = f'https://{domain}' if domain else f'http://{ip}:{app_port}' + + elif to == 'gcp': + print('Deploying to GCP Cloud Run...') + from .gcp import gcp_stack + + gcp_result = gcp_stack( + app_name, + image=kw.get('image'), + port=app_port, + region=kw.get('region', 'us-central1'), + project=kw.get('project'), + postgres=kw.get('postgres', False), + redis=kw.get('redis', False), + domain=domain, + min_instances=kw.get('min_instances', 0), + max_instances=kw.get('max_instances', 10), + memory=kw.get('memory', '512Mi'), + cpu=kw.get('cpu', '1'), + env=kw.get('env'), + service_account=kw.get('service_account'), + ) + + result['status'] = 'deployed' + result['target'] = 'gcp' + result['url'] = gcp_result.get('url', f'https://{domain}' if domain else '') + result['gcp'] = gcp_result + else: result['status'] = 'error' result['error'] = f'Unknown deployment target: {to}' diff --git a/fastops/teardown.py b/fastops/teardown.py new file mode 100644 index 0000000..afac635 --- /dev/null +++ b/fastops/teardown.py @@ -0,0 +1,415 @@ +"""Resource teardown and lifecycle management. 
"""Resource teardown and lifecycle management. Safely destroy provisioned resources."""

__all__ = ['destroy', 'destroy_stack', 'status', 'teardown_gcp', 'destroy_cloud_run', 'destroy_cloud_sql', 'destroy_memorystore']

import os
import json
import subprocess
import shutil


def _docker_noop():
    'Docker resources are owned by compose; deletion happens via `docker compose down -v`.'
    return {'destroyed': True, 'message': 'Remove via docker compose down -v'}

def _not_found(msg):
    'Standard "not found / unsupported" response.'
    return {'destroyed': False, 'message': msg}

def _success(msg):
    'Standard successful-deletion response.'
    return {'destroyed': True, 'message': msg}


def destroy(resource_type, name, provider='docker', **kw):
    '''Destroy a single provisioned resource.

    resource_type: one of 'database', 'cache', 'queue', 'bucket', 'llm',
        'search', 'function'.
    name: provider-side identifier of the resource.
    provider: 'docker', 'aws', 'azure', 'gcp' (support varies per type).
    kw: provider-specific extras (e.g. resource_group for Azure).
    Returns a dict containing at least 'destroyed', 'resource', 'provider'
    and 'message'.
    '''
    handler = _DISPATCH.get(resource_type)
    if handler is None:
        return {'destroyed': False, 'resource': name, 'provider': provider,
                'message': f'Unknown resource type: {resource_type}'}
    result = handler(name, provider, **kw)
    # Handlers only fill 'destroyed'/'message'; stamp identity fields here.
    result.setdefault('resource', name)
    result.setdefault('provider', provider)
    return result


def _destroy_database(name, provider, **kw):
    'Destroy a database instance (RDS on AWS, Postgres flexible-server on Azure).'
    if provider == 'docker': return _docker_noop()
    elif provider == 'aws':
        from .aws import callaws
        try:
            # Skip the final snapshot: teardown is meant to be complete.
            callaws('rds', 'delete-db-instance', '--db-instance-identifier', name,
                    '--skip-final-snapshot', '--delete-automated-backups')
        except Exception as e:
            if 'DBInstanceNotFound' in str(e): return _not_found(f'RDS instance {name} not found')
            raise
        return _success(f'RDS instance {name} deletion initiated')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        try:
            callaz('postgres', 'flexible-server', 'delete', '--name', name,
                   '--resource-group', rg, '--yes')
        except Exception as e:
            if 'ResourceNotFound' in str(e): return _not_found(f'Azure DB {name} not found')
            raise
        return _success(f'Azure Postgres {name} deleted')
    return _not_found(f'Unsupported provider: {provider}')


def _destroy_cache(name, provider, **kw):
    'Destroy a cache instance (ElastiCache on AWS, Azure Cache for Redis on Azure).'
    if provider == 'docker': return _docker_noop()
    elif provider == 'aws':
        from .aws import callaws
        try:
            callaws('elasticache', 'delete-cache-cluster', '--cache-cluster-id', name)
        except Exception as e:
            if 'CacheClusterNotFound' in str(e): return _not_found(f'ElastiCache cluster {name} not found')
            raise
        return _success(f'ElastiCache cluster {name} deletion initiated')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        try:
            callaz('redis', 'delete', '--name', name, '--resource-group', rg, '--yes')
        except Exception as e:
            if 'ResourceNotFound' in str(e): return _not_found(f'Azure Redis {name} not found')
            raise
        return _success(f'Azure Redis {name} deleted')
    return _not_found(f'Unsupported provider: {provider}')


def _destroy_queue(name, provider, **kw):
    'Destroy a message queue (SQS, Azure Service Bus namespace, or GCP Pub/Sub topic+sub).'
    if provider == 'docker': return _docker_noop()
    elif provider == 'aws':
        from .aws import callaws
        try:
            # SQS deletion needs the queue URL, not the name.
            result = callaws('sqs', 'get-queue-url', '--queue-name', name)
            callaws('sqs', 'delete-queue', '--queue-url', result['QueueUrl'])
        except Exception as e:
            if 'NonExistentQueue' in str(e) or 'QueueDoesNotExist' in str(e):
                return _not_found(f'SQS queue {name} not found')
            raise
        return _success(f'SQS queue {name} deleted')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        # Default mirrors the '{name}-ns' convention used at provision time —
        # TODO confirm against the provisioning side.
        namespace = kw.get('namespace', f'{name}-ns')
        try:
            # Deleting the namespace removes all queues inside it.
            callaz('servicebus', 'namespace', 'delete', '--name', namespace,
                   '--resource-group', rg, '--yes')
        except Exception as e:
            if 'ResourceNotFound' in str(e): return _not_found(f'Azure ServiceBus namespace {namespace} not found')
            raise
        return _success(f'Azure ServiceBus namespace {namespace} deleted')
    elif provider == 'gcp':
        try:
            # Subscription first, then the topic it is attached to.
            sub_name = f'{name}-sub'
            subprocess.run(['gcloud', 'pubsub', 'subscriptions', 'delete', sub_name, '--quiet'],
                           capture_output=True, text=True, check=True)
            subprocess.run(['gcloud', 'pubsub', 'topics', 'delete', name, '--quiet'],
                           capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as e:
            if 'NOT_FOUND' in e.stderr or 'does not exist' in e.stderr.lower():
                return _not_found(f'GCP Pub/Sub topic {name} not found')
            raise
        return _success(f'GCP Pub/Sub topic {name} deleted')
    return _not_found(f'Unsupported provider: {provider}')


def _destroy_bucket(name, provider, **kw):
    'Destroy an object-storage bucket (empties it first where required).'
    if provider == 'docker': return _docker_noop()
    elif provider == 'aws':
        from .aws import callaws
        try:
            # S3 refuses to delete a non-empty bucket, so empty it first.
            callaws('s3', 'rm', f's3://{name}', '--recursive')
            callaws('s3api', 'delete-bucket', '--bucket', name)
        except Exception as e:
            if 'NoSuchBucket' in str(e): return _not_found(f'S3 bucket {name} not found')
            raise
        return _success(f'S3 bucket {name} deleted')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        # Storage account names: alphanumeric only, max 24 chars.
        account_name = kw.get('account_name', name.replace('-', '').replace('_', '')[:24])
        try:
            # NOTE(review): this removes the whole storage account, not only
            # the container — assumed one-account-per-resource; confirm.
            callaz('storage', 'container', 'delete', '--name', name,
                   '--account-name', account_name, '--yes')
            callaz('storage', 'account', 'delete', '--name', account_name,
                   '--resource-group', rg, '--yes')
        except Exception as e:
            if 'ResourceNotFound' in str(e) or 'NotFound' in str(e):
                return _not_found(f'Azure storage {name} not found')
            raise
        return _success(f'Azure storage account {account_name} deleted')
    elif provider == 'gcp':
        try:
            subprocess.run(['gcloud', 'storage', 'rm', '-r', f'gs://{name}', '--quiet'],
                           capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as e:
            if 'NOT_FOUND' in e.stderr or 'does not exist' in e.stderr.lower():
                return _not_found(f'GCS bucket {name} not found')
            raise
        return _success(f'GCS bucket {name} deleted')
    return _not_found(f'Unsupported provider: {provider}')


def _destroy_llm(name, provider, **kw):
    'Destroy an LLM endpoint (only Azure OpenAI has server-side state to remove).'
    if provider == 'docker': return _docker_noop()
    elif provider == 'openai': return _success('No teardown needed for OpenAI')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        deployment_name = kw.get('deployment', f'{name}-deployment')
        try:
            # Deployment must go before its parent cognitive-services account.
            callaz('cognitiveservices', 'account', 'deployment', 'delete',
                   '--name', name, '--resource-group', rg, '--deployment-name', deployment_name)
            callaz('cognitiveservices', 'account', 'delete',
                   '--name', name, '--resource-group', rg)
        except Exception as e:
            if 'ResourceNotFound' in str(e): return _not_found(f'Azure OpenAI {name} not found')
            raise
        return _success(f'Azure OpenAI account {name} deleted')
    elif provider == 'aws': return _success('No teardown needed for Bedrock')
    return _not_found(f'Unsupported provider: {provider}')


def _destroy_search(name, provider, **kw):
    'Destroy a search engine (OpenSearch domain on AWS, Azure AI Search service).'
    if provider == 'docker': return _docker_noop()
    elif provider == 'aws':
        from .aws import callaws
        try:
            callaws('opensearch', 'delete-domain', '--domain-name', name)
        except Exception as e:
            if 'ResourceNotFoundException' in str(e): return _not_found(f'OpenSearch domain {name} not found')
            raise
        return _success(f'OpenSearch domain {name} deletion initiated')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        try:
            callaz('search', 'service', 'delete', '--name', name, '--resource-group', rg, '--yes')
        except Exception as e:
            if 'ResourceNotFound' in str(e): return _not_found(f'Azure Search {name} not found')
            raise
        return _success(f'Azure Search {name} deleted')
    return _not_found(f'Unsupported provider: {provider}')


def _destroy_function(name, provider, **kw):
    '''Destroy a serverless function (Lambda, Azure Functions, Cloud Functions).

    NOTE(review): unlike the other handlers there is no docker branch here,
    so provider='docker' reports "Unsupported provider" — confirm that is
    intentional (functions may have no local-compose analogue).
    '''
    if provider == 'aws':
        from .aws import callaws
        try:
            callaws('lambda', 'delete-function', '--function-name', name)
        except Exception as e:
            if 'ResourceNotFoundException' in str(e): return _not_found(f'Lambda function {name} not found')
            raise
        return _success(f'Lambda function {name} deleted')
    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        try:
            callaz('functionapp', 'delete', '--name', name, '--resource-group', rg, '--yes')
        except Exception as e:
            if 'ResourceNotFound' in str(e): return _not_found(f'Azure Function {name} not found')
            raise
        return _success(f'Azure Function {name} deleted')
    elif provider == 'gcp':
        region = kw.get('region', 'us-central1')
        try:
            subprocess.run(['gcloud', 'functions', 'delete', name, '--quiet', '--region', region],
                           capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as e:
            if 'NOT_FOUND' in e.stderr or 'does not exist' in e.stderr.lower():
                return _not_found(f'GCP function {name} not found')
            raise
        return _success(f'GCP function {name} deleted')
    return _not_found(f'Unsupported provider: {provider}')


# Dispatch table built once at import time; destroy() previously rebuilt
# this dict on every call.
_DISPATCH = {
    'database': _destroy_database, 'cache': _destroy_cache, 'queue': _destroy_queue,
    'bucket': _destroy_bucket, 'llm': _destroy_llm, 'search': _destroy_search,
    'function': _destroy_function,
}


def _infer_resource_type(env_dict):
    'Infer a resource type from the env-var names its provisioner emitted.'
    if 'DATABASE_URL' in env_dict:
        return 'database'
    elif 'REDIS_URL' in env_dict:
        return 'cache'
    elif 'QUEUE_URL' in env_dict or 'QUEUE_TOPIC' in env_dict:
        return 'queue'
    elif any(k in env_dict for k in ['S3_ENDPOINT', 'S3_BUCKET', 'AZURE_STORAGE_CONNECTION_STRING', 'GCS_BUCKET']):
        return 'bucket'
    elif 'LLM_ENDPOINT' in env_dict or 'LLM_MODEL' in env_dict:
        return 'llm'
    elif 'SEARCH_URL' in env_dict:
        return 'search'
    elif 'FUNCTION_ARN' in env_dict or 'FUNCTION_URL' in env_dict:
        return 'function'
    else:
        return 'unknown'


def destroy_stack(resources, provider='docker', **kw):
    '''Tear down all resources in a stack, in reverse insertion order so
    dependents go down before their dependencies.

    resources: mapping of name -> either a zero-arg factory returning
        (env_dict, svc) or a precomputed (env_dict, svc) tuple.  Passing
        tuples is preferred at teardown time: invoking a provisioning
        factory just to read its env may re-create the resource.
    Returns {name: per-resource result dict}.
    '''
    results = {}
    for name in reversed(list(resources)):
        spec = resources[name]
        try:
            env, _svc = spec() if callable(spec) else spec
            rtype = _infer_resource_type(env)
        except Exception as e:
            # Isolate failures: one broken entry must not abort the whole
            # stack teardown (the previous version raised straight through).
            results[name] = {'destroyed': False, 'resource': name, 'provider': provider,
                             'message': f'Could not determine resource type: {e}'}
            continue
        results[name] = destroy(rtype, name, provider, **kw)
    return results


def status(resource_type, name, provider='docker', **kw):
    '''Quick health check for a provisioned resource.

    Returns a dict with 'healthy' (bool) plus provider-specific detail
    fields; unknown provider/type combinations fall through to an
    "Unsupported" response rather than raising.
    '''
    if provider == 'docker':
        # Healthy == the container exists and its State.Status is "running".
        result = subprocess.run(['docker', 'inspect', '--format', '{{.State.Status}}', name],
                                capture_output=True, text=True)
        running = result.returncode == 0 and 'running' in result.stdout
        return {'healthy': running, 'provider': 'docker', 'name': name}

    elif provider == 'aws':
        from .aws import callaws
        try:
            if resource_type == 'database':
                result = callaws('rds', 'describe-db-instances',
                                 '--db-instance-identifier', name)
                status_val = result['DBInstances'][0]['DBInstanceStatus']
                return {'healthy': status_val == 'available', 'provider': 'aws',
                        'name': name, 'status': status_val}

            elif resource_type == 'cache':
                result = callaws('elasticache', 'describe-cache-clusters',
                                 '--cache-cluster-id', name)
                status_val = result['CacheClusters'][0]['CacheClusterStatus']
                return {'healthy': status_val == 'available', 'provider': 'aws',
                        'name': name, 'status': status_val}

            elif resource_type == 'bucket':
                # head-bucket raises if the bucket does not exist.
                callaws('s3api', 'head-bucket', '--bucket', name)
                return {'healthy': True, 'provider': 'aws', 'name': name}

            elif resource_type == 'search':
                # Healthy == no configuration change currently processing.
                result = callaws('opensearch', 'describe-domain', '--domain-name', name)
                status_val = result['DomainStatus']['Processing']
                return {'healthy': not status_val, 'provider': 'aws',
                        'name': name, 'processing': status_val}

            elif resource_type == 'function':
                result = callaws('lambda', 'get-function', '--function-name', name)
                state = result['Configuration']['State']
                return {'healthy': state == 'Active', 'provider': 'aws',
                        'name': name, 'state': state}

            # Unmatched aws resource types fall through to the final
            # "Unsupported" return below.

        except Exception as e:
            return {'healthy': False, 'provider': 'aws', 'name': name,
                    'message': f'Resource not found or error: {str(e)}'}

    elif provider == 'azure':
        from .azure import callaz
        rg = kw.get('resource_group')
        try:
            if resource_type == 'database':
                result = callaz('postgres', 'flexible-server', 'show',
                                '--name', name,
                                '--resource-group', rg)
                state = result.get('state', '')
                return {'healthy': state == 'Ready', 'provider': 'azure',
                        'name': name, 'state': state}

            elif resource_type == 'cache':
                result = callaz('redis', 'show',
                                '--name', name,
                                '--resource-group', rg)
                status_val = result.get('provisioningState', '')
                return {'healthy': status_val == 'Succeeded', 'provider': 'azure',
                        'name': name, 'status': status_val}

            elif resource_type == 'search':
                result = callaz('search', 'service', 'show',
                                '--name', name,
                                '--resource-group', rg)
                status_val = result.get('provisioningState', '')
                return {'healthy': status_val == 'Succeeded', 'provider': 'azure',
                        'name': name, 'status': status_val}

            elif resource_type == 'function':
                result = callaz('functionapp', 'show',
                                '--name', name,
                                '--resource-group', rg)
                state = result.get('state', '')
                return {'healthy': state == 'Running', 'provider': 'azure',
                        'name': name, 'state': state}

        except Exception as e:
            return {'healthy': False, 'provider': 'azure', 'name': name,
                    'message': f'Resource not found or error: {str(e)}'}

    return {'healthy': False, 'provider': provider, 'name': name,
            'message': 'Unsupported provider or resource type'}


# GCP-specific teardown functions

def destroy_cloud_run(name, region, project=None):
    'Delete a Cloud Run service; errors are reported, not raised.'
    from .gcp import callgcloud
    proj_args = ['--project', project] if project else []
    try:
        callgcloud('run', 'services', 'delete', name,
                   '--region', region, '--quiet', *proj_args)
        print(f'Deleted Cloud Run service: {name}')
    except RuntimeError as e:
        print(f'Error deleting Cloud Run service: {e}')

def destroy_cloud_sql(name, project=None):
    'Delete a Cloud SQL instance; errors are reported, not raised.'
    from .gcp import callgcloud
    proj_args = ['--project', project] if project else []
    try:
        callgcloud('sql', 'instances', 'delete', name,
                   '--quiet', *proj_args)
        print(f'Deleted Cloud SQL instance: {name}')
    except RuntimeError as e:
        print(f'Error deleting Cloud SQL instance: {e}')

def destroy_memorystore(name, region, project=None):
    'Delete a Memorystore (Redis) instance; errors are reported, not raised.'
    from .gcp import callgcloud
    proj_args = ['--project', project] if project else []
    try:
        callgcloud('redis', 'instances', 'delete', name,
                   '--region', region, '--quiet', *proj_args)
        print(f'Deleted Memorystore instance: {name}')
    except RuntimeError as e:
        print(f'Error deleting Memorystore instance: {e}')

def teardown_gcp(name, region='us-central1', project=None, postgres=False, redis=False):
    '''Tear down all GCP resources for an application.

    postgres/redis flags must mirror what was passed at provision time; the
    derived '{name}-db' / '{name}-redis' instance names mirror the
    provisioning convention — TODO confirm against gcp_stack.
    '''
    print(f'Tearing down GCP resources for {name}...')

    destroy_cloud_run(name, region, project)

    if postgres:
        destroy_cloud_sql(f'{name}-db', project)

    if redis:
        destroy_memorystore(f'{name}-redis', region, project)

    print(f'Teardown complete for {name}')
Documentation = "https://Karthik777.github.io/fastops/"

[project.optional-dependencies]
db = ["fastsql>=0.0.13"]
storage = ["fsspec>=2024.2", "s3fs", "adlfs"]
llm = ["lisette"]
cache = ["redis>=5.0"]
queue = ["pika>=1.3"]
search = ["elasticsearch>=8.0"]
azure = ["azure-identity", "azure-mgmt-resource"]
aws = ["boto3"]
infra = ["pyinfra>=3.0"]
# "all" is the union of every extra above (it previously omitted the
# aws and azure groups while including all others).
all = [
    "fastsql>=0.0.13",
    "fsspec>=2024.2",
    "s3fs",
    "adlfs",
    "lisette",
    "redis>=5.0",
    "pika>=1.3",
    "elasticsearch>=8.0",
    "pyinfra>=3.0",
    "azure-identity",
    "azure-mgmt-resource",
    "boto3",
]

[project.entry-points.nbdev]
fastops = "fastops._modidx:d"