first load
Some checks failed
Build, Push, Publish / Build & Release (push) Failing after 2s

This commit is contained in:
2025-12-16 04:41:52 -03:00
parent b42ed0e6b9
commit 4f4540b93a
16 changed files with 1082 additions and 1 deletions

240
.github/workflows/release_build.yml vendored Normal file
View File

@@ -0,0 +1,240 @@
# Builds a release ZIP, bumps the version in manifest.json, creates a GitHub
# Release, and (when a Dockerfile exists) pushes an image to GHCR.
# NOTE(review): indentation was restored from a flattened copy of this file;
# verify against the original before merging.
name: Build, Push, Publish

on:
  push:
    branches:
      - main
  workflow_dispatch:
  schedule:
    - cron: '28 5 * * *'
  workflow_run:
    workflows: ["Sync Repo"]
    types:
      - completed

jobs:
  release:
    name: Build & Release
    runs-on: ubuntu-latest
    permissions:
      contents: write  # push commits/tags and create releases
      packages: write  # push images to GHCR
    steps:
      - name: 📥 Checkout code with full history and tags
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Check if any tags exist
        id: check_tags_exist
        run: |
          git fetch --tags
          TAG_COUNT=$(git tag | wc -l)
          if [ "$TAG_COUNT" -eq 0 ]; then
            echo "has_tags=false" >> "$GITHUB_OUTPUT"
            echo "latest_tag=v0.0.0" >> "$GITHUB_OUTPUT"
          else
            echo "has_tags=true" >> "$GITHUB_OUTPUT"
            LATEST_TAG=$(git describe --tags --abbrev=0)
            echo "latest_tag=$LATEST_TAG" >> "$GITHUB_OUTPUT"
          fi

      - name: Check if meaningful commits exist since latest tag
        id: check_commits
        run: |
          if [ "${{ steps.check_tags_exist.outputs.has_tags }}" = "false" ]; then
            # No tags exist, so we should create first release
            echo "commit_count=1" >> "$GITHUB_OUTPUT"
            CHANGED_FILES=$(git ls-files | grep -v '^manifest.json$' || true)
            if [ -n "$CHANGED_FILES" ]; then
              echo "changed_files<<EOF" >> "$GITHUB_OUTPUT"
              printf '%s\n' "$CHANGED_FILES" >> "$GITHUB_OUTPUT"
              echo "EOF" >> "$GITHUB_OUTPUT"
            else
              echo "changed_files=Initial release" >> "$GITHUB_OUTPUT"
            fi
          else
            LATEST_TAG="${{ steps.check_tags_exist.outputs.latest_tag }}"
            CHANGED_FILES="$(git diff --name-only "${LATEST_TAG}..HEAD" | grep -v '^manifest.json$' || true)"
            if [ -n "$CHANGED_FILES" ]; then
              echo "commit_count=1" >> "$GITHUB_OUTPUT"
              echo "changed_files<<EOF" >> "$GITHUB_OUTPUT"
              printf '%s\n' "$CHANGED_FILES" >> "$GITHUB_OUTPUT"
              echo "EOF" >> "$GITHUB_OUTPUT"
            else
              echo "commit_count=0" >> "$GITHUB_OUTPUT"
            fi
          fi

      - name: Get latest release tag (from GitHub API)
        id: get_latest_release
        run: |
          LATEST_RELEASE_TAG=$(curl -sL -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/latest" | jq -r .tag_name)
          if [ -z "$LATEST_RELEASE_TAG" ] || [ "$LATEST_RELEASE_TAG" = "null" ]; then
            LATEST_RELEASE_TAG="v1.0.0"
          fi
          echo "latest_release_tag=$LATEST_RELEASE_TAG" >> "$GITHUB_OUTPUT"
          echo "latest_release_version=${LATEST_RELEASE_TAG#v}" >> "$GITHUB_OUTPUT"

      # -------------------------------
      # Sync manifest.json to last release version if behind (only when no meaningful commits)
      # -------------------------------
      - name: 🛠 Ensure manifest.json matches latest release version
        if: steps.check_commits.outputs.commit_count == '0'
        run: |
          if [ -f manifest.json ]; then
            MANIFEST_VERSION=$(jq -r '.version // empty' manifest.json)
          else
            MANIFEST_VERSION=""
          fi
          LATEST_RELEASE_VERSION="${{ steps.get_latest_release.outputs.latest_release_version }}"
          # NOTE(review): assumes the 'packaging' module is available to the
          # runner's python3 — confirm, or `pip install packaging` first.
          PYTHON_CODE="from packaging import version; \
          print(version.parse('$LATEST_RELEASE_VERSION') > version.parse('$MANIFEST_VERSION') if '$MANIFEST_VERSION' else True)"
          NEED_UPDATE=$(python3 -c "$PYTHON_CODE")
          if [ "$NEED_UPDATE" = "True" ]; then
            echo "Updating manifest.json to version $LATEST_RELEASE_VERSION (sync with release)"
            jq --arg v "$LATEST_RELEASE_VERSION" '.version = $v' manifest.json > tmp.json && mv tmp.json manifest.json
            git config user.name "github-actions"
            git config user.email "github-actions@github.com"
            git add manifest.json
            git commit -m "Sync manifest.json to release $LATEST_RELEASE_VERSION [🔄]" || echo "Nothing to commit"
            # NOTE(review): '|| true' silently swallows push failures — confirm intended.
            git push origin main || true
          else
            echo "Manifest.json is already up-to-date with the latest release."
          fi

      # -------------------------------
      # Continue normal workflow if commits exist
      # -------------------------------
      - name: 📃 Get list of changed files (Markdown bullet list)
        if: steps.check_commits.outputs.commit_count != '0'
        id: changed_files
        run: |
          # NOTE(review): interpolating step output directly into the script is a
          # template-injection risk if file names contain shell metacharacters;
          # consider passing it through an env: mapping instead.
          BULLET_LIST="$(printf '%s\n' "${{ steps.check_commits.outputs.changed_files }}" | sed 's/^/- /')"
          echo "CHANGED<<EOF" >> "$GITHUB_OUTPUT"
          printf '%s\n' "$BULLET_LIST" >> "$GITHUB_OUTPUT"
          echo "EOF" >> "$GITHUB_OUTPUT"
          COUNT="$(printf '%s\n' "${{ steps.check_commits.outputs.changed_files }}" | wc -l)"
          echo "COUNT=$COUNT" >> "$GITHUB_OUTPUT"

      - name: Get manifest version
        if: steps.check_commits.outputs.commit_count != '0'
        id: get_manifest_version
        run: |
          if [ -f manifest.json ]; then
            MANIFEST_VERSION=$(jq -r '.version // empty' manifest.json)
            if [ -z "$MANIFEST_VERSION" ] || [ "$MANIFEST_VERSION" = "null" ]; then
              MANIFEST_VERSION="1.0.0"
            fi
          else
            MANIFEST_VERSION="1.0.0"
          fi
          echo "manifest_version=$MANIFEST_VERSION" >> "$GITHUB_OUTPUT"

      - name: Pick base version
        if: steps.check_commits.outputs.commit_count != '0'
        id: pick_base_version
        run: |
          LATEST_RELEASE="${{ steps.get_latest_release.outputs.latest_release_version }}"
          MANIFEST="${{ steps.get_manifest_version.outputs.manifest_version }}"
          BASE_VERSION=$(python3 -c "from packaging import version; \
          print(str(max(version.parse('$LATEST_RELEASE'), version.parse('$MANIFEST'))))")
          echo "base_version=$BASE_VERSION" >> "$GITHUB_OUTPUT"

      - name: 🔢 Determine version
        if: steps.check_commits.outputs.commit_count != '0'
        id: version
        run: |
          BASE_VERSION="${{ steps.pick_base_version.outputs.base_version }}"
          IFS='.' read -r MAJOR MINOR PATCH <<< "$BASE_VERSION"
          COUNT="${{ steps.changed_files.outputs.COUNT }}"
          # Bump rule: >=5 changed files -> major, >=3 -> minor, otherwise patch.
          if [ "$COUNT" -ge 5 ]; then
            MAJOR=$((MAJOR + 1))
            MINOR=0
            PATCH=0
          elif [ "$COUNT" -ge 3 ]; then
            MINOR=$((MINOR + 1))
            PATCH=0
          else
            PATCH=$((PATCH + 1))
          fi
          NEW_VERSION="${MAJOR}.${MINOR}.${PATCH}"
          REPO_NAME="$(basename "$GITHUB_REPOSITORY")"
          ZIP_NAME="${REPO_NAME}-${NEW_VERSION}.zip"
          echo "VERSION=$NEW_VERSION" >> "$GITHUB_OUTPUT"
          echo "ZIP_NAME=$ZIP_NAME" >> "$GITHUB_OUTPUT"
          echo "REPO_NAME=$REPO_NAME" >> "$GITHUB_OUTPUT"

      - name: 🛠 Update or create manifest.json
        if: steps.check_commits.outputs.commit_count != '0'
        run: |
          VERSION="${{ steps.version.outputs.VERSION }}"
          AUTHOR="Ivan Carlos"
          VERSION_FILE="manifest.json"
          if [ -f "$VERSION_FILE" ]; then
            jq --arg v "$VERSION" --arg a "$AUTHOR" \
              '.version = $v | .author = $a' "$VERSION_FILE" > tmp.json && mv tmp.json "$VERSION_FILE"
          else
            echo "{ \"version\": \"$VERSION\", \"author\": \"$AUTHOR\" }" > "$VERSION_FILE"
          fi

      - name: 💾 Commit and push updated manifest.json
        if: steps.check_commits.outputs.commit_count != '0'
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add manifest.json
          git commit -m "Update manifest version to ${{ steps.version.outputs.VERSION }} [▶️]" || echo "Nothing to commit"
          git push origin main

      - name: 📦 Create ZIP package (excluding certain files)
        if: steps.check_commits.outputs.commit_count != '0'
        run: |
          ZIP_NAME="${{ steps.version.outputs.ZIP_NAME }}"
          zip -r "$ZIP_NAME" . -x ".git/*" ".github/*" "docker/*" ".dockerignore" "CNAME" "Dockerfile" "README.md" "LICENSE"

      - name: 🚀 Create GitHub Release
        if: steps.check_commits.outputs.commit_count != '0'
        uses: softprops/action-gh-release@v2
        with:
          tag_name: "v${{ steps.version.outputs.VERSION }}"
          name: "${{ steps.version.outputs.REPO_NAME }} v${{ steps.version.outputs.VERSION }}"
          body: |
            ### Changelog
            Files changed in this release:
            ${{ steps.changed_files.outputs.CHANGED }}
          files: ${{ steps.version.outputs.ZIP_NAME }}

      # ----- Docker steps -----
      - name: 🔍 Check if Dockerfile exists
        if: steps.check_commits.outputs.commit_count != '0'
        id: dockerfile_check
        run: |
          if [ -f Dockerfile ]; then
            echo "exists=true" >> "$GITHUB_OUTPUT"
          else
            echo "exists=false" >> "$GITHUB_OUTPUT"
          fi

      - name: 🛠 Set up Docker Buildx
        if: steps.check_commits.outputs.commit_count != '0' && steps.dockerfile_check.outputs.exists == 'true'
        uses: docker/setup-buildx-action@v3

      - name: 🔐 Login to GitHub Container Registry
        if: steps.check_commits.outputs.commit_count != '0' && steps.dockerfile_check.outputs.exists == 'true'
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: 🐳 Build and Push Docker image
        if: steps.check_commits.outputs.commit_count != '0' && steps.dockerfile_check.outputs.exists == 'true'
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ghcr.io/${{ github.repository }}:latest

78
.github/workflows/update_readme.yml vendored Normal file
View File

@@ -0,0 +1,78 @@
# Daily sync of the README buttons block and footer from the shared template
# repository (ivancarlosti/.github) into this repository's README.md.
# NOTE(review): indentation was restored from a flattened copy of this file;
# verify against the original before merging.
name: Update README

# Allow GitHub Actions to commit and push changes
permissions:
  contents: write

on:
  workflow_dispatch:
  schedule:
    - cron: '0 4 * * *'  # Every day at 4 AM UTC

jobs:
  update-readme:
    runs-on: ubuntu-latest
    env:
      SOURCE_REPO: ivancarlosti/.github
      SOURCE_BRANCH: main
    steps:
      - name: Checkout current repository
        uses: actions/checkout@v4

      - name: Checkout source README template
        uses: actions/checkout@v4
        with:
          repository: ${{ env.SOURCE_REPO }}
          ref: ${{ env.SOURCE_BRANCH }}
          path: source_readme

      - name: Update README.md (buttons and footer)
        run: |
          set -e
          REPO_NAME="${GITHUB_REPOSITORY##*/}"
          # --- Extract buttons block from source ---
          BUTTONS=$(awk '/<!-- buttons -->/{flag=1;next}/<!-- endbuttons -->/{flag=0}flag' source_readme/README.md)
          BUTTONS_UPDATED=$(echo "$BUTTONS" | sed "s/\.github/${REPO_NAME}/g")
          # --- Extract footer block from source (everything from <!-- footer --> onward) ---
          FOOTER=$(awk '/<!-- footer -->/{flag=1}flag' source_readme/README.md)
          # --- Replace buttons section in README.md ---
          UPDATED=$(awk -v buttons="$BUTTONS_UPDATED" '
            BEGIN { skip=0 }
            /<!-- buttons -->/ {
              print
              print buttons
              skip=1
              next
            }
            /<!-- endbuttons -->/ && skip {
              print
              skip=0
              next
            }
            !skip { print }
          ' README.md)
          # --- Replace everything after <!-- footer --> with FOOTER ---
          echo "$UPDATED" | awk -v footer="$FOOTER" '
            /<!-- footer -->/ {
              print footer
              found=1
              exit
            }
            { print }
          ' > README.tmp && mv README.tmp README.md

      - name: Remove source_readme from git index
        run: git rm --cached -r source_readme || true

      - name: Commit and push changes
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          file_pattern: README.md
          commit_message: "Sync README from template [▶️]"
          branch: ${{ github.ref_name }}

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Ivan Carlos de Almeida
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,2 +1,64 @@
# lambdascripts
# Lambda scripts
<!-- buttons -->
[![Stars](https://img.shields.io/github/stars/ivancarlosti/lambdascripts?label=⭐%20Stars&color=gold&style=flat)](https://github.com/ivancarlosti/lambdascripts/stargazers)
[![Watchers](https://img.shields.io/github/watchers/ivancarlosti/lambdascripts?label=Watchers&style=flat&color=red)](https://github.com/sponsors/ivancarlosti)
[![Forks](https://img.shields.io/github/forks/ivancarlosti/lambdascripts?label=Forks&style=flat&color=ff69b4)](https://github.com/sponsors/ivancarlosti)
[![GitHub commit activity](https://img.shields.io/github/commit-activity/m/ivancarlosti/lambdascripts?label=Activity)](https://github.com/ivancarlosti/lambdascripts/pulse)
[![GitHub Issues](https://img.shields.io/github/issues/ivancarlosti/lambdascripts?label=Issues&color=orange)](https://github.com/ivancarlosti/lambdascripts/issues)
[![License](https://img.shields.io/github/license/ivancarlosti/lambdascripts?label=License)](LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/ivancarlosti/lambdascripts?label=Last%20Commit)](https://github.com/ivancarlosti/lambdascripts/commits)
[![Security](https://img.shields.io/badge/Security-View%20Here-purple)](https://github.com/ivancarlosti/lambdascripts/security)
[![Code of Conduct](https://img.shields.io/badge/Code%20of%20Conduct-2.1-4baaaa)](https://github.com/ivancarlosti/lambdascripts?tab=coc-ov-file)
[![GitHub Sponsors](https://img.shields.io/github/sponsors/ivancarlosti?label=GitHub%20Sponsors&color=ffc0cb)][sponsor]
<!-- endbuttons -->
# Configuration
* Runtime: Python 3.13+
* Architecture: arm64/x86_64 (arm64 is cheaper)
* Timeout: 1 minute
* Requires related policy for each script
# Instruction
* Create function using `.py` code
* Configure as specified below
* Attach related `.json` policy on function's role
# Remote call instruction
* Create an IAM user for remote function execution
* Attach related `remotecall_policy_lambda.json` policy on user
# Notes
* AddTag script uses Alias to tag KMS keys
* AddTag script uses Description to tag Route53 zones
* Backup Alert script uses 1 day as the range window to gather failed and expired alerts
* Billing and AddTag scripts use the "Tenant" tag to select resources
<!-- footer -->
---
## 🧑‍💻 Consulting and technical support
* For personal support and queries, please submit a new issue to have it addressed.
* For commercial related questions, please [**contact me**][ivancarlos] for consulting costs.
## 🩷 Project support
| If you found this project helpful, consider |
| :---: |
| [**buying me a coffee**][buymeacoffee], [**donate by paypal**][paypal], [**sponsor this project**][sponsor] or just [**leave a star**](../..)⭐ |
|Thanks for your support, it is much appreciated!|
[cc]: https://docs.github.com/en/communities/setting-up-your-project-for-healthy-contributions/adding-a-code-of-conduct-to-your-project
[contributing]: https://docs.github.com/en/articles/setting-guidelines-for-repository-contributors
[security]: https://docs.github.com/en/code-security/getting-started/adding-a-security-policy-to-your-repository
[support]: https://docs.github.com/en/articles/adding-support-resources-to-your-project
[it]: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
[prt]: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/creating-a-pull-request-template-for-your-repository
[funding]: https://docs.github.com/en/articles/displaying-a-sponsor-button-in-your-repository
[ivancarlos]: https://ivancarlos.it
[buymeacoffee]: https://www.buymeacoffee.com/ivancarlos
[paypal]: https://icc.gg/donate
[sponsor]: https://github.com/sponsors/ivancarlosti

263
lambda_function_addtag.py Normal file
View File

@@ -0,0 +1,263 @@
import boto3
import re
def get_tag(tags, key):
    """Return the value of the tag named *key* from a list of AWS tag dicts.

    Args:
        tags: Iterable of ``{'Key': ..., 'Value': ...}`` dicts (AWS tag format).
        key: Tag key to look up.

    Returns:
        The matching tag's value, or ``None`` if the key is not present.
    """
    # NOTE(review): indentation restored from a flattened copy of this file.
    for tag in tags:
        if tag['Key'] == key:
            return tag['Value']
    return None
def add_output(output, resource_type, resource_id, tenant_value, status, **kwargs):
    """Append a normalized result entry to *output* (mutated in place).

    Args:
        output: List collecting result dicts.
        resource_type: Label for the resource kind (e.g. "AMI", "Volume").
        resource_id: Identifier of the affected resource.
        tenant_value: Tenant tag value associated with the resource.
        status: Outcome string (e.g. "TagAdded", "AlreadyTagged").
        **kwargs: Extra fields merged into the entry (e.g. SourceID=...).
    """
    # NOTE(review): indentation restored from a flattened copy of this file.
    entry = {
        "ResourceType": resource_type,
        "ResourceId": resource_id,
        "Tenant": tenant_value,
        "Status": status
    }
    entry.update(kwargs)
    output.append(entry)
def lambda_handler(event, context):
    """Propagate a 'Tenant' tag across related AWS resources.

    Tags AMIs, EBS volumes, EBS snapshots, ENIs and EIPs from the Tenant tag
    of their associated instance/volume/AMI; tags customer-managed KMS keys
    (us-east-1) with their alias name; tags Route 53 hosted zones with their
    comment. Returns the list of per-resource result dicts built by
    add_output(). *event* and *context* are unused.

    NOTE(review): indentation restored from a flattened copy of this file —
    verify nesting against the original. describe_* calls are unpaginated, so
    resources beyond the first page are skipped; confirm that is acceptable.
    """
    ec2 = boto3.client('ec2')
    kms = boto3.client('kms', region_name='us-east-1')
    route53 = boto3.client('route53')
    output = []

    # 1. AMIs
    images = ec2.describe_images(Owners=['self'])['Images']
    for image in images:
        already_tagged = any(tag['Key'] == 'Tenant' for tag in image.get('Tags', []))
        name = image.get('Name', '')
        desc = image.get('Description', '')
        # The source instance id is recovered from the AMI name/description.
        match = re.search(r'i-[0-9a-f]+', name) or re.search(r'i-[0-9a-f]+', desc)
        instance_id = match.group(0) if match else None
        tenant = None
        if instance_id:
            try:
                reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
                if reservations and reservations[0]['Instances']:
                    tags = reservations[0]['Instances'][0].get('Tags', [])
                    tenant = get_tag(tags, 'Tenant')
            except Exception as e:
                add_output(output, "AMI", image['ImageId'], tenant or "TagNotExists", f"InstanceDescribeError: {str(e)}", SourceID=instance_id)
                continue
        if already_tagged:
            add_output(output, "AMI", image['ImageId'], tenant or "TagNotExists", "AlreadyTagged", SourceID=instance_id)
            continue
        if not instance_id:
            add_output(output, "AMI", image['ImageId'], "TagNotExists", "NoInstanceIdInName")
            continue
        if tenant:
            try:
                ec2.create_tags(Resources=[image['ImageId']], Tags=[{'Key': 'Tenant', 'Value': tenant}])
                add_output(output, "AMI", image['ImageId'], tenant, "TagAdded", SourceID=instance_id)
            except Exception as e:
                add_output(output, "AMI", image['ImageId'], tenant, f"TagError: {str(e)}", SourceID=instance_id)
        else:
            add_output(output, "AMI", image['ImageId'], "TagNotExists", "NoTenantTagOnInstance", SourceID=instance_id)

    # 2. Volumes EBS
    volumes = ec2.describe_volumes()['Volumes']
    for vol in volumes:
        already_tagged = any(tag['Key'] == 'Tenant' for tag in vol.get('Tags', []))
        attachments = vol.get('Attachments', [])
        instance_id = attachments[0]['InstanceId'] if attachments and attachments[0].get('InstanceId') else None
        tenant = None
        # Try to get Tenant tag from AMI if attached to an instance with an AMI
        if instance_id:
            # Try to get AMI from instance
            try:
                reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
                if reservations and reservations[0]['Instances']:
                    instance = reservations[0]['Instances'][0]
                    ami_id = instance.get('ImageId')
                    if ami_id:
                        # Get the Tenant tag from the AMI if it exists
                        images = ec2.describe_images(ImageIds=[ami_id])['Images']
                        if images:
                            tenant = get_tag(images[0].get('Tags', []), 'Tenant')
                    # Fallback: get Tenant tag from instance itself if AMI doesn't have it
                    if not tenant:
                        tenant = get_tag(instance.get('Tags', []), 'Tenant')
            except Exception as e:
                add_output(output, "Volume", vol['VolumeId'], tenant or "TagNotExists", f"InstanceDescribeError: {str(e)}", SourceID=instance_id)
                continue
        if already_tagged:
            add_output(output, "Volume", vol['VolumeId'], tenant or "TagNotExists", "AlreadyTagged", SourceID=instance_id)
            continue
        if not instance_id:
            add_output(output, "Volume", vol['VolumeId'], "TagNotExists", "NoInstanceAttachment")
            continue
        if tenant:
            try:
                ec2.create_tags(Resources=[vol['VolumeId']], Tags=[{'Key': 'Tenant', 'Value': tenant}])
                add_output(output, "Volume", vol['VolumeId'], tenant, "TagAdded", SourceID=instance_id)
            except Exception as e:
                add_output(output, "Volume", vol['VolumeId'], tenant, f"TagError: {str(e)}", SourceID=instance_id)
        else:
            add_output(output, "Volume", vol['VolumeId'], "TagNotExists", "NoTenantTagOnInstanceOrAMI", SourceID=instance_id)

    # 3. Snapshots EBS
    snapshots = ec2.describe_snapshots(OwnerIds=['self'])['Snapshots']
    for snap in snapshots:
        already_tagged = any(tag['Key'] == 'Tenant' for tag in snap.get('Tags', []))
        volume_id = snap.get('VolumeId')
        tenant = None
        if volume_id:
            try:
                volumes = ec2.describe_volumes(VolumeIds=[volume_id])['Volumes']
                if volumes:
                    tenant = get_tag(volumes[0].get('Tags', []), 'Tenant')
            except Exception as e:
                # Source volume may have been deleted since the snapshot was taken.
                if 'InvalidVolume.NotFound' in str(e):
                    add_output(output, "Snapshot", snap['SnapshotId'], tenant or "TagNotExists", "NoVolumeIDFound", SourceID=volume_id)
                else:
                    add_output(output, "Snapshot", snap['SnapshotId'], tenant or "TagNotExists", f"VolumeDescribeError: {str(e)}", SourceID=volume_id)
                continue
        if already_tagged:
            add_output(output, "Snapshot", snap['SnapshotId'], tenant or "TagNotExists", "AlreadyTagged", SourceID=volume_id)
            continue
        if not volume_id:
            add_output(output, "Snapshot", snap['SnapshotId'], "TagNotExists", "NoVolumeId")
            continue
        if tenant:
            try:
                ec2.create_tags(Resources=[snap['SnapshotId']], Tags=[{'Key': 'Tenant', 'Value': tenant}])
                add_output(output, "Snapshot", snap['SnapshotId'], tenant, "TagAdded", SourceID=volume_id)
            except Exception as e:
                add_output(output, "Snapshot", snap['SnapshotId'], tenant, f"TagError: {str(e)}", SourceID=volume_id)
        else:
            add_output(output, "Snapshot", snap['SnapshotId'], "TagNotExists", "NoTenantTagOnVolume", SourceID=volume_id)

    # 4. Network Interfaces (ENIs)
    enis = ec2.describe_network_interfaces()['NetworkInterfaces']
    for eni in enis:
        already_tagged = any(tag['Key'] == 'Tenant' for tag in eni.get('TagSet', []))
        attachment = eni.get('Attachment', {})
        instance_id = attachment.get('InstanceId')
        tenant = None
        if instance_id:
            try:
                reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
                if reservations and reservations[0]['Instances']:
                    tags = reservations[0]['Instances'][0].get('Tags', [])
                    tenant = get_tag(tags, 'Tenant')
            except Exception as e:
                add_output(output, "NetworkInterface", eni['NetworkInterfaceId'], tenant or "TagNotExists", f"InstanceDescribeError: {str(e)}", SourceID=instance_id)
                continue
        if already_tagged:
            add_output(output, "NetworkInterface", eni['NetworkInterfaceId'], tenant or "TagNotExists", "AlreadyTagged", SourceID=instance_id)
            continue
        if not instance_id:
            add_output(output, "NetworkInterface", eni['NetworkInterfaceId'], "TagNotExists", "NoInstanceAttachment")
            continue
        if tenant:
            try:
                ec2.create_tags(Resources=[eni['NetworkInterfaceId']], Tags=[{'Key': 'Tenant', 'Value': tenant}])
                add_output(output, "NetworkInterface", eni['NetworkInterfaceId'], tenant, "TagAdded", SourceID=instance_id)
            except Exception as e:
                add_output(output, "NetworkInterface", eni['NetworkInterfaceId'], tenant, f"TagError: {str(e)}", SourceID=instance_id)
        else:
            add_output(output, "NetworkInterface", eni['NetworkInterfaceId'], "TagNotExists", "NoTenantTagOnInstance", SourceID=instance_id)

    # 5. Elastic IPs (EIPs)
    addresses = ec2.describe_addresses()['Addresses']
    for addr in addresses:
        # NOTE(review): AllocationId may be absent for EC2-Classic addresses;
        # create_tags would then receive None — confirm this cannot occur here.
        allocation_id = addr.get('AllocationId')
        instance_id = addr.get('InstanceId')
        tags = addr.get('Tags', [])
        already_tagged = any(tag['Key'] == 'Tenant' for tag in tags)
        tenant = None
        if instance_id:
            try:
                reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
                if reservations and reservations[0]['Instances']:
                    instance_tags = reservations[0]['Instances'][0].get('Tags', [])
                    tenant = get_tag(instance_tags, 'Tenant')
            except Exception as e:
                add_output(output, "EIP", allocation_id, tenant or "TagNotExists", f"InstanceDescribeError: {str(e)}", SourceID=instance_id)
                continue
        if already_tagged:
            add_output(output, "EIP", allocation_id, tenant or "TagNotExists", "AlreadyTagged", SourceID=instance_id)
            continue
        if not instance_id:
            add_output(output, "EIP", allocation_id, "TagNotExists", "NoInstanceAttachment")
            continue
        if tenant:
            try:
                ec2.create_tags(Resources=[allocation_id], Tags=[{'Key': 'Tenant', 'Value': tenant}])
                add_output(output, "EIP", allocation_id, tenant, "TagAdded", SourceID=instance_id)
            except Exception as e:
                add_output(output, "EIP", allocation_id, tenant, f"TagError: {str(e)}", SourceID=instance_id)
        else:
            add_output(output, "EIP", allocation_id, "TagNotExists", "NoTenantTagOnInstance", SourceID=instance_id)

    # 6. KMS: Tag customer-managed keys with alias name as Tenant (us-east-1)
    paginator = kms.get_paginator('list_aliases')
    for page in paginator.paginate():
        for alias in page['Aliases']:
            alias_name = alias.get('AliasName', '')
            key_id = alias.get('TargetKeyId')
            # Skip AWS-managed aliases and aliases with no target key.
            if alias_name.startswith('alias/aws/') or not key_id:
                continue
            # FIX: Remove 'alias/' prefix for the tag value
            tenant_value = alias_name[len('alias/'):] if alias_name.startswith('alias/') else alias_name
            try:
                tags_resp = kms.list_resource_tags(KeyId=key_id)
                existing_tenant = next((t['TagValue'] for t in tags_resp.get('Tags', []) if t['TagKey'] == 'Tenant'), None)
                if existing_tenant == tenant_value:
                    add_output(output, "KMS", key_id, tenant_value, "AlreadyTagged")
                    continue
                kms.tag_resource(
                    KeyId=key_id,
                    Tags=[{'TagKey': 'Tenant', 'TagValue': tenant_value}]
                )
                add_output(output, "KMS", key_id, tenant_value, "TagAdded")
            except Exception as e:
                add_output(output, "KMS", key_id, tenant_value, f"TagError: {str(e)}")

    # 7. Route 53: Tag hosted zones with description as Tenant (only if needed)
    hosted_zones = route53.list_hosted_zones()['HostedZones']
    for zone in hosted_zones:
        zone_id = zone['Id'].split('/')[-1]
        description = zone.get('Config', {}).get('Comment', '')
        tenant_value = description
        if description:
            try:
                tags_resp = route53.list_tags_for_resource(ResourceType='hostedzone', ResourceId=zone_id)
                existing_tenant = next((t['Value'] for t in tags_resp.get('ResourceTagSet', {}).get('Tags', []) if t['Key'] == 'Tenant'), None)
                if existing_tenant == tenant_value:
                    add_output(output, "Route53HostedZone", zone_id, tenant_value, "AlreadyTagged")
                    continue
                route53.change_tags_for_resource(
                    ResourceType='hostedzone',
                    ResourceId=zone_id,
                    AddTags=[{'Key': 'Tenant', 'Value': tenant_value}]
                )
                add_output(output, "Route53HostedZone", zone_id, tenant_value, "TagAdded")
            except Exception as e:
                add_output(output, "Route53HostedZone", zone_id, tenant_value, f"TagError: {str(e)}")
        else:
            add_output(output, "Route53HostedZone", zone_id, "", "NoDescriptionNoTag")
    return output

View File

@@ -0,0 +1,50 @@
import boto3
from datetime import datetime, timedelta
import json
def lambda_handler(event, context):
    """List AWS Backup jobs that FAILED or EXPIRED in the last day.

    Paginates list_backup_jobs per state, enriches each job via
    describe_backup_job, and returns a dict with 'statusCode' 200 and a
    JSON-encoded list of job summaries in 'body'. *event* and *context*
    are unused.

    NOTE(review): indentation restored from a flattened copy of this file —
    verify nesting against the original.
    """
    client = boto3.client('backup')
    days = 1  # Set search day for alert return
    time_cutoff = datetime.utcnow() - timedelta(days=days)
    statuses = ['FAILED', 'EXPIRED']
    failed_or_expired_jobs = []
    for status in statuses:
        next_token = None
        while True:
            params = {
                'ByCreatedAfter': time_cutoff,
                'ByState': status,
                'MaxResults': 1000
            }
            if next_token:
                params['NextToken'] = next_token
            response = client.list_backup_jobs(**params)
            for job in response.get('BackupJobs', []):
                # Use DescribeBackupJob to pull additional information of backup jobs
                job_details = client.describe_backup_job(
                    BackupJobId=job.get('BackupJobId')
                )
                failed_or_expired_jobs.append({
                    'BackupJobId': job.get('BackupJobId'),
                    'ResourceArn': job.get('ResourceArn'),
                    'BackupVaultName': job.get('BackupVaultName'),
                    'CreatedAt': job.get('CreationDate').isoformat(),
                    'Status': job.get('State'),
                    'StatusMessage': job_details.get('StatusMessage'),
                    'CompletionDate': job.get('CompletionDate').isoformat() if job.get('CompletionDate') else None,
                    'BackupType': job_details.get('BackupType'),
                    'BytesTransferred': job_details.get('BytesTransferred'),
                    'IAMRoleArn': job_details.get('IamRoleArn')
                })
            next_token = response.get('NextToken')
            if not next_token:
                break
    return {
        'statusCode': 200,
        'body': json.dumps(failed_or_expired_jobs)
    }

View File

@@ -0,0 +1,44 @@
import boto3
from datetime import datetime, timedelta
def lambda_handler(event, context):
    """Report monthly unblended cost grouped by Tenant tag and service.

    Reads optional 'start_date'/'end_date' (YYYY-MM-DD) from *event*;
    defaults to the previous calendar month (end date exclusive). Returns a
    list of dicts with keys "Tenant", "Service" and "Cost (USD)" (cost uses
    a comma decimal separator). Groups without a Tenant tag are reported as
    "_NoTag".

    NOTE(review): indentation restored from a flattened copy of this file —
    verify nesting against the original.
    """
    start_date = event.get('start_date')
    end_date = event.get('end_date')
    if not start_date or not end_date:
        now = datetime.now()
        first_day_this_month = datetime(now.year, now.month, 1)
        first_day_last_month = (first_day_this_month - timedelta(days=1)).replace(day=1)
        first_day_this_month_str = first_day_this_month.strftime('%Y-%m-%d')
        first_day_last_month_str = first_day_last_month.strftime('%Y-%m-%d')
        start_date = first_day_last_month_str
        end_date = first_day_this_month_str  # Set to first day of current month (exclusive)
    ce = boto3.client('ce')
    response = ce.get_cost_and_usage(
        TimePeriod={'Start': start_date, 'End': end_date},
        Granularity='MONTHLY',
        Metrics=['UnblendedCost'],
        GroupBy=[
            {'Type': 'TAG', 'Key': 'Tenant'},
            {'Type': 'DIMENSION', 'Key': 'SERVICE'}
        ]
    )
    results = []
    for result in response['ResultsByTime']:
        for group in result['Groups']:
            # Cost Explorer returns tag group keys as "Tenant$<value>".
            tenant_key = group['Keys'][0] if group['Keys'][0] else ""
            tenant = tenant_key.split('$', 1)[1] if '$' in tenant_key else tenant_key
            if not tenant:
                tenant = "_NoTag"
            service = group['Keys'][1] if len(group['Keys']) > 1 else "Unknown"
            cost_usd = float(group['Metrics']['UnblendedCost']['Amount'])
            cost_usd_str = f"{cost_usd:.2f}".replace('.', ',')
            results.append({
                "Tenant": tenant,
                "Service": service,
                "Cost (USD)": cost_usd_str
            })
    return results

View File

@@ -0,0 +1,83 @@
import boto3
import datetime
def generate_bind_zone_file(domain_name, records):
    """
    Generates a simplified BIND zone file as a string,
    using the real SOA and NS records from the hosted zone.

    Args:
        domain_name: Apex domain name without trailing dot.
        records: Route 53 ResourceRecordSets-style list of dicts
            (keys: 'Name', 'Type', optional 'TTL', 'ResourceRecords').

    Returns:
        The zone file contents as a newline-joined string.

    Raises:
        ValueError: If no SOA or no NS record exists for *domain_name*.

    NOTE(review): indentation restored from a flattened copy of this file.
    Alias records (no 'ResourceRecords') are silently skipped.
    """
    # Find the SOA record for the domain
    soa_record = next(
        (r for r in records if r['Type'] == 'SOA' and r['Name'].rstrip('.') == domain_name),
        None
    )
    if not soa_record:
        raise ValueError(f"SOA record not found for domain {domain_name}")
    # Find the NS records for the domain
    ns_records = [
        r for r in records if r['Type'] == 'NS' and r['Name'].rstrip('.') == domain_name
    ]
    if not ns_records:
        raise ValueError(f"NS records not found for domain {domain_name}")
    output = []
    output.append(f"$TTL {soa_record.get('TTL', 300)}")
    # Build SOA record using the real fields
    soa_value = soa_record['ResourceRecords'][0]['Value']
    output.append(f"@ IN SOA {soa_value}")
    # Add NS records
    for ns_record in ns_records:
        for rr in ns_record.get('ResourceRecords', []):
            output.append(f"@ IN NS {rr['Value']}")
    output.append("")
    # Add other records (except SOA and NS)
    for record in records:
        record_name = record['Name'].rstrip('.')
        record_type = record['Type']
        ttl = record.get('TTL', 300)
        values = record.get('ResourceRecords', [])
        if record_type in ['SOA', 'NS'] and record_name == domain_name:
            continue  # Already handled
        for val in values:
            val_str = val['Value']
            # Use '@' for root domain
            name_display = '@' if record_name == domain_name else record_name
            output.append(f"{name_display} {ttl} IN {record_type} {val_str}")
    return "\n".join(output)
def lambda_handler(event, context):
    """Export every Route 53 hosted zone as a simplified BIND zone file.

    Returns {'statusCode': 200, 'zones': [{'domain': ..., 'bind_file': ...}]}.
    *event* and *context* are unused.

    NOTE(review): indentation restored from a flattened copy of this file.
    list_hosted_zones / list_resource_record_sets are unpaginated here, so
    accounts with >100 zones or zones with >300 record sets are truncated —
    confirm whether that matters.
    """
    client = boto3.client('route53')
    response = client.list_hosted_zones()
    zones = response['HostedZones']
    output = []
    for zone in zones:
        # Zone Id arrives as '/hostedzone/<id>'; keep only the bare id.
        zone_id = zone['Id'].split("/")[-1]
        domain_name = zone['Name'].rstrip('.')
        records_response = client.list_resource_record_sets(HostedZoneId=zone_id)
        records = records_response['ResourceRecordSets']
        # Generate BIND zone file for the domain
        bind_file = generate_bind_zone_file(domain_name, records)
        output.append({
            "domain": domain_name,
            "bind_file": bind_file
        })
    return {
        "statusCode": 200,
        "zones": output
    }

View File

@@ -0,0 +1,103 @@
import boto3
import json
from datetime import datetime
import logging
# Module-level logger configured once per Lambda container.
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def lambda_handler(event, context):
    """
    Copy every object from all (non-excluded) S3 buckets into a Backblaze B2
    bucket under a "<source-bucket>/<timestamp>/<key>" prefix.

    Sample for JSON Input for n8n call:
    {
        "backblaze_key_id": "BACKBLAZEAPPKEY",
        "backblaze_key": "BACKBLAZESECRETKEY",
        "backblaze_endpoint": "s3.REGION.backblazeb2.com",
        "dest_bucket": "BACKBLAZEBUCKET",
        "exclude_buckets": ["temp-bucket", "logs"]
    }

    Raises:
        ValueError: If any required event parameter is missing.

    NOTE(review): indentation restored from a flattened copy of this file.
    Objects are read fully into memory before upload — large objects may
    exceed Lambda memory; confirm object sizes, or stream/multipart instead.
    Credentials arrive in the event payload — consider a secrets store.
    """
    # Extract parameters from the event
    backblaze_key_id = event.get('backblaze_key_id')
    backblaze_key = event.get('backblaze_key')
    backblaze_endpoint = event.get('backblaze_endpoint')
    dest_bucket = event.get('dest_bucket')
    exclude_buckets = event.get('exclude_buckets', [])
    # Validate required parameters
    if not backblaze_key_id or not backblaze_key or not backblaze_endpoint or not dest_bucket:
        raise ValueError("Missing required parameters: backblaze_key_id, backblaze_key, backblaze_endpoint, dest_bucket")
    logger.info(f"Starting backup to destination bucket: {dest_bucket}")
    s3_client = boto3.client('s3')
    # Backblaze B2 S3-compatible client
    b2_client = boto3.client(
        's3',
        endpoint_url=f"https://{backblaze_endpoint}",
        aws_access_key_id=backblaze_key_id,
        aws_secret_access_key=backblaze_key,
        region_name='us-west-004'
    )
    # Get list of all buckets, excluding if requested
    all_buckets_resp = s3_client.list_buckets()
    bucket_names = [b['Name'] for b in all_buckets_resp['Buckets'] if b['Name'] not in exclude_buckets]
    timestamp = datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
    total_copied = 0
    for bucket_name in bucket_names:
        logger.info(f"Backing up bucket: {bucket_name}")
        paginator = s3_client.get_paginator('list_objects_v2')
        page_iterator = paginator.paginate(Bucket=bucket_name)
        copied_in_bucket = 0
        for page in page_iterator:
            if 'Contents' not in page:
                logger.info(f"No objects found in bucket {bucket_name}")
                continue
            for obj in page['Contents']:
                key = obj['Key']
                dest_key = f"{bucket_name}/{timestamp}/{key}"
                try:
                    # Download the object content from source S3
                    obj_body = s3_client.get_object(Bucket=bucket_name, Key=key)['Body'].read()
                    # Upload to Backblaze B2 bucket
                    b2_client.put_object(
                        Bucket=dest_bucket,
                        Key=dest_key,
                        Body=obj_body,
                        Metadata={
                            'original-bucket': bucket_name,
                            'backup-timestamp': timestamp
                        }
                    )
                    copied_in_bucket += 1
                    total_copied += 1
                    if copied_in_bucket % 100 == 0:
                        logger.info(f"Copied {copied_in_bucket} objects from {bucket_name} so far")
                except Exception as e:
                    # Best-effort: log and continue with the next object.
                    logger.error(f"Error copying object {key} from bucket {bucket_name}: {str(e)}")
                    continue
        logger.info(f"Completed bucket {bucket_name} backup: {copied_in_bucket} objects copied")
    logger.info(f"Backup completed. Total objects copied: {total_copied}")
    return {
        'statusCode': 200,
        'body': json.dumps({
            'message': f'Backup completed for {len(bucket_names)} buckets to {dest_bucket}',
            'total_objects_copied': total_copied,
            'destination_bucket': dest_bucket
        })
    }

36
lambda_policy_addtag.json Normal file
View File

@@ -0,0 +1,36 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeSnapshots",
"ec2:DescribeVolumes",
"ec2:DescribeImages",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeAddresses",
"ec2:CreateTags"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"kms:ListAliases",
"kms:ListResourceTags",
"kms:TagResource"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource",
"route53:ChangeTagsForResource"
],
"Resource": "*"
}
]
}

View File

@@ -0,0 +1,13 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"backup:ListBackupJobs",
"backup:DescribeBackupJob"
],
"Resource": "*"
}
]
}

View File

@@ -0,0 +1,24 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ce:GetCostAndUsage",
"ce:GetCostAndUsageWithResources",
"ce:GetCostForecast",
"ce:GetDimensionValues",
"ce:GetReservationCoverage",
"ce:GetReservationPurchaseRecommendation",
"ce:GetReservationUtilization",
"ce:GetRightsizingRecommendation",
"ce:GetSavingsPlansCoverage",
"ce:GetSavingsPlansPurchaseRecommendation",
"ce:GetSavingsPlansUtilization",
"ce:GetTags",
"ce:GetUsageForecast"
],
"Resource": "*"
}
]
}

View File

@@ -0,0 +1,13 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:ListResourceRecordSets"
],
"Resource": "*"
}
]
}

View File

@@ -0,0 +1,25 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListAllMyBuckets",
"s3:GetBucketLocation"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:GetObjectVersion"
],
"Resource": [
"arn:aws:s3:::*",
"arn:aws:s3:::*/*"
]
}
]
}

4
manifest.json Normal file
View File

@@ -0,0 +1,4 @@
{
"version": "2.1.21",
"author": "Ivan Carlos"
}

View File

@@ -0,0 +1,22 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"lambda:ListFunctions",
"lambda:GetFunction",
"lambda:GetAccountSettings",
"lambda:ListTags"
],
"Resource": "*"
}
]
}