diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 068af94..03a8452 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -15,6 +15,7 @@ jobs: uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf with: dockerfile: Dockerfile + ignore: DL3008 - name: Set up Python uses: actions/setup-python@e9aba2c848f5ebd159c070c61ea2c4e2b122355e diff --git a/Dockerfile b/Dockerfile index e808508..aa9696f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ FROM python:${PYTHON_VERSION} # Install git RUN apt-get update && \ - apt-get install -y git ffmpeg libsm6 libxext6 && \ + apt-get install -y --no-install-recommends git ffmpeg libsm6 libxext6 && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* diff --git a/pipelines/deteccao_alagamento_cameras/__init__.py b/pipelines/deteccao_alagamento_cameras/__init__.py index 6bb53b9..6afa668 100644 --- a/pipelines/deteccao_alagamento_cameras/__init__.py +++ b/pipelines/deteccao_alagamento_cameras/__init__.py @@ -1 +1,2 @@ -from pipelines.deteccao_alagamento_cameras.flooding_detection.flows import * +# -*- coding: utf-8 -*- +from pipelines.deteccao_alagamento_cameras.flooding_detection.flows import * # noqa diff --git a/pipelines/deteccao_alagamento_cameras/flooding_detection/tasks.py b/pipelines/deteccao_alagamento_cameras/flooding_detection/tasks.py index 79d1cc6..72a012d 100644 --- a/pipelines/deteccao_alagamento_cameras/flooding_detection/tasks.py +++ b/pipelines/deteccao_alagamento_cameras/flooding_detection/tasks.py @@ -217,7 +217,7 @@ def get_prediction( ) responses.resolve() - if type(responses) == tuple: + if isinstance(responses, tuple): responses = responses[0] json_string = responses.text.replace("```json\n", "").replace("\n```", "") label = json.loads(json_string)["label"] diff --git a/pipelines/flows.py b/pipelines/flows.py index 9b7883d..066583f 100644 --- a/pipelines/flows.py +++ b/pipelines/flows.py @@ -4,5 +4,6 @@ """ from pipelines.deteccao_alagamento_cameras import * # noqa from pipelines.exemplo import * # noqa +from pipelines.lgpd import * # noqa from pipelines.stress import * # noqa from pipelines.templates import * # noqa diff --git a/pipelines/lgpd/__init__.py b/pipelines/lgpd/__init__.py new file mode 100644 index 0000000..71692e0 --- /dev/null +++ b/pipelines/lgpd/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from pipelines.lgpd.tables_bindings.flows import * # noqa diff --git a/pipelines/lgpd/tables_bindings/__init__.py b/pipelines/lgpd/tables_bindings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pipelines/lgpd/tables_bindings/flows.py b/pipelines/lgpd/tables_bindings/flows.py new file mode 100644 index 0000000..18150e5 --- /dev/null +++ b/pipelines/lgpd/tables_bindings/flows.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +from prefect import Parameter +from prefect.run_configs import KubernetesRun +from prefect.storage import GCS +from prefect.utilities.edges import unmapped +from prefeitura_rio.pipelines_utils.custom import Flow +from prefeitura_rio.pipelines_utils.state_handlers import handler_inject_bd_credentials + +from pipelines.constants import constants +from pipelines.lgpd.tables_bindings.schedules import update_tables_bindings_schedule +from pipelines.lgpd.tables_bindings.tasks import ( + get_project_tables_iam_policies, + list_projects, + merge_dataframes, + upload_dataframe_to_gsheets, +) + +with Flow( + name="LGPD - Lista de permissões de acesso a tabelas do BigQuery", + state_handlers=[handler_inject_bd_credentials], + 
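+    # Skip runs while a previous one is still active; cap task parallelism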
+    skip_if_running=True,
+    parallelism=5,
+) as rj_escritorio__lgpd__tables_bindings__flow:
+    # Parameters
+    credentials_secret_name = Parameter("credentials_secret_name")
+    sheet_name = Parameter("sheet_name")
+    spreadsheet_url = Parameter("spreadsheet_url")
+
+    # Flow
+    project_ids = list_projects(credentials_secret_name=credentials_secret_name)
+    iam_policies_dataframes = get_project_tables_iam_policies.map(
+        project_id=project_ids, credentials_secret_name=unmapped(credentials_secret_name)
+    )
+    merged_dataframe = merge_dataframes(dfs=iam_policies_dataframes)
+    upload_dataframe_to_gsheets(
+        dataframe=merged_dataframe,
+        spreadsheet_url=spreadsheet_url,
+        sheet_name=sheet_name,
+        credentials_secret_name=credentials_secret_name,
+    )
+
+
+rj_escritorio__lgpd__tables_bindings__flow.storage = GCS(constants.GCS_FLOWS_BUCKET.value)
+rj_escritorio__lgpd__tables_bindings__flow.run_config = KubernetesRun(
+    image=constants.DOCKER_IMAGE.value,
+    labels=[constants.RJ_ESCRITORIO_AGENT_LABEL.value],
+)
+rj_escritorio__lgpd__tables_bindings__flow.schedule = update_tables_bindings_schedule
diff --git a/pipelines/lgpd/tables_bindings/schedules.py b/pipelines/lgpd/tables_bindings/schedules.py
new file mode 100644
index 0000000..a4ca28a
--- /dev/null
+++ b/pipelines/lgpd/tables_bindings/schedules.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+from datetime import datetime, timedelta
+
+import pytz
+from prefect.schedules import Schedule
+from prefect.schedules.clocks import IntervalClock
+
+from pipelines.constants import constants
+
+update_tables_bindings_schedule = Schedule(
+    clocks=[
+        IntervalClock(
+            interval=timedelta(days=1),
+            start_date=datetime(2023, 1, 1, tzinfo=pytz.timezone("America/Sao_Paulo")),
+            labels=[
+                constants.RJ_ESCRITORIO_AGENT_LABEL.value,
+            ],
+            parameter_defaults={
+                "credentials_secret_name": "LGPD_SERVICE_ACCOUNT_B64",
+                "sheet_name": "tables_bindings",
+                "spreadsheet_url": "https://docs.google.com/spreadsheets/d/16gVrhfwMl1TUZ_jbWdNKw1xcNMUTNwtkJCfg7nW52go/edit#gid=0",  # noqa
+            },
+        ),
+    ]
+)
diff --git a/pipelines/lgpd/tables_bindings/tasks.py b/pipelines/lgpd/tables_bindings/tasks.py
new file mode 100644
index 0000000..02b6b41
--- /dev/null
+++ b/pipelines/lgpd/tables_bindings/tasks.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+from typing import List
+
+import gspread
+import pandas as pd
+from google.cloud import asset
+from googleapiclient import discovery
+from prefect import task
+from prefeitura_rio.pipelines_utils.logging import log
+
+from pipelines.lgpd.tables_bindings.utils import (
+    batch_get_effective_iam_policies,
+    get_gcp_credentials,
+    list_tables,
+    merge_dataframes_fn,
+    write_data_to_gsheets,
+)
+
+
+@task
+def list_projects(credentials_secret_name: str) -> List[str]:
+    """
+    Lists all GCP projects that we have access to.
+
+    Args:
+        credentials_secret_name: Name of the secret that holds the base64-encoded
+            GCP service account credentials.
+
+    Returns:
+        List of project IDs.
+ """ + credentials = get_gcp_credentials(secret_name=credentials_secret_name) + service = discovery.build("cloudresourcemanager", "v1", credentials=credentials) + request = service.projects().list() + projects = [] + while request is not None: + response = request.execute() + for project in response.get("projects", []): + project_id = project["projectId"] + log(f"Found project {project_id}.") + projects.append(project_id) + request = service.projects().list_next(previous_request=request, previous_response=response) + log(f"Found {len(projects)} projects.") + return projects + + +@task +def get_project_tables_iam_policies(project_id: str, credentials_secret_name: str) -> pd.DataFrame: + """ + Get IAM policies for a list of tables in a given project. + + Args: + project_id (str): The project ID. + + Returns: + pd.DataFrame: A DataFrame with the IAM policies for the given tables. The dataframe contains + the following columns: + - project_id: The project ID. + - dataset_id: The dataset ID. + - table_id: The table ID. + - attached_resource: The resource to which the policy is attached. + - role: The role for the binding. + - member: The member for the binding. + """ + credentials = get_gcp_credentials(secret_name=credentials_secret_name) + tables = list_tables(project_id, credentials) + log(f"Found {len(tables)} tables in project {project_id}.") + client = asset.AssetServiceClient(credentials=credentials) + scope = f"projects/{project_id}" + # Split tables in batches of 20 (maximum allowed by the API) + tables_batches = [tables[i : i + 20] for i in range(0, len(tables), 20)] # noqa + dfs = [] + for i, table_batch in enumerate(tables_batches): + log( + f"Getting IAM policies for batch {i + 1}/{len(tables_batches)} (project_id={project_id})." # noqa + ) + df_batch = batch_get_effective_iam_policies(client=client, scope=scope, names=table_batch) + dfs.append(df_batch) + if len(dfs) == 0: + log(f"No IAM policies found for project {project_id}.") + return pd.DataFrame() + df = merge_dataframes_fn(dfs) + log(f"Found {len(df)} IAM policies for project {project_id}.") + return df + + +@task +def merge_dataframes(dfs: List[pd.DataFrame]) -> pd.DataFrame: + """ + Merge a list of DataFrames into a single DataFrame. + + Args: + dfs (List[pd.DataFrame]): The DataFrames to merge. + + Returns: + pd.DataFrame: The merged DataFrame. + """ + log(f"Merging {len(dfs)} DataFrames.") + return merge_dataframes_fn(dfs) + + +@task +def upload_dataframe_to_gsheets( + dataframe: pd.DataFrame, spreadsheet_url: str, sheet_name: str, credentials_secret_name: str +) -> None: + """ + Update a Google Sheets spreadsheet with a DataFrame. + + Args: + dataframe: Pandas DataFrame. + spreadsheet_url: Google Sheets spreadsheet URL. + sheet_name: Google Sheets sheet name. 
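+        credentials_secret_name: Name of the secret that holds the GCP credentials used to
+            authorize the gspread client.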
+ """ + # Get gspread client + credentials = get_gcp_credentials( + secret_name=credentials_secret_name, + scopes=[ + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/drive", + ], + ) + gspread_client = gspread.authorize(credentials) + # Open spreadsheet + log(f"Opening Google Sheets spreadsheet {spreadsheet_url} with sheet {sheet_name}.") + sheet = gspread_client.open_by_url(spreadsheet_url) + worksheet = sheet.worksheet(sheet_name) + # Update spreadsheet + log("Deleting old data.") + worksheet.clear() + log("Rewriting headers.") + write_data_to_gsheets( + worksheet=worksheet, + data=[dataframe.columns.tolist()], + ) + log("Updating new data.") + write_data_to_gsheets( + worksheet=worksheet, + data=dataframe.values.tolist(), + start_cell="A2", + ) + # Add filters + log("Adding filters.") + first_col = "A" + last_col = chr(ord(first_col) + len(dataframe.columns) - 1) + worksheet.set_basic_filter(f"{first_col}:{last_col}") + # Resize columns + log("Resizing columns.") + worksheet.columns_auto_resize(0, len(dataframe.columns) - 1) + log("Done.") diff --git a/pipelines/lgpd/tables_bindings/utils.py b/pipelines/lgpd/tables_bindings/utils.py new file mode 100644 index 0000000..29d6bd5 --- /dev/null +++ b/pipelines/lgpd/tables_bindings/utils.py @@ -0,0 +1,229 @@ +# -*- coding: utf-8 -*- +import base64 +import json +from time import sleep +from typing import Any, List, Tuple + +import pandas as pd +from google.api_core.exceptions import FailedPrecondition, NotFound, ResourceExhausted +from google.cloud import asset, bigquery +from google.cloud.asset_v1.types.asset_service import ( + BatchGetEffectiveIamPoliciesResponse, +) +from google.oauth2 import service_account +from gspread.worksheet import Worksheet +from prefeitura_rio.pipelines_utils.infisical import get_secret +from prefeitura_rio.pipelines_utils.logging import log + + +def batch_get_effective_iam_policies( + client: asset.AssetServiceClient, + scope: str, + names: List[str], + retry_delay: float = 7.5, + backoff_factor: float = 2, + max_retries: int = 5, +) -> pd.DataFrame: + """ + Batch get effective IAM policies. + + Args: + scope: The scope. + names: The names. + retry_delay: The retry delay. + backoff_factor: The backoff factor for exponential backoff. + max_retries: The maximum number of retries. + + Returns: + pd.DataFrame: A DataFrame with the IAM policies for the given tables. The dataframe contains + the following columns: + - project_id: The project ID. + - dataset_id: The dataset ID. + - table_id: The table ID. + - attached_resource: The resource to which the policy is attached. + - role: The role for the binding. + - member: The member for the binding. + """ + success = False + retries = 0 + while not success and retries < max_retries: + try: + request = asset.BatchGetEffectiveIamPoliciesRequest(scope=scope, names=names) + response = client.batch_get_effective_iam_policies(request=request) + return build_dataframe_from_batch_get_effective_iam_policies_response(response) + except FailedPrecondition as exc: + # This is a quota issue. We should wait and retry. + log( + f"Reached API quota. Retrying in {retry_delay * (backoff_factor**retries)} seconds." + ) + sleep(retry_delay * (backoff_factor**retries)) + retries += 1 + if retries >= max_retries: + raise FailedPrecondition( + f"Failed to get effective IAM policies after {max_retries} attempts." + ) from exc + except ResourceExhausted as exc: + # This is a quota issue. We should wait and retry. + log( + f"Reached API quota. 
Retrying in {retry_delay * (backoff_factor**retries)} seconds." + ) + sleep(retry_delay * (backoff_factor**retries)) + retries += 1 + if retries >= max_retries: + raise ResourceExhausted( + f"Failed to get effective IAM policies after {max_retries} attempts." + ) from exc + except NotFound: + # A resource was not found. We must handle the situation this way: + # - If len(names) > 1, we must split the list in half and call the function recursively. + # - If len(names) == 1, we must log the error and return an empty response. + if len(names) > 1: + log( + f"Some resources were not found. Splitting the list (size={len(names)}) and retrying." # noqa + ) + half = len(names) // 2 + left = names[:half] + right = names[half:] + left_df = batch_get_effective_iam_policies(client=client, scope=scope, names=left) + right_df = batch_get_effective_iam_policies(client=client, scope=scope, names=right) + return merge_dataframes_fn([left_df, right_df]) + else: + log(f"Resource {names[0]} not found. Skipping.", level="warning") + return pd.DataFrame() + + +def build_dataframe_from_batch_get_effective_iam_policies_response( + response: BatchGetEffectiveIamPoliciesResponse, +) -> pd.DataFrame: + """ + Build a DataFrame from a BatchGetEffectiveIamPoliciesResponse. + + Args: + response: The response. + + Returns: + pd.DataFrame: A DataFrame with the IAM policies for the given tables. The dataframe contains + the following columns: + - project_id: The project ID. + - dataset_id: The dataset ID. + - table_id: The table ID. + - attached_resource: The resource to which the policy is attached. + - role: The role for the binding. + - member: The member for the binding. + """ + policies = [] + for policy_result in response.policy_results: + project_id, dataset_id, table_id = parse_table_name(policy_result.full_resource_name) + for policy_info in policy_result.policies: + attached_resource = policy_info.attached_resource + policy = policy_info.policy + for binding in policy.bindings: + role = binding.role + for member in binding.members: + policies.append( + { + "project_id": project_id, + "dataset_id": dataset_id, + "table_id": table_id, + "attached_resource": attached_resource, + "role": role, + "member": member, + } + ) + return pd.DataFrame(policies) + + +def get_gcp_credentials(secret_name: str, scopes: List[str] = None) -> service_account.Credentials: + """ + Get GCP credentials from a secret. + + Args: + secret_name: The secret name. + scopes: The scopes to use. + + Returns: + service_account.Credentials: The GCP credentials. + """ + secret = get_secret(secret_name)[secret_name] + info = json.loads(base64.b64decode(secret)) + credentials = service_account.Credentials.from_service_account_info(info) + if scopes: + credentials = credentials.with_scopes(scopes) + return credentials + + +def list_tables(project_id: str, credentials: service_account.Credentials) -> List[str]: + """List all tables in a given project. The output is a list of strings in the format + `//bigquery.googleapis.com/projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`. 
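+
+    Args:
+        project_id: The project ID.
+        credentials: Service account credentials used to build the BigQuery client.
+
+    Returns:
+        List[str]: Full resource names of all tables in the project.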
+    """
+    client = bigquery.Client(project=project_id, credentials=credentials)
+    datasets = client.list_datasets()
+    tables = []
+
+    for dataset in datasets:
+        dataset_ref = client.dataset(dataset.dataset_id)
+        dataset_tables = client.list_tables(dataset_ref)
+        for table in dataset_tables:
+            tables.append(
+                f"//bigquery.googleapis.com/projects/{project_id}/datasets/{table.dataset_id}/tables/{table.table_id}"  # noqa
+            )
+
+    return tables
+
+
+def merge_dataframes_fn(dfs: List[pd.DataFrame]) -> pd.DataFrame:
+    """
+    Merge a list of DataFrames into a single DataFrame.
+
+    Args:
+        dfs (List[pd.DataFrame]): The DataFrames to merge.
+
+    Returns:
+        pd.DataFrame: The merged DataFrame.
+    """
+    log(f"Merging {len(dfs)} DataFrames.")
+    return pd.concat(dfs, ignore_index=True)
+
+
+def parse_table_name(table: str) -> Tuple[str, str, str]:
+    """
+    Parse a table name from the format
+    `//bigquery.googleapis.com/projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+    to a tuple with the project_id, dataset_id and table_id.
+
+    Args:
+        table (str): The table name.
+
+    Returns:
+        Tuple[str, str, str]: A tuple with the project_id, dataset_id and table_id.
+    """
+    table = table.removeprefix("//bigquery.googleapis.com/projects/")
+    parts = table.split("/")
+    project_id = parts[0]
+    dataset_id = parts[2]
+    table_id = parts[4]
+    return project_id, dataset_id, table_id
+
+
+def write_data_to_gsheets(worksheet: Worksheet, data: List[List[Any]], start_cell: str = "A1"):
+    """
+    Write data to a Google Sheets worksheet.
+
+    Args:
+        worksheet: Google Sheets worksheet.
+        data: List of lists of data.
+        start_cell: Cell to start writing data.
+    """
+    try:
+        start_letter = start_cell[0]
+        start_row = int(start_cell[1:])
+    except ValueError as exc:
+        raise ValueError("Invalid start_cell. Please use a cell like A1.") from exc
+    cols_len = len(data[0])
+    rows_len = len(data)
+    end_letter = chr(ord(start_letter) + cols_len - 1)
+    if end_letter not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
+        raise ValueError("Too many columns. Please refactor this code.")
+    end_row = start_row + rows_len - 1
+    range_name = f"{start_letter}{start_row}:{end_letter}{end_row}"
+    worksheet.update(range_name, data)
diff --git a/poetry.lock b/poetry.lock
index 0700de0..9b2ff4a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.

[[package]] name = "agate" @@ -1072,6 +1072,42 @@ requests-oauthlib = ">=0.7.0" [package.extras] tool = ["click (>=6.0.0)"] +[[package]] +name = "google-cloud-access-context-manager" +version = "0.2.0" +description = "Google Cloud Access Context Manager Protobufs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-access-context-manager-0.2.0.tar.gz", hash = "sha256:a5b424312c084b02b6f98c1ebfb6af28132fc01e5d719817fa499e78c87e04b7"}, + {file = "google_cloud_access_context_manager-0.2.0-py2.py3-none-any.whl", hash = "sha256:7a23d2d08facb6f1d4b5456cce82144235575c6ccc6b6c6d9400de6cf23966d3"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "google-cloud-asset" +version = "3.24.1" +description = "Google Cloud Asset API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-asset-3.24.1.tar.gz", hash = "sha256:68d4c20ea8ffd3faa6e20c19ac82ab9f6ca180ab21bf55f01a51dde338733202"}, + {file = "google_cloud_asset-3.24.1-py2.py3-none-any.whl", hash = "sha256:01282a72d4f05b519553bffd13ad8f8cd90f99ccc488cc207a60cab54f110c33"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" +google-cloud-access-context-manager = ">=0.1.2,<1.0.0dev" +google-cloud-org-policy = ">=0.1.2,<2.0.0dev" +google-cloud-os-config = ">=1.0.0,<2.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + [[package]] name = "google-cloud-bigquery" version = "3.17.1" @@ -1175,6 +1211,39 @@ grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" proto-plus = ">=1.22.3,<2.0.0dev" protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" +[[package]] +name = "google-cloud-org-policy" +version = "1.10.0" +description = "Google Cloud Org Policy API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-org-policy-1.10.0.tar.gz", hash = "sha256:3e7274d5f32c4da52ea60688d56e66227dfd840dcd01518674bcb05437130d16"}, + {file = "google_cloud_org_policy-1.10.0-py2.py3-none-any.whl", hash = "sha256:e2c9fbddbd538ac9ac34173a1add46e5702694a467c8c794532fe3ba96219e17"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "google-cloud-os-config" +version = "1.17.1" +description = "Google Cloud Os Config API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-os-config-1.17.1.tar.gz", hash = "sha256:d035e8da1da0a8ed73e6ab94c9623541bfc235aab2cb548d8a7c9a3f2456b91d"}, + {file = "google_cloud_os_config-1.17.1-py2.py3-none-any.whl", hash = 
"sha256:99bb9b0f0c7c758c582992557f2e27d5225797731f91ce7e9353ab24c1afd141"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + [[package]] name = "google-cloud-storage" version = "2.14.0" @@ -2268,11 +2337,8 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.21.2", markers = "python_version >= \"3.10\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, - {version = ">=1.19.3", markers = "python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\" or python_version >= \"3.9\""}, - {version = ">=1.17.0", markers = "python_version >= \"3.7\""}, - {version = ">=1.17.3", markers = "python_version >= \"3.8\""}, + {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, ] [[package]] @@ -3089,7 +3155,6 @@ files = [ {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8729dbf25eb32ad0dc0b9bd5e6a0d0b7e5c2dc8ec06ad171088e1896b522a74"}, {file = "pymongo-4.6.1-cp312-cp312-win32.whl", hash = "sha256:3177f783ae7e08aaf7b2802e0df4e4b13903520e8380915e6337cdc7a6ff01d8"}, {file = "pymongo-4.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:00c199e1c593e2c8b033136d7a08f0c376452bac8a896c923fcd6f419e07bdd2"}, - {file = "pymongo-4.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6dcc95f4bb9ed793714b43f4f23a7b0c57e4ef47414162297d6f650213512c19"}, {file = "pymongo-4.6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:13552ca505366df74e3e2f0a4f27c363928f3dff0eef9f281eb81af7f29bc3c5"}, {file = "pymongo-4.6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:77e0df59b1a4994ad30c6d746992ae887f9756a43fc25dec2db515d94cf0222d"}, {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3a7f02a58a0c2912734105e05dedbee4f7507e6f1bd132ebad520be0b11d46fd"}, @@ -4316,4 +4381,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.11" -content-hash = "57023a65747d287d458d750c7280d0f683488f09ff91a80d07c1e0b163a7a621" +content-hash = "a1ec4b54714eaa4e7b79abcc4704202a5818b0d7a0b9a96bb2cb6e9d003d356d" diff --git a/pyproject.toml b/pyproject.toml index fca48a1..495d531 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ requests = "^2.31.0" pillow = "^10.1.0" shapely = "^2.0.2" h3 = "^3.7.6" +google-cloud-asset = "^3.24.1" [tool.poetry.group.dev] optional = true