Skip to content

rs_workflows/dpr_flow.md

<< Back to index

DPR flow implementation

compute_eopf_origin_datetime(env, input_products)

Compute the maximum eopf:origin_datetime across all input products.

For each input product, this function retrieves the corresponding item from the catalog using its item ID and collection ID, extracts the eopf:origin_datetime property, and returns the latest (maximum) datetime value found.

If an item cannot be retrieved from the catalog, the error is logged and a RuntimeError is raised: every input product must be resolvable for the computation to succeed.

Parameters

env : object Execution environment object used to serialize and pass context to the catalog flow. input_products : Iterable[dict] Iterable of input product mappings. Each mapping is expected to contain values of the form (item_id, collection_id).

Returns

str ISO 8601 string representing the maximum eopf:origin_datetime found among all retrieved items. A RuntimeError is raised if the input is empty or any referenced item cannot be retrieved.

Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
def compute_eopf_origin_datetime(env, input_products) -> str:
    """
    Compute the maximum ``eopf:origin_datetime`` across all input products.

    For each input product, this function retrieves the corresponding item
    from the catalog using its item ID and collection ID, extracts the
    ``eopf:origin_datetime`` property, and returns the latest (maximum)
    datetime value found.

    If an item cannot be retrieved from the catalog, the error is logged
    and a ``RuntimeError`` is raised: every input product must be resolvable
    for the computation to succeed.

    Parameters
    ----------
    env : object
        Execution environment object used to serialize and pass context
        to the catalog flow.
    input_products : Iterable[dict]
        Iterable of input product mappings. Each mapping is expected to
        contain values of the form ``(item_id, collection_id)``.

    Returns
    -------
    str
        ISO 8601 string representing the maximum ``eopf:origin_datetime``
        found among all retrieved items.

    Raises
    ------
    RuntimeError
        If ``input_products`` is empty, or if any referenced item cannot
        be retrieved from the catalog.
    """
    logger = get_run_logger()
    items = []
    if not input_products:
        logger.error("No valid input products found to compute eopf:origin_datetime. Exit")
        raise RuntimeError("No valid input products found to compute eopf:origin_datetime")

    for input_product in input_products:
        for _, (item_id, collection_id) in input_product.items():
            try:
                future = catalog_flow.get_item.submit(
                    env.serialize(),
                    collection_id,
                    item_id,
                )
                # Resolve the future exactly once: .result() blocks until the
                # task finishes, so calling it twice would wait twice.
                item = future.result()
                if not item:
                    logger.error(
                        f"Expected valid input product item {item_id} was not found"
                        " to compute eopf:origin_datetime. Exit",
                    )
                    raise RuntimeError(
                        f"Expected valid input product item {item_id} was not found" " to compute eopf:origin_datetime",
                    )

                items.append(item)
            except RuntimeError as rte:
                logger.exception(f"Failed to get item '{item_id}' from collection '{collection_id}'")
                raise RuntimeError("No valid items found to compute eopf:origin_datetime") from rte

    logger.info(f"Items matching input found in catalog: {len(items)}")

    # Normalize the "Z" suffix to an explicit UTC offset: datetime.fromisoformat
    # does not accept "Z" before Python 3.11.
    max_eopf_datetime = max(
        datetime.datetime.fromisoformat(
            item.to_dict()["properties"]["eopf:origin_datetime"].replace("Z", "+00:00"),  # type: ignore
        )
        for item in items
    ).isoformat()

    logger.info(f"Maximum eopf datetime computed from all items is {max_eopf_datetime}")
    return max_eopf_datetime

create_stac_item(env, input_products, eopf_feature, s3_data_location, product_name, dpr_processor)

Create a STAC Item from an EOPF feature and processing payload metadata.

This function builds STAC Items compliant with EOPF constraints by: - Injecting EOPF-specific properties into each feature - Attaching output product assets - Propagating origin datetimes from input products

Parameters:

Name Type Description Default
eopf_features list[dict]

List of GeoJSON-like feature dictionaries.

required
s3_data_location str

Base S3 path where output products are stored.

required
product_name str

Product name

required
dpr_processor str

DPR processor name

required

Returns:

Type Description
Item

Item: The constructed STAC Item object.

Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
def create_stac_item(
    env,
    input_products,
    eopf_feature,
    s3_data_location,
    product_name: str,
    dpr_processor: str,
) -> Item:
    """
    Create a single STAC Item from an EOPF feature and processing payload metadata.

    The Item is made compliant with EOPF constraints by:
    - Injecting EOPF-specific properties into the feature (``eopf_feature``
      is mutated in place)
    - Attaching the output product asset
    - Propagating the maximum origin datetime derived from the input products

    Args:
        env: Execution environment, forwarded to ``compute_eopf_origin_datetime``.
        input_products: Input product mappings used to derive
            ``eopf:origin_datetime`` (unused for the "mockup" processor).
        eopf_feature (dict): GeoJSON-like feature dictionary; mutated in place.
        s3_data_location (str): S3 path of the output product used to build the asset.
        product_name (str): Product name, used as the Item id and as the asset
            key and title.
        dpr_processor (str): DPR processor name; ``"mockup"`` (case-insensitive)
            short-circuits the origin-datetime computation with a fixed value.

    Returns:
        Item: The constructed STAC Item.
    """

    def build_item(
        feature_dict: dict,
        eopf_origin_datetimes,
        product_name,
        dpr_processor: str,
        assets: dict[str, Asset],
    ) -> Item:
        """
        Build a STAC Item from a feature dictionary.

        This function mutates the feature dictionary by injecting mandatory
        EOPF and STAC properties before constructing the Item.

        Args:
            feature_dict (dict): GeoJSON-like feature dictionary; mutated in place.
            eopf_origin_datetimes (str): Origin datetime derived from input
                EOPF products, stored under ``eopf:origin_datetime``.
            product_name: Used as the Item id.
            dpr_processor (str): DPR processor name, used to select the
                applicable STAC extensions.
            assets (dict[str, Asset]): Assets attached to the Item.

        Returns:
            Item: A STAC Item populated with geometry, properties, and extensions.
        """
        feature_dict["properties"]["eopf:origin_datetime"] = eopf_origin_datetimes

        # C1.2 Ensure that all EOPF items have stac_version property set to "1.1.0"
        feature_dict["properties"]["stac_version"] = "1.1.0"

        # C1.3 Add stac_extensions following the list from the PRIP ICD §3.3.4
        # TODO: According to the 821 story, we have to:
        # - do not set stac_extension SAR for Sentinel-2 products "with instrument different from SRAL"
        # - do not set stac_extension SAR for Sentinel-3 products "with instrument different from SRAL"
        # Get in line with the story once clarified !
        stac_extensions: list[str] = []
        if dpr_processor == DprProcessor.S1L0.value:
            stac_extensions = [
                # TODO: We don't include the full list for now to avoid issues with catalog ingestion
                # This is because some extensions may require specific properties that are not properly
                # set by the DPR processor at this time.
                # "https://stac-extensions.github.io/sat/v1.1.0/schema.json",
                # "https://stac-extensions.github.io/processing/v1.2.0/schema.json",
                # "https://stac-extensions.github.io/product/v1.0.0/schema.json",
                # "https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
                # "https://stac-extensions.github.io/eo/v2.0.0/schema.json",
                # "https://stac-extensions.github.io/grid/v1.1.0/schema.json",
                # "https://stac-extensions.github.io/view/v1.1.0/schema.json",
                # "https://stac-extensions.github.io/sar/v1.3.0/schema.json",
                # "https://cs-si.github.io/eopf-stac-extension/v1.2.0/schema.json",
                # "https://stac-extensions.github.io/timestamps/v1.1.0/schema.json",
                # "https://stac-extensions.github.io/authentication/v1.1.0/schema.json",
            ]

        return Item(
            id=product_name,
            geometry=feature_dict["geometry"],
            bbox=feature_dict["bbox"],
            datetime=datetime.datetime.fromisoformat(feature_dict["properties"]["datetime"]),
            properties=feature_dict["properties"],
            stac_extensions=stac_extensions,
            assets=assets,
        )

    def build_asset(path: str, product_name: str) -> Asset:
        """
        Build a STAC Asset representing a Zarr output product.

        Args:
            path (str): Full path or URL to the asset; a trailing "/.zattrs"
                segment is stripped so the href points at the Zarr root.
            product_name (str): Human-readable asset title.

        Returns:
            Asset: A STAC Asset configured for EOPF output products.
        """
        return Asset(
            href=path.replace("/.zattrs", ""),
            title=product_name,
            media_type="application/vnd+zarr",
            roles=["data", "metadata"],
            # TODO: The story RSPY-280 is implemented in the catalog to fill the auth:ref field
            # extra_fields={
            #     "auth:ref": "should be filled thanks to story RSPY-280",
            # },
        )

    # C1.1 Add the property eopf:origin_datetime with value equal to the maximum
    # eopf:origin_datetime among all input products (excluding ADFS inputs)
    # Note: input_products != input_adfs
    if dpr_processor.lower() == "mockup":
        eopf_origin_datetime = "2026-01-01T00:00:00Z"
    else:
        eopf_origin_datetime = compute_eopf_origin_datetime(env, input_products)

    item = build_item(
        eopf_feature,
        eopf_origin_datetime,
        product_name,
        dpr_processor,
        assets={product_name: build_asset(s3_data_location, product_name)},
    )
    return item

extract_products_and_zattrs(files, base_path)

Extract product names and their corresponding .zattrs file paths.

Filters a list of file paths to find .zattrs files located directly under product directories within a base path, following the structure: base_path/product_name/.zattrs

Parameters:

Name Type Description Default
files list[str]

List of file paths to search through.

required
base_path str

The base directory path to strip from file paths.

required

Returns:

Type Description

A list of tuples, where each tuple contains: - product_name (str): The name of the product directory. - file (str): The full path to the .zattrs file.

Only includes files that are exactly two levels deep from the base_path

and have the filename '.zattrs'.

Example

files = [ ... "/data/products/product_a/.zattrs", ... "/data/products/product_b/.zattrs", ... "/data/products/product_c/subdir/file.txt" ... ] extract_products_and_zattrs(files, "/data/products") [('product_a', '/data/products/product_a/.zattrs'), ('product_b', '/data/products/product_b/.zattrs')]

Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
def extract_products_and_zattrs(files: list[str], base_path: str):
    """Collect ``(product_name, path)`` pairs for top-level ``.zattrs`` files.

    Scans *files* for entries shaped exactly like
    ``base_path/<product_name>/.zattrs`` — i.e. a ``.zattrs`` file sitting
    directly inside a product directory, one level below *base_path* — and
    pairs each product directory name with the full ``.zattrs`` path.

    Args:
        files: Candidate file paths to inspect.
        base_path: Prefix stripped from each path before inspection.

    Returns:
        A list of ``(product_name, file_path)`` tuples, one per matching
        ``.zattrs`` file, in the order the paths appear in *files*. Paths
        that are deeper or shallower than two levels, or whose filename is
        not ``.zattrs``, are skipped.

    Example:
        >>> files = [
        ...     "/data/products/product_a/.zattrs",
        ...     "/data/products/product_b/.zattrs",
        ...     "/data/products/product_c/subdir/file.txt"
        ... ]
        >>> extract_products_and_zattrs(files, "/data/products")
        [('product_a', '/data/products/product_a/.zattrs'),
         ('product_b', '/data/products/product_b/.zattrs')]
    """
    return [
        (segments[0], candidate)
        # Strip the base prefix, drop any leading slash, and split what
        # remains into path segments for each candidate file.
        for candidate, segments in (
            (path, path[len(base_path) :].lstrip("/").split("/"))  # noqa: E203
            for path in files
        )
        # Keep only base_path/<product>/.zattrs (exactly two levels deep).
        if len(segments) == 2 and segments[1] == ".zattrs"
    ]

read_zattrs_sync(path)

Download .zattrs file synchronously using prefect_utils.s3_download_file and return parsed JSON dicts in memory.

Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
 95
 96
 97
 98
 99
100
101
102
103
def read_zattrs_sync(path: str):
    """
    Fetch a ``.zattrs`` file from S3 and return its parsed JSON content.

    The object is downloaded synchronously into a temporary file (deleted
    when the context manager exits) and deserialized entirely in memory.
    """
    with tempfile.NamedTemporaryFile() as temp:
        local_path = str(temp.name)
        s3_download_file_sync(path, local_path, _sync=True)
        with open(local_path, encoding="utf-8") as zattrs_file:
            return json.load(zattrs_file)

run_processor(env, processor, payload, cluster_info, s3_payload_run, input_products) async

Run the DPR processor.

Parameters:

Name Type Description Default
env FlowEnvArgs

Prefect flow environment

required
processor str

DPR processor name

required
s3_payload_run str

S3 bucket location of the output final DPR payload file.

required
Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
@task(name="Run DPR processor")
async def run_processor(
    env: FlowEnvArgs,
    processor: str,
    payload: PayloadSchema,
    cluster_info: ClusterInfo,
    s3_payload_run: str,
    input_products: list[dict],
) -> list[dict]:
    """
    Run the DPR processor and return the STAC items built from its outputs.

    Before triggering the run, non-final output products are removed from
    the payload so they are not registered in the catalog. Performance
    indicators are recorded before and after execution, and the processor
    log file (if uploaded next to the payload on S3) is echoed to the flow
    logs even when the job fails.

    Args:
        env: Prefect flow environment
        processor: DPR processor name
        payload: DPR payload; its ``io.output_products`` list is filtered
            in place to keep only products flagged ``final_product``.
        cluster_info: Cluster information forwarded to ``dpr_client.run_process``
            (presumably describes the processing cluster to run on — confirm).
        s3_payload_run: S3 bucket location of the output final DPR payload file.
        input_products: Input product mappings, forwarded to
            ``update_eopf_assets`` to propagate origin datetimes.

    Returns:
        The list of STAC items created from the processor outputs.

    Raises:
        ValueError: If the payload has no I/O configuration.
    """
    logger = get_run_logger()

    # Init flow environment and opentelemetry span
    flow_env = FlowEnv(env)
    with flow_env.start_span(__name__, "run-processor"):
        if payload.io is None:
            raise ValueError("Payload I/O configuration is missing.")
        # First, remove the output products that are not final products from
        # the payload to avoid triggering the catalog registration for them
        # Create a temporary list for keeping track of products to keep
        kept_products = []

        # Iterate over the original products
        for prod in payload.io.output_products:
            if prod.final_product:
                kept_products.append(prod)
            else:
                logger.info(f"Output product {prod.id} is not marked as final_product, skipping catalog registration.")

        # Update the original output_products list with the kept products
        # (slice assignment mutates the payload's list in place, so other
        # holders of the same list object see the filtered content)
        payload.io.output_products[:] = kept_products

        record_performance_indicators(  # type: ignore
            start_date=datetime.datetime.now(),
            status="OK",
            dpr_processing_input_stac_items=s3_payload_run,
            payload=payload,
            dpr_processor_name=processor,
        )
        # Trigger the processor run from the dpr service
        dpr_client: DprClient = flow_env.rs_client.get_dpr_client()
        start_time = time.time()
        s3_payload_dir = osp.dirname(s3_payload_run)
        s3_payload_filename = osp.basename(s3_payload_run)
        job_status = dpr_client.run_process(
            process=processor,
            cluster_info=cluster_info,
            s3_config_dir=s3_payload_dir,
            payload_subpath=s3_payload_filename,
            s3_report_dir=s3_payload_dir,
        )
        try:
            dpr_client.wait_for_job(job_status, logger, f"{processor!r} processor")
        finally:
            # Always log the duration and surface the processor log,
            # even if wait_for_job raised.
            logger.info(f"Processor execution time: {str(timedelta(seconds=time.time() - start_time))}")

            # Download reports folder from the s3 bucket
            with tempfile.TemporaryDirectory() as tmpdir:
                await prefect_utils.s3_download_dir(s3_payload_dir, tmpdir)

                # Display here the log from eopf processors if it exists in the reports folder.
                # We search for a log file that shares the same name as the payload file, but
                # has the suffix ".processor.log". This approach is consistent with the current implementation
                # of the rs-dpr-service, which creates a subfolder named "reports" in the same directory as
                # the payload file. The processor log filename will be built by the rs-dpr-service
                # by using the same base name as the payload file, but with the addition of the
                # ".processor.log" suffix instead of ".yaml".
                local_log_file = osp.join(
                    tmpdir,
                    "reports",
                    Path(s3_payload_filename).with_suffix(".processor.log").name,
                )
                try:
                    async with await anyio.open_file(local_log_file, encoding="utf-8") as opened:
                        s3_log_file = osp.join(s3_payload_dir, osp.relpath(local_log_file, tmpdir))
                        logger.info(f"Log file {s3_log_file!r}:\n{await opened.read()}")
                except FileNotFoundError:
                    # Best effort: the processor log is optional.
                    logger.info(f"No processor log file was uploaded under: {s3_payload_dir!r}")

        eopf_stac_items, eopf_types = update_eopf_assets(flow_env, input_products, payload, processor)
        # Record the final performance indicators now that the job is done
        record_performance_indicators(  # type: ignore
            stop_date=datetime.datetime.now(),
            status="OK",
            stac_items=eopf_stac_items,
            payload=payload,
            dpr_processor_name=processor,
            eopf_types=eopf_types,
        )
        return eopf_stac_items

s3_download_file_sync(s3_path, to_path, **download_kwargs)

Download a file from S3 synchronously.

Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
106
107
108
109
110
111
112
113
114
115
116
def s3_download_file_sync(
    s3_path: str,
    to_path: str | Path,
    **download_kwargs: Any,
) -> str | Path:
    """
    Synchronously fetch a single S3 object to a local path.

    Args:
        s3_path: Source object location on S3.
        to_path: Local destination path.
        **download_kwargs: Extra options forwarded to the bucket download call.

    Returns:
        The ``to_path`` argument, unchanged, for convenient chaining.
    """
    bucket, object_key = prefect_utils.get_s3_bucket(s3_path)
    bucket.download_object_to_path(object_key, str(to_path), **download_kwargs)
    return to_path

s3_list(s3_prefix)

List all S3 objects under a prefix without downloading.

Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
40
41
42
43
44
45
def s3_list(s3_prefix: str):
    """Return the full ``s3://`` URIs of every object stored under a prefix (no download)."""
    s3_bucket, key_prefix = prefect_utils.get_s3_bucket(s3_prefix)
    # Filter server-side on the normalized prefix (exactly one trailing slash).
    bucket_objects = s3_bucket._get_bucket_resource().objects  # pylint: disable=protected-access
    matching = bucket_objects.filter(Prefix=key_prefix.rstrip("/") + "/")

    return [f"s3://{s3_bucket.bucket_name}/{entry.key}" for entry in matching]

update_eopf_assets(env, input_products, payload, dpr_processor)

Update EOPF assets by extracting metadata and creating STAC items.

This Prefect task processes output products from a DPR (Data Processing Request) workflow, extracts EOPF (Earth Observation Processing Framework) metadata from .zattrs files, and generates STAC (SpatioTemporal Asset Catalog) items for each discovered product.

Workflow
  1. Lists all .zattrs files in the output product paths
  2. Reads and validates EOPF discovery metadata from each .zattrs file
  3. Extracts product type information
  4. Creates corresponding STAC items for catalog registration

Parameters:

Name Type Description Default
env

Environment configuration object containing runtime settings.

required
input_products list[dict]

List of dictionaries representing input product metadata.

required
payload PayloadSchema

PayloadSchema object containing I/O configuration, including output product paths to scan for .zattrs files.

required
dpr_processor str

str DPR processor name

required

Returns:

Type Description
tuple[Any, Any]

A tuple containing: - stac_items (list): List of STAC items created from EOPF metadata. - eopf_types (list): List of product types extracted from the .zattrs files (corresponds to stac_items by index).

Raises:

Type Description
RuntimeError

If any .zattrs file cannot be read or does not contain required EOPF discovery metadata (stac_discovery.properties).

Notes
  • Requires .zattrs files to follow EOPF metadata conventions
  • Each .zattrs file must contain stac_discovery.properties.product:type
  • Uses S3 storage backend (via s3_list and read_zattrs_sync functions)
Source code in docs/rs-client-libraries/rs_workflows/dpr_flow.py
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
@task(name="Update eopf assets")
def update_eopf_assets(
    env,
    input_products: list[dict],
    payload: PayloadSchema,
    dpr_processor: str,
) -> tuple[Any, Any]:
    """Update EOPF assets by extracting metadata and creating STAC items.

    This Prefect task scans the output product paths of a DPR (Data
    Processing Request) run for ``.zattrs`` files, validates the EOPF
    (Earth Observation Processing Framework) discovery metadata they
    contain, and builds one STAC (SpatioTemporal Asset Catalog) item per
    discovered product.

    Workflow:
        1. Lists all .zattrs files in the output product paths
        2. Reads and validates EOPF discovery metadata from each .zattrs file
        3. Extracts product type information
        4. Creates corresponding STAC items for catalog registration

    Args:
        env: Environment configuration object containing runtime settings.
        input_products: List of dictionaries representing input product metadata.
        payload: PayloadSchema object containing I/O configuration, including
            output product paths to scan for .zattrs files.
        dpr_processor: DPR processor name.

    Returns:
        A tuple ``(stac_items, eopf_types)`` where ``stac_items`` is the list
        of STAC items created from the EOPF metadata and ``eopf_types`` is the
        list of product types read from the ``.zattrs`` files (aligned with
        ``stac_items`` by index).

    Raises:
        RuntimeError: If the payload has no I/O configuration, or if any
            .zattrs file cannot be read or lacks the required EOPF discovery
            metadata (``stac_discovery.properties``).

    Notes:
        - Requires .zattrs files to follow EOPF metadata conventions
        - Each .zattrs file must contain stac_discovery.properties.product:type
        - Uses S3 storage backend (via s3_list and read_zattrs_sync functions)
    """
    logger = get_run_logger()
    logger.info("Starting EOPF asset update.")
    logger.info(f"Payload received: {payload}")
    logger.info("Input products: %s", input_products)

    if payload.io is None:
        raise RuntimeError("Payload I/O configuration is missing.")

    # Discover every (product_name, .zattrs path) pair under the output paths.
    zattrs_list = []
    for output_product in payload.io.output_products:
        output_path = output_product.path
        zattrs_list.extend(extract_products_and_zattrs(s3_list(output_path), output_path))
        logger.info(f"Product {output_product.id} has been added to the list and will be published to the catalog.")

    # List & extract
    logger.info(f"Found {len(zattrs_list)} .zattrs files under path. The list: {zattrs_list}")

    stac_items = []
    eopf_types = []
    for product_name, zattrs_s3_location in zattrs_list:
        logger.info(f"Product = {product_name} | zattrs = {zattrs_s3_location}")

        # Read metadata; an unreadable/empty file aborts the whole task.
        zattrs_data = read_zattrs_sync(zattrs_s3_location)
        if not zattrs_data:
            read_error = f"Could not read .zattrs file {zattrs_s3_location}. Exiting."
            logger.error(read_error)
            raise RuntimeError(read_error)
        logger.info(f"DPR processor output {zattrs_data}")

        # Validate that the EOPF discovery section is present.
        if "stac_discovery" not in zattrs_data or "properties" not in zattrs_data["stac_discovery"]:
            missing_error = f".zattrs file {zattrs_s3_location} does not contain EOPF discovery metadata. Exiting."
            logger.error(missing_error)
            raise RuntimeError(missing_error)

        eopf_item = zattrs_data["stac_discovery"]
        eopf_type = eopf_item["properties"].get("product:type", None)
        logger.info(f"Extracted EOPF product type: {eopf_type}")
        eopf_types.append(eopf_type)
        logger.debug(f"EOPF discovery metadata extracted: {eopf_item}")

        # Build the STAC item for this product.
        stac_items.append(
            create_stac_item(env, input_products, eopf_item, zattrs_s3_location, product_name, dpr_processor),
        )
        logger.info(f"Added one stac item to the already existing list. Length: {len(stac_items)}.")

    return stac_items, eopf_types