
Computational integrations

On top of exposing a web API, a Fractal instance must be integrated with at least one computational resource, which is mainly used for two purposes: setting up task environments and executing tasks.

Supported integrations

The configuration variable FRACTAL_RUNNER_BACKEND determines which of the following three modalities is in place (a configuration sketch follows the list):

  1. In a local instance, every computational operation (setting up task environments and executing tasks) is run by the same machine user that runs fractal-server, and such an instance typically has a single user. Note: this integration is mostly meant for testing and development.

  2. A SLURM/sudo instance requires access to a SLURM cluster and makes some additional assumptions, notably:

    • The user that runs fractal-server must have sufficient permissions to impersonate other users when running jobs (e.g. via sudo -u some-user sbatch /some/submission-script.sh);
    • There must be a shared filesystem that both the fractal-server user and the impersonated users can access.
  3. A SLURM/SSH instance requires access to a SLURM cluster through SSH, by impersonating one or more service users.
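
The backend is selected through the fractal-server environment configuration, e.g. in the instance's .env file. The entry below is only a sketch: the specific values are an assumption (one per modality, mirroring the resource types used later on this page); refer to the configuration-variables reference for the authoritative list.

# Sketch of a .env entry (assumed values: local / slurm_sudo / slurm_ssh)
FRACTAL_RUNNER_BACKEND=slurm_sudo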


The specific configuration for each computational resource is defined in the Resource database table, with the following creation schemas:

For each resource, there may be one or more computational profiles, with the following creation schemas:

Here are some minimal examples of how to configure resources and profiles in the three different cases:

Resource examples

Local resource:

{
    "type": "local",
    "name": "Local resource",
    "jobs_local_dir": "/somewhere/jobs",
    "jobs_runner_config": {
        "parallel_tasks_per_job": 1
    },
    "jobs_poll_interval": 0,
    "tasks_local_dir": "/somewhere/tasks",
    "tasks_python_config": {
        "default_version": "3.12",
        "versions": {
            "3.12": "/some-venv/bin/python"
        }
    },
    "tasks_pixi_config": {},
    "tasks_pip_cache_dir": null
}

SLURM/sudo resource:

{
    "type": "slurm_sudo",
    "name": "SLURM cluster",
    "jobs_local_dir": "/somewhere/local-jobs",
    "jobs_runner_config": {
        "default_slurm_config": {
            "partition": "partition-name",
            "cpus_per_task": 1,
            "mem": "100M"
        },
        "gpu_slurm_config": {
            "partition": "gpu",
            "extra_lines": [
                "#SBATCH --gres=gpu:v100:1"
            ]
        },
        "user_local_exports": {
            "CELLPOSE_LOCAL_MODELS_PATH": "CELLPOSE_LOCAL_MODELS_PATH",
            "NAPARI_CONFIG": "napari_config.json"
        },
        "batching_config": {
            "target_cpus_per_job": 1,
            "max_cpus_per_job": 1,
            "target_mem_per_job": 200,
            "max_mem_per_job": 500,
            "target_num_jobs": 2,
            "max_num_jobs": 4
        }
    },
    "jobs_slurm_python_worker": "/some/venv/bin/python3.12",
    "jobs_poll_interval": 10,
    "tasks_local_dir": "/somewhere/local-tasks",
    "tasks_python_config": {
        "default_version": "3.12",
        "versions": {
            "3.11": "/some/venv/bin/python3.11",
            "3.12": "/some/venv/bin/python3.12"
        }
    },
    "tasks_pixi_config": {},
    "tasks_pip_cache_dir": null
}

SLURM/SSH resource:

{
    "type": "slurm_ssh",
    "name": "Remote SLURM cluster",
    "host": "slurm-cluster.example.org",
    "jobs_local_dir": "/somewhere/local-jobs",
    "jobs_runner_config": {
        "default_slurm_config": {
            "partition": "partition-name",
            "cpus_per_task": 1,
            "mem": "100M"
        },
        "gpu_slurm_config": {
            "partition": "gpu",
            "extra_lines": [
                "#SBATCH --gres=gpu:v100:1"
            ]
        },
        "user_local_exports": {
            "CELLPOSE_LOCAL_MODELS_PATH": "CELLPOSE_LOCAL_MODELS_PATH",
            "NAPARI_CONFIG": "napari_config.json"
        },
        "batching_config": {
            "target_cpus_per_job": 1,
            "max_cpus_per_job": 1,
            "target_mem_per_job": 200,
            "max_mem_per_job": 500,
            "target_num_jobs": 2,
            "max_num_jobs": 4
        }
    },
    "jobs_slurm_python_worker": "/some/venv/bin/python3.12",
    "jobs_poll_interval": 10,
    "tasks_local_dir": "/somewhere/local-tasks",
    "tasks_python_config": {
        "default_version": "3.12",
        "versions": {
            "3.11": "/some/venv/bin/python3.11",
            "3.12": "/some/venv/bin/python3.12"
        }
    },
    "tasks_pixi_config": {},
    "tasks_pip_cache_dir": null
}

Profile examples

Local profile:

{
    "name": "Local profile",
    "resource_type": "local"
}

SLURM/sudo profile:

{
    "name": "SLURM/sudo profile",
    "resource_type": "slurm_sudo",
    "username": "slurm-username"
}

SLURM/SSH profile:

{
    "name": "SLURM/SSH profile",
    "resource_type": "slurm_ssh",
    "username": "slurm-username",
    "ssh_key_path": "/somewhere/private.key",
    "jobs_remote_dir": "/somewhere/jobs",
    "tasks_remote_dir": "/somewhere/tasks"
}
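
As a quick sanity check for the SLURM/SSH case, the connection details of the profile above can be exercised manually before registering it. The command below is only a sketch, reusing the host, username and key path from the examples on this page:

# Check that the service user can reach the cluster and run SLURM commands
ssh -i /somewhere/private.key slurm-username@slurm-cluster.example.org sinfo --version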