job

download_job_logs(job_id, user=Depends(current_superuser_act), db=Depends(get_async_db)) async

Download job folder

Source code in fractal_server/app/routes/admin/v2/job.py
@router.get("/{job_id}/download/", response_class=StreamingResponse)
async def download_job_logs(
    job_id: int,
    user: UserOAuth = Depends(current_superuser_act),
    db: AsyncSession = Depends(get_async_db),
) -> StreamingResponse:
    """
    Download job folder
    """
    # Get job from DB
    job = await db.get(JobV2, job_id)
    if job is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job {job_id} not found",
        )
    # Create and return byte stream for zipped log folder
    PREFIX_ZIP = Path(job.working_dir).name
    zip_filename = f"{PREFIX_ZIP}_archive.zip"
    return StreamingResponse(
        _zip_folder_to_byte_stream_iterator(folder=job.working_dir),
        media_type="application/x-zip-compressed",
        headers={"Content-Disposition": f"attachment;filename={zip_filename}"},
    )
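
The _zip_folder_to_byte_stream_iterator helper is imported from elsewhere in fractal-server and its implementation is not shown on this page. As a minimal sketch (assuming the archive is first built in memory, which is reasonable for log folders of moderate size), such an iterator could look like the following; the chunk size is a hypothetical choice:

import io
import zipfile
from collections.abc import Iterator
from pathlib import Path

CHUNK_SIZE = 64 * 1024  # hypothetical chunk size, not from the source


def zip_folder_to_byte_stream_iterator(folder: str) -> Iterator[bytes]:
    """Zip `folder` in memory and yield the archive in fixed-size chunks."""
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        for path in sorted(Path(folder).rglob("*")):
            if path.is_file():
                # Store entries relative to the folder being archived
                zf.write(path, arcname=path.relative_to(folder))
    buffer.seek(0)
    while chunk := buffer.read(CHUNK_SIZE):
        yield chunk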

stop_job(job_id, user=Depends(current_superuser_act), db=Depends(get_async_db)) async

Stop execution of a workflow job.

Source code in fractal_server/app/routes/admin/v2/job.py
@router.get("/{job_id}/stop/", status_code=202)
async def stop_job(
    job_id: int,
    user: UserOAuth = Depends(current_superuser_act),
    db: AsyncSession = Depends(get_async_db),
) -> Response:
    """
    Stop execution of a workflow job.
    """

    _check_shutdown_is_supported()

    job = await db.get(JobV2, job_id)
    if job is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job {job_id} not found",
        )

    _write_shutdown_file(job=job)

    return Response(status_code=status.HTTP_202_ACCEPTED)
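
Neither _check_shutdown_is_supported nor _write_shutdown_file is shown on this page. Conceptually, the latter drops a sentinel file into the job's working directory, which the backend executing the job polls for and treats as a request to abort. A minimal sketch under that assumption (the sentinel filename is a placeholder):

from pathlib import Path

SHUTDOWN_FILENAME = "shutdown"  # hypothetical sentinel-file name


def write_shutdown_file(*, working_dir: str) -> None:
    # The backend running the job periodically checks its working
    # directory for this file and aborts execution once it appears.
    (Path(working_dir) / SHUTDOWN_FILENAME).touch()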

update_job(job_update, job_id, user=Depends(current_superuser_act), db=Depends(get_async_db)) async

Change the status of an existing job.

This endpoint is only open to superusers, and it does not apply project-based access control to jobs.

Source code in fractal_server/app/routes/admin/v2/job.py
@router.patch("/{job_id}/", response_model=JobReadV2)
async def update_job(
    job_update: JobUpdateV2,
    job_id: int,
    user: UserOAuth = Depends(current_superuser_act),
    db: AsyncSession = Depends(get_async_db),
) -> JobReadV2 | None:
    """
    Change the status of an existing job.

    This endpoint is only open to superusers, and it does not apply
    project-based access control to jobs.
    """
    job = await db.get(JobV2, job_id)
    if job is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job {job_id} not found",
        )
    if job.status != JobStatusTypeV2.SUBMITTED:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=f"Job {job_id} has status {job.status=} != 'submitted'.",
        )

    if job_update.status != JobStatusTypeV2.FAILED:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=f"Cannot set job status to {job_update.status}",
        )

    timestamp = get_timestamp()
    setattr(job, "status", job_update.status)
    setattr(job, "end_timestamp", timestamp)
    setattr(
        job,
        "log",
        f"{job.log or ''}\nThis job was manually marked as "
        f"'{JobStatusTypeV2.FAILED}' by an admin ({timestamp.isoformat()}).",
    )

    res = await db.execute(
        select(HistoryRun)
        .where(HistoryRun.job_id == job_id)
        .order_by(HistoryRun.timestamp_started.desc())
        .limit(1)
    )
    latest_run = res.scalar_one_or_none()
    if latest_run is not None:
        setattr(latest_run, "status", HistoryUnitStatus.FAILED)
        res = await db.execute(
            select(HistoryUnit).where(
                HistoryUnit.history_run_id == latest_run.id
            )
        )
        history_units = res.scalars().all()
        for history_unit in history_units:
            setattr(history_unit, "status", HistoryUnitStatus.FAILED)

    await db.commit()
    await db.refresh(job)
    await db.close()
    return job
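
As a usage sketch, since this endpoint only accepts the transition from 'submitted' to 'failed', an admin client could PATCH a job as follows. The base URL, job ID, and token are placeholders; the exact route prefix depends on how this admin router is mounted:

import httpx

BASE_URL = "http://localhost:8000/admin/v2/job"  # placeholder prefix
headers = {"Authorization": "Bearer <admin-token>"}

# Only the 'submitted' -> 'failed' transition is accepted by this endpoint
response = httpx.patch(
    f"{BASE_URL}/123/",
    json={"status": "failed"},
    headers=headers,
)
response.raise_for_status()
print(response.json()["status"])  # "failed"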

view_job(id=None, user_id=None, project_id=None, dataset_id=None, workflow_id=None, status=None, start_timestamp_min=None, start_timestamp_max=None, end_timestamp_min=None, end_timestamp_max=None, log=True, pagination=Depends(get_pagination_params), user=Depends(current_superuser_act), db=Depends(get_async_db)) async

Query JobV2 table.

PARAMETERS

id (int | None, default None)
    If not None, select a given applyworkflow.id.

user_id (int | None, default None)
    If not None, select jobs from projects which include the user with this user_id.

project_id (int | None, default None)
    If not None, select a given applyworkflow.project_id.

dataset_id (int | None, default None)
    If not None, select a given applyworkflow.input_dataset_id.

workflow_id (int | None, default None)
    If not None, select a given applyworkflow.workflow_id.

status (JobStatusTypeV2 | None, default None)
    If not None, select a given applyworkflow.status.

start_timestamp_min (AwareDatetime | None, default None)
    If not None, select rows with start_timestamp after start_timestamp_min.

start_timestamp_max (AwareDatetime | None, default None)
    If not None, select rows with start_timestamp before start_timestamp_max.

end_timestamp_min (AwareDatetime | None, default None)
    If not None, select rows with end_timestamp after end_timestamp_min.

end_timestamp_max (AwareDatetime | None, default None)
    If not None, select rows with end_timestamp before end_timestamp_max.

log (bool, default True)
    If True, include job.log; if False, job.log is set to None.
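
Note that the timestamp filters are validated as AwareDatetime, so query values must carry an explicit timezone offset. A small sketch of building such filter values:

from datetime import datetime, timedelta, timezone

# AwareDatetime rejects naive timestamps, so include an explicit offset
now = datetime.now(timezone.utc)
params = {
    "status": "submitted",
    "start_timestamp_min": (now - timedelta(days=1)).isoformat(),
}
# e.g. start_timestamp_min="2024-01-01T12:00:00+00:00"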

Source code in fractal_server/app/routes/admin/v2/job.py
@router.get("/", response_model=PaginationResponse[JobReadV2])
async def view_job(
    id: int | None = None,
    user_id: int | None = None,
    project_id: int | None = None,
    dataset_id: int | None = None,
    workflow_id: int | None = None,
    status: JobStatusTypeV2 | None = None,
    start_timestamp_min: AwareDatetime | None = None,
    start_timestamp_max: AwareDatetime | None = None,
    end_timestamp_min: AwareDatetime | None = None,
    end_timestamp_max: AwareDatetime | None = None,
    log: bool = True,
    pagination: PaginationRequest = Depends(get_pagination_params),
    user: UserOAuth = Depends(current_superuser_act),
    db: AsyncSession = Depends(get_async_db),
) -> PaginationResponse[JobReadV2]:
    """
    Query `JobV2` table.

    Args:
        id: If not `None`, select a given `applyworkflow.id`.
        user_id: If not `None`, select jobs from projects which include
            the user with this `user_id`.
        project_id: If not `None`, select a given `applyworkflow.project_id`.
        dataset_id: If not `None`, select a given
            `applyworkflow.input_dataset_id`.
        workflow_id: If not `None`, select a given `applyworkflow.workflow_id`.
        status: If not `None`, select a given `applyworkflow.status`.
        start_timestamp_min: If not `None`, select rows with
            `start_timestamp` after `start_timestamp_min`.
        start_timestamp_max: If not `None`, select rows with
            `start_timestamp` before `start_timestamp_max`.
        end_timestamp_min: If not `None`, select rows with `end_timestamp`
            after `end_timestamp_min`.
        end_timestamp_max: If not `None`, select rows with `end_timestamp`
            before `end_timestamp_max`.
        log: If `True`, include `job.log`; if `False`,
            `job.log` is set to `None`.
    """

    # Assign pagination parameters
    page = pagination.page
    page_size = pagination.page_size

    # Prepare statements
    stm = select(JobV2).order_by(JobV2.start_timestamp.desc())
    stm_count = select(func.count(JobV2.id))
    if id is not None:
        stm = stm.where(JobV2.id == id)
        stm_count = stm_count.where(JobV2.id == id)
    if user_id is not None:
        stm = stm.join(ProjectV2).where(
            ProjectV2.user_list.any(UserOAuth.id == user_id)
        )
        stm_count = stm_count.join(ProjectV2).where(
            ProjectV2.user_list.any(UserOAuth.id == user_id)
        )
    if project_id is not None:
        stm = stm.where(JobV2.project_id == project_id)
        stm_count = stm_count.where(JobV2.project_id == project_id)
    if dataset_id is not None:
        stm = stm.where(JobV2.dataset_id == dataset_id)
        stm_count = stm_count.where(JobV2.dataset_id == dataset_id)
    if workflow_id is not None:
        stm = stm.where(JobV2.workflow_id == workflow_id)
        stm_count = stm_count.where(JobV2.workflow_id == workflow_id)
    if status is not None:
        stm = stm.where(JobV2.status == status)
        stm_count = stm_count.where(JobV2.status == status)
    if start_timestamp_min is not None:
        stm = stm.where(JobV2.start_timestamp >= start_timestamp_min)
        stm_count = stm_count.where(
            JobV2.start_timestamp >= start_timestamp_min
        )
    if start_timestamp_max is not None:
        stm = stm.where(JobV2.start_timestamp <= start_timestamp_max)
        stm_count = stm_count.where(
            JobV2.start_timestamp <= start_timestamp_max
        )
    if end_timestamp_min is not None:
        stm = stm.where(JobV2.end_timestamp >= end_timestamp_min)
        stm_count = stm_count.where(JobV2.end_timestamp >= end_timestamp_min)
    if end_timestamp_max is not None:
        stm = stm.where(JobV2.end_timestamp <= end_timestamp_max)
        stm_count = stm_count.where(JobV2.end_timestamp <= end_timestamp_max)

    # Find total number of elements
    res_total_count = await db.execute(stm_count)
    total_count = res_total_count.scalar()
    if page_size is None:
        page_size = total_count
    else:
        stm = stm.offset((page - 1) * page_size).limit(page_size)

    # Get `page_size` rows
    res = await db.execute(stm)
    job_list = res.scalars().all()

    if not log:
        for job in job_list:
            setattr(job, "log", None)

    return PaginationResponse[JobReadV2](
        total_count=total_count,
        page_size=page_size,
        current_page=page,
        items=[job.model_dump() for job in job_list],
    )
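
As a usage sketch, a client could combine filters with pagination as below, assuming the pagination dependency reads page and page_size query parameters; base URL and token are placeholders:

import httpx

BASE_URL = "http://localhost:8000/admin/v2/job"  # placeholder prefix
headers = {"Authorization": "Bearer <admin-token>"}

# Page through failed jobs of one project, skipping logs to keep payloads small
response = httpx.get(
    f"{BASE_URL}/",
    params={
        "project_id": 1,
        "status": "failed",
        "log": False,
        "page": 1,
        "page_size": 20,
    },
    headers=headers,
)
data = response.json()
print(data["total_count"], len(data["items"]))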