Update a Spark job’s state or configuration. The request body must include the jobId.
state: one of unknown, idle, starting, running, finishing, cancelling, finished, cancelled, error, skipped. Example: "finished"
jobRunId: the job run ID. This is distinct from id, which is the name of the job configuration. Example: "hhzitrwancwv"
Example job configuration (aggregation):

{
  "type": "aggregation",
  "id": "api-test-app_click_signals_aggregation",
  "inputCollection": "api-test-app_signals",
  "sourceCatchup": true,
  "sourceRemove": false,
  "sql": "WITH sigs_with_filters AS ( SELECT c.query as query, c.doc_id, q.filters_s as filters, c.type, c.ref_time, coalesce(c.count_i,1) as count_i, c.timestamp_tdt, greatest(coalesce(c.weight_d,0.1),0.0) as weight_d FROM api-test-app_signals c LEFT JOIN (SELECT id, filters_s FROM api-test-app_signals WHERE type='response') q ON q.id = c.fusion_query_id WHERE c.type IN ('click','cart','purchase') AND c.timestamp_tdt >= c.catchup_timestamp_tdt ), signal_type_groups AS ( SELECT SUM(count_i) AS typed_aggr_count_i, query, doc_id, type, filters, time_decay(count_i, timestamp_tdt, \"30 days\", ref_time, weight_d) AS typed_weight_d FROM sigs_with_filters GROUP BY doc_id, query, filters, type ) SELECT concat_ws('|', query, doc_id, filters) as id, SUM(typed_aggr_count_i) AS aggr_count_i, query AS query_s, query AS query_t, doc_id AS doc_id_s, filters AS filters_s, SPLIT(filters, ' \\\\$ ') AS filters_ss, weighted_sum(typed_weight_d, type, 'click:1.0,cart:10.0,purchase:25.0') AS weight_d FROM signal_type_groups GROUP BY query, doc_id, filters",
  "rollupSql": "SELECT concat_ws('|', query_s, doc_id_s, filters_s) as id,\n query_s,\n query_s as query_t,\n doc_id_s,\n filters_s,\n first(aggr_type_s) AS aggr_type_s,\n SPLIT(filters_s, ' \\\\$ ') AS filters_ss,\n SUM(weight_d) AS weight_d,\n SUM(aggr_count_i) AS aggr_count_i\n FROM api-test-app_signals_aggr\n GROUP BY query_s, doc_id_s, filters_s",
  "referenceTime": "2025-10-17T18:56:14.660Z",
  "skipCheckEnabled": true,
  "readOptions": [{ "key": "splits_per_shard", "value": "4" }],
  "skipJobIfSignalsEmpty": true,
  "parameters": [
    {
      "key": "signalTypeWeights",
      "value": "click:1.0,cart:10.0,purchase:25.0"
    },
    {
      "key": "signalTypes",
      "value": "_regex/signalTypeWeights/([\\w\\-\\.]*):([\\d\\.\\-]*)(,|$)/'$1'$3/g"
    }
  ],
  "selectQuery": "*:*",
  "outputCollection": "api-test-app_signals_aggr",
  "useNaturalKey": true,
  "optimizeSegments": 0,
  "dataFormat": "solr",
  "sparkSQL": "SELECT * from spark_input",
  "sparkPartitions": 200
}"driver-api-test-app-click-signal-sjnamxwxsyhq"
Example job run status:

{
  "jobConfigId": "api-test-app_click_signals_aggregation",
  "jobRunId": "sjnamxwxsyhq",
  "aggrClass": "SQL",
  "query": "WITH sigs_with_filters AS (SELECT c.query as query, c.doc_id, q.filters_s as filters, c.type, c.ref_time, coalesce(c.count_i,1) as count_i, c.timestamp_tdt, greatest(coalesce(c.weight_d,0.1),0.0) as weight_d FROM api_test_app_signals c LEFT JOIN (SELECT id, filters_s FROM api_test_app_signals WHERE type='response') q ON q.id = c.fusion_query_id WHERE c.type IN ('click','cart','purchase') AND c.timestamp_tdt >= c.catchup_timestamp_tdt ), signal_type_groups AS (SELECT SUM(count_i) AS typed_aggr_count_i, query, doc_id, type, filters, time_decay(count_i, timestamp_tdt, \"30 days\", ref_time, weight_d) AS typed_weight_d FROM sigs_with_filters GROUP BY doc_id, query, filters, type) SELECT concat_ws('|', query, doc_id, filters) as id, SUM(typed_aggr_count_i) AS aggr_count_i, query AS query_s, query AS query_t, doc_id AS doc_id_s, filters AS filters_s, SPLIT(filters, ' \\\\$ ') AS filters_ss, weighted_sum(typed_weight_d, type, 'click:1.0,cart:10.0,purchase:25.0') AS weight_d FROM signal_type_groups GROUP BY query, doc_id, filters",
  "state": "finished",
  "aggregated": 0,
  "applicationId": "spark-0424dda9215441c0b232210af1d7cb67",
  "podId": "driver-api-test-app-click-signal-sjnamxwxsyhq",
  "aggr_type_s": "click@doc_id,filters,query"
}
"2025-10-16T19:49:42.276Z"
27170
OK
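The elapsed-time value is consistent with the two timestamps; a quick check in plain Python:

from datetime import datetime

def parse(ts):
    # fromisoformat() on older Python versions rejects a trailing "Z".
    return datetime.fromisoformat(ts.replace("Z", "+00:00"))

start = parse("2025-10-16T19:49:15.106Z")
end = parse("2025-10-16T19:49:42.276Z")
print(int((end - start).total_seconds() * 1000))  # 27170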