Merge pull request #2944 from sronsse/fixes_for_team

Queue and download/extract related fixes
Twinaphex 2016-05-05 02:56:28 +02:00
commit cc0f8160b5
5 changed files with 91 additions and 41 deletions

View File

@@ -113,6 +113,35 @@ static retro_task_t *task_queue_get(task_queue_t *queue)
    return task;
 }
 
+static void task_queue_remove(task_queue_t *queue, retro_task_t *task)
+{
+   retro_task_t *t;
+
+   /* Remove first element if needed */
+   if (task == queue->front)
+   {
+      queue->front = task->next;
+      task->next = NULL;
+      return;
+   }
+
+   /* Parse queue */
+   t = queue->front;
+   while (t && t->next)
+   {
+      /* Remove task and update queue */
+      if (t->next == task)
+      {
+         t->next = task->next;
+         task->next = NULL;
+         break;
+      }
+
+      /* Update iterator */
+      t = t->next;
+   }
+}
+
 static void retro_task_internal_gather(void)
 {
    retro_task_t *task = NULL;
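Aside for readers skimming the diff: task_queue_remove() unlinks a task from the singly linked running queue, treating the head as a special case and otherwise walking to the task's predecessor. Below is a minimal standalone sketch of the same unlink pattern, using simplified stand-in types (task_t, queue_t) rather than RetroArch's real retro_task_t/task_queue_t definitions:

#include <stdio.h>

/* Simplified stand-ins for retro_task_t / task_queue_t (hypothetical,
 * not the real RetroArch structures). */
typedef struct task { int id; struct task *next; } task_t;
typedef struct { task_t *front; } queue_t;

/* Same unlink pattern as the new task_queue_remove(): handle the head
 * separately, otherwise walk the list until the predecessor is found. */
static void queue_remove(queue_t *q, task_t *task)
{
   task_t *t;

   if (task == q->front)          /* task is the first element */
   {
      q->front   = task->next;
      task->next = NULL;
      return;
   }

   for (t = q->front; t && t->next; t = t->next)
   {
      if (t->next == task)        /* t is the predecessor of task */
      {
         t->next    = task->next;
         task->next = NULL;
         break;
      }
   }
}

int main(void)
{
   task_t c = {3, NULL}, b = {2, &c}, a = {1, &b};
   queue_t q = {&a};
   task_t *t;

   queue_remove(&q, &b);          /* drop the middle element */

   for (t = q.front; t; t = t->next)
      printf("%d ", t->id);       /* prints: 1 3 */
   printf("\n");
   return 0;
}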
@@ -292,24 +321,17 @@ static void threaded_worker(void *userdata)
    for (;;)
    {
-      retro_task_t *queue = NULL;
       retro_task_t *task = NULL;
       retro_task_t *next = NULL;
 
-      /* pop all into a local queue,
-       * tasks are in the reverse order here. */
-      slock_lock(running_lock);
-
       if (!worker_continue)
         break; /* should we keep running until all tasks finished? */
 
-      while ((task = task_queue_get(&tasks_running)) != NULL)
-      {
-         task->next = queue;
-         queue = task;
-      }
+      slock_lock(running_lock);
 
-      if (queue == NULL) /* no tasks running, lets wait a bit */
+      /* Get first task to run */
+      task = tasks_running.front;
+      if (task == NULL)
       {
          scond_wait(worker_cond, running_lock);
          slock_unlock(running_lock);
@@ -318,21 +340,26 @@ static void threaded_worker(void *userdata)
       slock_unlock(running_lock);
 
-      for (task = queue; task; task = next)
-      {
-         next = task->next;
-         task->handler(task);
+      task->handler(task);
 
-         if (task->finished)
-         {
-            slock_lock(finished_lock);
-            task_queue_put(&tasks_finished, task);
-            slock_unlock(finished_lock);
-         }
-         else
-            retro_task_threaded_push_running(task);
+      slock_lock(running_lock);
+      task_queue_remove(&tasks_running, task);
+      slock_unlock(running_lock);
+
+      /* Update queue */
+      if (!task->finished)
+      {
+         /* Re-add task to running queue */
+         retro_task_threaded_push_running(task);
+      }
+      else
+      {
+         /* Add task to finished queue */
+         slock_lock(finished_lock);
+         task_queue_put(&tasks_finished, task);
+         slock_unlock(finished_lock);
+      }
 
       retro_sleep(10);
    }
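With this change the worker no longer drains the whole running queue into a local reversed list; it runs the front task's handler once, unlinks that task under running_lock via the new task_queue_remove(), and then either re-queues it or moves it to the finished queue. Below is a single-threaded sketch of that control flow, with hypothetical task_t/queue_t helpers and the locking omitted:

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical, simplified model of the reworked worker loop. */
typedef struct task { int id; int steps_left; bool finished; struct task *next; } task_t;
typedef struct { task_t *front, *back; } queue_t;

static void queue_put(queue_t *q, task_t *t)
{
   t->next = NULL;
   if (q->back) q->back->next = t; else q->front = t;
   q->back = t;
}

/* Stand-in for task->handler(task): performs one unit of work. */
static void handler(task_t *t)
{
   if (--t->steps_left <= 0)
      t->finished = true;
}

int main(void)
{
   task_t a = {1, 2, false, NULL}, b = {2, 1, false, NULL};
   queue_t running  = {NULL, NULL};
   queue_t finished = {NULL, NULL};
   task_t *t;

   queue_put(&running, &a);
   queue_put(&running, &b);

   /* Single-threaded equivalent of the new loop (no locks, no waiting). */
   while ((t = running.front) != NULL)
   {
      handler(t);                    /* run the task while it is still queued */

      running.front = t->next;       /* unlink it from the running queue */
      if (!running.front)
         running.back = NULL;
      t->next = NULL;

      if (!t->finished)
         queue_put(&running, t);     /* re-add unfinished task */
      else
         queue_put(&finished, t);    /* move completed task to finished */
   }

   for (t = finished.front; t; t = t->next)
      printf("task %d finished\n", t->id);
   return 0;
}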

View File

@@ -1379,6 +1379,19 @@ static void cb_generic_download(void *task_data,
    fill_pathname_join(output_path, dir_path,
          transf->path, sizeof(output_path));
 
+#ifdef HAVE_ZLIB
+   file_ext = path_get_extension(output_path);
+
+   if (string_is_equal_noncase(file_ext, "zip"))
+   {
+      if (rarch_task_check_decompress(output_path))
+      {
+         err = "Decompression already in progress.";
+         goto finish;
+      }
+   }
+#endif
+
    if (!filestream_write_file(output_path, data->data, data->len))
    {
       err = "Write failed.";
@@ -1386,8 +1399,6 @@ static void cb_generic_download(void *task_data,
    }
 
 #ifdef HAVE_ZLIB
-   file_ext = path_get_extension(output_path);
-
    if (!settings->network.buildbot_auto_extract_archive)
       goto finish;
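The cb_generic_download() change above checks, before the downloaded data is written to disk, whether a decompression task is still running on the target .zip, so the archive is not overwritten mid-extraction. A rough standalone sketch of that guard follows; decompress_in_progress() is a hypothetical stand-in for rarch_task_check_decompress(), and plain strcmp stands in for the case-insensitive string_is_equal_noncase():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *get_extension(const char *path)
{
   const char *dot = strrchr(path, '.');
   return dot ? dot + 1 : "";
}

/* Stand-in predicate; always reports "busy" so the demo takes the guard path. */
static bool decompress_in_progress(const char *path)
{
   (void)path;
   return true;
}

/* Returns an error string, or NULL when the file may be written. */
static const char *try_save_download(const char *output_path)
{
   if (strcmp(get_extension(output_path), "zip") == 0 &&
         decompress_in_progress(output_path))
      return "Decompression already in progress.";

   /* ... the actual write (filestream_write_file in the diff) would go here ... */
   return NULL;
}

int main(void)
{
   const char *err = try_save_download("cores.zip");
   printf("%s\n", err ? err : "written");   /* prints the guard message */
   return 0;
}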

View File

@@ -226,6 +226,18 @@ static bool rarch_task_decompress_finder(
    return string_is_equal(dec->source_file, (const char*)user_data);
 }
 
+bool rarch_task_check_decompress(const char *source_file)
+{
+   task_finder_data_t find_data;
+
+   /* Prepare find parameters */
+   find_data.func     = rarch_task_decompress_finder;
+   find_data.userdata = (void *)source_file;
+
+   /* Return whether decompressing is in progress or not */
+   return task_queue_ctl(TASK_QUEUE_CTL_FIND, &find_data);
+}
+
 bool rarch_task_push_decompress(
       const char *source_file,
       const char *target_dir,
@@ -235,7 +247,6 @@ bool rarch_task_push_decompress(
       retro_task_callback_t cb,
       void *user_data)
 {
-   task_finder_data_t find_data;
    char tmp[PATH_MAX_LENGTH];
    decompress_state_t *s = NULL;
    retro_task_t *t = NULL;
@@ -263,10 +274,7 @@ bool rarch_task_push_decompress(
    if (!valid_ext || !valid_ext[0])
       valid_ext = NULL;
 
-   find_data.func = rarch_task_decompress_finder;
-   find_data.userdata = (void*)source_file;
-
-   if (task_queue_ctl(TASK_QUEUE_CTL_FIND, &find_data))
+   if (rarch_task_check_decompress(source_file))
    {
       RARCH_LOG("[decompress] File '%s' already being decompressed.\n",
             source_file);
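This refactor moves the TASK_QUEUE_CTL_FIND probe into a reusable rarch_task_check_decompress() helper, so the download callback and the push function share one duplicate-job check. Below is a simplified, self-contained sketch of the finder-callback pattern, with stand-in task_t/finder types instead of the real task_finder_data_t and task queue:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified version of the TASK_QUEUE_CTL_FIND pattern:
 * walk the running tasks and apply a caller-supplied predicate. */
typedef struct task { const char *source_file; struct task *next; } task_t;
typedef bool (*finder_t)(task_t *task, void *user_data);
typedef struct { finder_t func; void *userdata; } finder_data_t;

static bool queue_find(task_t *front, const finder_data_t *find)
{
   task_t *t;
   for (t = front; t; t = t->next)
      if (find->func(t, find->userdata))
         return true;
   return false;
}

/* Predicate comparable to rarch_task_decompress_finder(). */
static bool decompress_finder(task_t *task, void *user_data)
{
   return strcmp(task->source_file, (const char*)user_data) == 0;
}

/* Comparable role to the new rarch_task_check_decompress() helper. */
static bool check_decompress(task_t *running, const char *source_file)
{
   finder_data_t find_data;
   find_data.func     = decompress_finder;
   find_data.userdata = (void*)source_file;
   return queue_find(running, &find_data);
}

int main(void)
{
   task_t t2 = {"b.zip", NULL}, t1 = {"a.zip", &t2};
   printf("a.zip busy: %d\n", check_decompress(&t1, "a.zip"));  /* 1 */
   printf("c.zip busy: %d\n", check_decompress(&t1, "c.zip"));  /* 0 */
   return 0;
}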

View File

@@ -45,6 +45,7 @@ typedef struct http_handle
       struct http_connection_t *handle;
       transfer_cb_t cb;
       char elem1[PATH_MAX_LENGTH];
+      char url[PATH_MAX_LENGTH];
    } connection;
    struct http_t *handle;
    transfer_cb_t cb;
@@ -192,20 +193,19 @@ task_finished:
 static bool rarch_task_http_finder(retro_task_t *task, void *user_data)
 {
-   http_handle_t *http = (http_handle_t*)task->state;
-   const char *handle_url = NULL;
+   http_handle_t *http;
 
-   if ( !http || !user_data ||
-        !task || task->handler != rarch_task_http_transfer_handler)
+   if (!task || (task->handler != rarch_task_http_transfer_handler))
       return false;
 
-   if (!http->connection.handle)
+   if (!user_data)
       return false;
 
-   handle_url = net_http_connection_url(http->connection.handle);
-
-   if (!handle_url)
-      return false;
-
-   return string_is_equal(handle_url, (const char*)user_data);
+   http = (http_handle_t*)task->state;
+   if (!http)
+      return false;
+
+   return string_is_equal(http->connection.url, (const char*)user_data);
 }
@@ -246,6 +246,8 @@ bool rarch_task_push_http_transfer(const char *url, const char *type,
    if (type)
       strlcpy(http->connection.elem1, type, sizeof(http->connection.elem1));
 
+   strlcpy(http->connection.url, url, sizeof(http->connection.url));
+
    http->status = HTTP_STATUS_CONNECTION_TRANSFER;
 
    t = (retro_task_t*)calloc(1, sizeof(*t));
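Storing the URL in http_handle_t at push time lets rarch_task_http_finder() compare plain strings instead of asking the connection handle for its URL. A tiny sketch of the idea with a trimmed-down handle struct; URL_MAX, http_handle_init(), the example URLs are all hypothetical, and snprintf stands in for strlcpy:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define URL_MAX 256   /* stand-in for PATH_MAX_LENGTH */

/* Trimmed-down handle: the URL is copied once when the transfer is pushed,
 * so a finder can match transfers without touching the connection object. */
typedef struct { char url[URL_MAX]; } http_handle_t;

static void http_handle_init(http_handle_t *http, const char *url)
{
   /* plays the role of strlcpy(http->connection.url, url, sizeof(...)) */
   snprintf(http->url, sizeof(http->url), "%s", url);
}

/* Comparable role to rarch_task_http_finder(): match on the cached URL. */
static bool http_finder(const http_handle_t *http, const char *url)
{
   return strcmp(http->url, url) == 0;
}

int main(void)
{
   http_handle_t h;
   http_handle_init(&h, "http://example.com/core_info.zip");
   printf("%d\n", http_finder(&h, "http://example.com/core_info.zip")); /* 1 */
   printf("%d\n", http_finder(&h, "http://example.com/other.zip"));     /* 0 */
   return 0;
}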

View File

@@ -61,6 +61,8 @@ int detect_ps1_game(const char *track_path, char *game_id);
 int detect_psp_game(const char *track_path, char *game_id);
 
+bool rarch_task_check_decompress(const char *source_file);
+
 bool rarch_task_push_decompress(
       const char *source_file,
       const char *target_dir,