core: Refactor the wait code utilizing the new fdtable.

* src/fdtable.c, src/fdtable.h: Largely extend.
* src/wait-global.c, src/wait-private.c, src/wait-user.c: Remove and
move code to ...
* src/wait.c: here.
(_gpgme_fd_table_init, fd_table_put): Remove.  Do not call them.
(_gpgme_add_io_cb, _gpgme_add_io_cb_user): Change to use the fdtable.
(_gpgme_remove_io_cb, _gpgme_remove_io_cb_user):  Ditto.
(_gpgme_wait_global_event_cb):  Ditto.
(gpgme_wait_ext, _gpgme_wait_on_condition): Ditto.
* src/wait.h (struct io_cb_tag_s): Add fields 'serial' and 'desc'.
Change 'idx' to 'fd'.
(struct fd_table): Remove.
* src/context.h (struct gpgme_context): Remove 'fdt'.  Rename io_cbs
to user_io_cbs for clarity.
* src/engine-gpgsm.c: Unify trace output.
(start): Pass a description along with the IO handlers.
* src/priv-io.h (struct io_select_fd_s): Rename to io_select_s.
(io_select_t): New.
* src/gpgme.c (_gpgme_cancel_with_err): Replace arg 'ctx' by 'serial'.
(gpgme_cancel): Adjust.
--

This is the second part of a larger refactoring of the wait/event
code.  Does currently only work on Unix and with the private wait
functions (i.e. the async operations don't yet work).

Signed-off-by: Werner Koch <wk@gnupg.org>
This commit is contained in:
Werner Koch 2019-06-13 08:40:33 +02:00
parent ef50bffc71
commit 28e620fa16
No known key found for this signature in database
GPG Key ID: E3FDFF218E45B72B
15 changed files with 999 additions and 946 deletions

View File

@ -75,7 +75,7 @@ main_sources = \
data-compat.c data-identify.c \
signers.c sig-notation.c \
fdtable.c fdtable.h \
wait.c wait-global.c wait-private.c wait-user.c wait.h \
wait.c wait.h \
op-support.c \
encrypt.c encrypt-sign.c decrypt.c decrypt-verify.c verify.c \
sign.c passphrase.c progress.c \

View File

@ -194,10 +194,8 @@ struct gpgme_context
gpgme_status_cb_t status_cb;
void *status_cb_value;
/* A list of file descriptors in active use by the current
operation. */
struct fd_table fdt;
struct gpgme_io_cbs io_cbs;
/* User specific I/O callbacks. */
struct gpgme_io_cbs user_io_cbs;
};
@ -208,7 +206,7 @@ struct gpgme_context
gpg_error_t _gpgme_get_ctx (uint64_t serial, gpgme_ctx_t *r_ctx);
gpgme_error_t _gpgme_cancel_with_err (gpgme_ctx_t ctx, gpg_error_t ctx_err,
gpgme_error_t _gpgme_cancel_with_err (uint64_t serial, gpg_error_t ctx_err,
gpg_error_t op_err);

View File

@ -910,7 +910,7 @@ status_handler (void *opaque, int fd)
/* Try our best to terminate the connection friendly. */
/* assuan_write_line (gpgsm->assuan_ctx, "BYE"); */
TRACE (DEBUG_CTX, "gpgme:status_handler", gpgsm,
"fd 0x%x: error from assuan (%d) getting status line : %s",
"fd=%d: error from assuan (%d) getting status line : %s",
fd, err, gpg_strerror (err));
}
else if (linelen >= 3
@ -922,7 +922,7 @@ status_handler (void *opaque, int fd)
if (! err)
err = gpg_error (GPG_ERR_GENERAL);
TRACE (DEBUG_CTX, "gpgme:status_handler", gpgsm,
"fd 0x%x: ERR line - mapped to: %s",
"fd=%d: ERR line - mapped to: %s",
fd, err ? gpg_strerror (err) : "ok");
/* Try our best to terminate the connection friendly. */
/* assuan_write_line (gpgsm->assuan_ctx, "BYE"); */
@ -951,7 +951,7 @@ status_handler (void *opaque, int fd)
err = gpgsm->colon.fnc (gpgsm->colon.fnc_value, NULL);
}
TRACE (DEBUG_CTX, "gpgme:status_handler", gpgsm,
"fd 0x%x: OK line - final status: %s",
"fd=%d: OK line - final status: %s",
fd, err ? gpg_strerror (err) : "ok");
_gpgme_io_close (gpgsm->status_cb.fd);
return err;
@ -1026,7 +1026,7 @@ status_handler (void *opaque, int fd)
}
}
TRACE (DEBUG_CTX, "gpgme:status_handler", gpgsm,
"fd 0x%x: D line; final status: %s",
"fd=%d: D line; final status: %s",
fd, err? gpg_strerror (err):"ok");
}
else if (linelen > 2
@ -1068,7 +1068,7 @@ status_handler (void *opaque, int fd)
}
TRACE (DEBUG_CTX, "gpgme:status_handler", gpgsm,
"fd 0x%x: D inlinedata; final status: %s",
"fd=%d: D inlinedata; final status: %s",
fd, err? gpg_strerror (err):"ok");
}
else if (linelen > 2
@ -1106,7 +1106,7 @@ status_handler (void *opaque, int fd)
else
fprintf (stderr, "[UNKNOWN STATUS]%s %s", line + 2, rest);
TRACE (DEBUG_CTX, "gpgme:status_handler", gpgsm,
"fd 0x%x: S line (%s) - final status: %s",
"fd=%d: S line (%s) - final status: %s",
fd, line+2, err? gpg_strerror (err):"ok");
}
else if (linelen >= 7
@ -1131,12 +1131,14 @@ status_handler (void *opaque, int fd)
static gpgme_error_t
add_io_cb (engine_gpgsm_t gpgsm, iocb_data_t *iocbd, gpgme_io_cb_t handler)
add_io_cb (engine_gpgsm_t gpgsm, iocb_data_t *iocbd, gpgme_io_cb_t handler,
const char *handler_desc)
{
gpgme_error_t err;
TRACE_BEG (DEBUG_ENGINE, "engine-gpgsm:add_io_cb", gpgsm,
"fd=%d, dir %d", iocbd->fd, iocbd->dir);
TRACE_BEG (DEBUG_ENGINE, "engine-gpgsm:add_io_cb", NULL,
"fd=%d, dir %d (%s-handler)",
iocbd->fd, iocbd->dir, handler_desc);
err = (*gpgsm->io_cbs.add) (gpgsm->io_cbs.add_priv,
iocbd->fd, iocbd->dir,
handler, iocbd->data, &iocbd->tag);
@ -1205,15 +1207,19 @@ start (engine_gpgsm_t gpgsm, const char *command)
return gpg_error (GPG_ERR_GENERAL);
}
err = add_io_cb (gpgsm, &gpgsm->status_cb, status_handler);
err = add_io_cb (gpgsm, &gpgsm->status_cb, status_handler, "status");
if (!err && gpgsm->input_cb.fd != -1)
err = add_io_cb (gpgsm, &gpgsm->input_cb, _gpgme_data_outbound_handler);
err = add_io_cb (gpgsm, &gpgsm->input_cb,
_gpgme_data_outbound_handler, "outbound");
if (!err && gpgsm->output_cb.fd != -1)
err = add_io_cb (gpgsm, &gpgsm->output_cb, _gpgme_data_inbound_handler);
err = add_io_cb (gpgsm, &gpgsm->output_cb,
_gpgme_data_inbound_handler, "inbound");
if (!err && gpgsm->message_cb.fd != -1)
err = add_io_cb (gpgsm, &gpgsm->message_cb, _gpgme_data_outbound_handler);
err = add_io_cb (gpgsm, &gpgsm->message_cb,
_gpgme_data_outbound_handler, "outbound");
if (!err && gpgsm->diag_cb.fd != -1)
err = add_io_cb (gpgsm, &gpgsm->diag_cb, _gpgme_data_inbound_handler);
err = add_io_cb (gpgsm, &gpgsm->diag_cb,
_gpgme_data_inbound_handler, "inbound");
if (!err)
err = assuan_write_line (gpgsm->assuan_ctx, command);

View File

@ -37,6 +37,41 @@ struct fdtable_item_s
{
int fd; /* -1 indicates an unused entry. */
uint64_t owner; /* The S/N of the context owning this FD. */
/* ACTIVE is set if this fd is in the global event loop, has an
* active callback (.io_cb), and has seen the start event. */
unsigned int active:1;
/* DONE is set if this fd was previously active but is not active
* any longer, either because is finished successfully or its I/O
* callback returned an error. Note that ACTIVE and DONE should
* never both be set. */
unsigned int done:1;
/* Infos for io_select. */
unsigned int for_read:1;
unsigned int for_write:1;
unsigned int signaled:1;
/* We are in a closing handler. Note that while this flag is active
* the remove code holds an index into the table. Thus we better
* make sure that the index won't change. Or change the removal
* code to re-find the fd. */
unsigned int closing:1;
/* We are currently running the IO callback. */
unsigned int io_cb_running:1;
/* The I/O callback handler with its value context. */
struct {
gpgme_io_cb_t cb;
void *value;
} io_cb;
/* The error code and the operational error for the done status. */
gpg_error_t done_status;
gpg_error_t done_op_err;
/* The callback to be called before the descriptor is actually closed. */
struct {
fdtable_handler_t handler;
@ -109,6 +144,16 @@ _gpgme_fdtable_insert (int fd)
idx = firstunused;
fdtable[idx].fd = fd;
fdtable[idx].owner = 0;
fdtable[idx].active = 0;
fdtable[idx].done = 0;
fdtable[idx].for_read = 0;
fdtable[idx].for_write = 0;
fdtable[idx].signaled = 0;
fdtable[idx].closing = 0;
fdtable[idx].io_cb_running = 0;
fdtable[idx].io_cb.cb = NULL;
fdtable[idx].io_cb.value = NULL;
fdtable[idx].close_notify.handler = NULL;
fdtable[idx].close_notify.value = NULL;
err = 0;
@ -161,6 +206,178 @@ _gpgme_fdtable_add_close_notify (int fd,
}
/* Set the I/O callback for the FD.  FD must already exist otherwise
 * GPG_ERR_NO_KEY is returned.  OWNER is the serial of the owning
 * context.  If DIRECTION is 1 the callback wants to read from it; if
 * it is 0 the callback wants to write to it.  CB is the actual
 * callback and CB_VALUE the value passed to that callback.  If a
 * callback has already been set GPG_ERR_DUP_VALUE is returned.  To
 * remove the handler, FD and OWNER must be passed as usual but CB be
 * passed as NULL.
 */
gpg_error_t
_gpgme_fdtable_set_io_cb (int fd, uint64_t owner, int direction,
                          gpgme_io_cb_t cb, void *cb_value)
{
  gpg_error_t err;
  int idx;

  TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "fd=%d ctx=%lu dir=%d",
             fd, (unsigned long)owner, direction);

  if (fd < 0 || !owner)
    return TRACE_ERR (gpg_error (GPG_ERR_INV_ARG));

  LOCK (fdtable_lock);
  if (cb)
    {
      /* Install: look up the entry by FD alone; the entry may not yet
       * have an owner, so the owner is (re-)assigned below.  */
      for (idx=0; idx < fdtablesize; idx++)
        if (fdtable[idx].fd == fd)
          break;
      if (idx == fdtablesize)
        {
          err = gpg_error (GPG_ERR_NO_KEY);
          TRACE_LOG ("with_cb: fd=%d owner=%lu", fd, (unsigned long)owner);
          goto leave;
        }
      if (fdtable[idx].io_cb.cb)
        {
          /* Only one I/O callback per FD is allowed.  */
          err = gpg_error (GPG_ERR_DUP_VALUE);
          goto leave;
        }
      fdtable[idx].owner = owner;
      fdtable[idx].for_read = (direction == 1);
      fdtable[idx].for_write = (direction == 0);
      fdtable[idx].signaled = 0;
      fdtable[idx].io_cb.cb = cb;
      fdtable[idx].io_cb.value = cb_value;
    }
  else /* Remove. */
    {
      /* We compare also the owner as a cross-check. */
      for (idx=0; idx < fdtablesize; idx++)
        if (fdtable[idx].fd == fd && fdtable[idx].owner == owner)
          break;
      if (idx == fdtablesize)
        {
          err = gpg_error (GPG_ERR_NO_KEY);
          TRACE_LOG ("remove: fd=%d owner=%lu", fd, (unsigned long)owner);
          /* Dump the whole table to the trace log to ease debugging.  */
          for (idx=0; idx < fdtablesize; idx++)
            TRACE_LOG (" TBL: fd=%d owner=%lu", fdtable[idx].fd, (unsigned long)fdtable[idx].owner);
          goto leave;
        }
      /* Reset only the callback related fields; a registered
       * close_notify handler is kept.  */
      fdtable[idx].for_read = 0;
      fdtable[idx].for_write = 0;
      fdtable[idx].signaled = 0;
      fdtable[idx].io_cb.cb = NULL;
      fdtable[idx].io_cb.value = NULL;
      fdtable[idx].owner = 0;
    }
  err = 0;

 leave:
  UNLOCK (fdtable_lock);
  return TRACE_ERR (err);
}
/* Mark all file descriptors belonging to the context OWNER as active
 * and reset their done flag.  Only entries which have an I/O callback
 * installed are touched.  Returns GPG_ERR_INV_ARG if OWNER is 0.  */
gpg_error_t
_gpgme_fdtable_set_active (uint64_t owner)
{
  int i;

  TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "ctx=%lu", (unsigned long)owner);

  if (!owner)
    return TRACE_ERR (gpg_error (GPG_ERR_INV_ARG));

  LOCK (fdtable_lock);
  for (i = 0; i < fdtablesize; i++)
    {
      if (fdtable[i].fd == -1 || fdtable[i].owner != owner)
        continue;                 /* Unused slot or different context.  */
      if (!fdtable[i].io_cb.cb)
        continue;                 /* No I/O callback installed.  */
      fdtable[i].active = 1;
      fdtable[i].done = 0;
    }
  UNLOCK (fdtable_lock);
  return TRACE_ERR (0);
}
/* Move all active file descriptors of the context OWNER into the done
 * state, recording STATUS and OP_ERR with each of them.  Returns
 * GPG_ERR_INV_ARG if OWNER is 0.  */
gpg_error_t
_gpgme_fdtable_set_done (uint64_t owner, gpg_error_t status, gpg_error_t op_err)
{
  int i;

  TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "ctx=%lu", (unsigned long)owner);

  if (!owner)
    return TRACE_ERR (gpg_error (GPG_ERR_INV_ARG));

  LOCK (fdtable_lock);
  for (i = 0; i < fdtablesize; i++)
    {
      if (fdtable[i].fd == -1 || fdtable[i].owner != owner)
        continue;                 /* Unused slot or different context.  */
      if (!fdtable[i].active)
        continue;                 /* Not in the active state.  */
      fdtable[i].active = 0;
      fdtable[i].done = 1;
      fdtable[i].done_status = status;
      fdtable[i].done_op_err = op_err;
    }
  UNLOCK (fdtable_lock);
  return TRACE_ERR (0);
}
/* For each entry of FDS with its signaled flag set, copy that flag to
 * the corresponding fdtable entry.  Signal flags in the global table
 * are never cleared by this function.  */
void
_gpgme_fdtable_set_signaled (io_select_t fds, unsigned int nfds)
{
  int i;
  unsigned int k, newly;

  if (!nfds)
    return;

  /* FIXME: Highly inefficient code in case of large select lists. */
  newly = 0;
  LOCK (fdtable_lock);
  for (i = 0; i < fdtablesize; i++)
    {
      if (fdtable[i].fd == -1)
        continue;
      /* Look for the first entry in FDS matching this descriptor.  */
      for (k = 0; k < nfds; k++)
        if (fds[k].fd == fdtable[i].fd)
          break;
      if (k == nfds)
        continue;                 /* Not in the caller's list.  */
      if (fds[k].signaled && !fdtable[i].signaled)
        {
          fdtable[i].signaled = 1;
          newly++;                /* Only for tracing.  */
        }
    }
  UNLOCK (fdtable_lock);
  TRACE (DEBUG_SYSIO, __func__, NULL, "fds newly signaled=%u", newly);
}
/* Remove FD from the table after calling the close handler.  Note
* that at the time the close handler is called the FD has been
* removed from the table.  Thus the close handler may not access the
@ -191,15 +408,273 @@ _gpgme_fdtable_remove (int fd)
return TRACE_ERR (gpg_error (GPG_ERR_NO_KEY));
}
TRACE_LOG ("removal of fd=%d owner=%lu (closing=%d)",
fdtable[idx].fd, (unsigned long)fdtable[idx].owner,
fdtable[idx].closing);
handler = fdtable[idx].close_notify.handler;
fdtable[idx].close_notify.handler = NULL;
handlervalue = fdtable[idx].close_notify.value;
fdtable[idx].close_notify.value = NULL;
fdtable[idx].fd = -1;
/* The handler might call into the fdtable again, so of we have a
* handler we can't immediately close it but instead record the fact
* and remove the entry from the table only after the handler has
* been run. */
if (handler)
fdtable[idx].closing = 1;
else if (!fdtable[idx].closing)
fdtable[idx].fd = -1;
UNLOCK (fdtable_lock);
err = handler? handler (fd, handlervalue) : 0;
if (handler)
{
err = handler (fd, handlervalue);
LOCK (fdtable_lock);
TRACE_LOG ("final removal of fd=%d owner=%lu (closing=%d)",
fdtable[idx].fd, (unsigned long)fdtable[idx].owner,
fdtable[idx].closing);
fdtable[idx].fd = -1;
UNLOCK (fdtable_lock);
}
else
err = 0;
return TRACE_ERR (err);
}
/* Return the number of active I/O callbacks for OWNER or for all if
 * OWNER is 0.
 * NOTE(review): The loop counts every used table entry belonging to
 * OWNER, regardless of whether an I/O callback is installed or the
 * entry is in the active state -- confirm that this is intended.  */
unsigned int
_gpgme_fdtable_io_cb_count (uint64_t owner)
{
  int idx;
  unsigned int count = 0;

  LOCK (fdtable_lock);
  for (idx=0; idx < fdtablesize; idx++)
    if (fdtable[idx].fd != -1 && (!owner || fdtable[idx].owner == owner))
      count++;
  UNLOCK (fdtable_lock);
  TRACE (DEBUG_SYSIO, __func__, NULL, "ctx=%lu count=%u",
         (unsigned long)owner, count);
  return count;
}
/* Run all signaled IO callbacks of OWNER or all signaled callbacks if
 * OWNER is 0.  Returns an error code on the first real error
 * encountered.  If R_OP_ERR is not NULL an optional operational error
 * can be stored there.  For EOF the respective flags are set.  */
gpg_error_t
_gpgme_fdtable_run_io_cbs (uint64_t owner, gpg_error_t *r_op_err)
{
  gpg_error_t err;
  int idx;
  int fd;
  gpgme_io_cb_t iocb;
  struct io_cb_data iocb_data;
  uint64_t serial;
  unsigned int cb_count;
  gpgme_ctx_t actx;

  if (r_op_err)
    *r_op_err = 0;

  /* Fix: cast OWNER to unsigned long as done everywhere else in this
   * file; passing a uint64_t for "%lu" is undefined behavior on
   * platforms where unsigned long is not 64 bit.  */
  TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "ctx=%lu", (unsigned long)owner);

  for (;;)
    {
      /* Under the lock pick the next signaled fd which has a callback
       * installed.  The signaled flag of every visited matching entry
       * is consumed so that entries without a callback can't keep us
       * looping.  */
      fd = -1;
      actx = NULL;
      LOCK (fdtable_lock);
      for (idx=0; idx < fdtablesize; idx++)
        if (fdtable[idx].fd != -1 && (!owner || fdtable[idx].owner == owner)
            && fdtable[idx].signaled)
          {
            fdtable[idx].signaled = 0;
            /* Fix: record FD et al. only if a callback is actually
             * installed.  The previous code recorded FD before
             * checking the callback; if the last signaled entry had
             * no callback this led to invoking a NULL pointer.  */
            if (fdtable[idx].io_cb.cb)
              {
                fd = fdtable[idx].fd;
                serial = fdtable[idx].owner;
                iocb = fdtable[idx].io_cb.cb;
                iocb_data.handler_value = fdtable[idx].io_cb.value;
                iocb_data.op_err = 0;
                fdtable[idx].io_cb_running = 1;
                break;
              }
          }
      UNLOCK (fdtable_lock);
      if (fd == -1)
        break;  /* No more callbacks found. */

      /* If the context object is still valid and has not been
       * canceled, we run the I/O callback. */
      err = _gpgme_get_ctx (serial, &actx);
      if (!err)
        {
          err = iocb (&iocb_data, fd);
          if (err)
            TRACE_LOG ("iocb(fd=%d) err=%s", fd, gpg_strerror (err));
        }

      /* Clear the running flag and while we are at it also count the
       * remaining callbacks. */
      cb_count = 0;
      LOCK (fdtable_lock);
      for (idx=0; idx < fdtablesize; idx++)
        {
          if (fdtable[idx].fd == -1)
            continue;
          if (fdtable[idx].fd == fd)
            fdtable[idx].io_cb_running = 0;
          if (fdtable[idx].owner == serial)
            cb_count++;
        }
      UNLOCK (fdtable_lock);

      /* Handle errors or success from the IO callback.  In the error
       * case we close all fds belonging to the same context.  In the
       * success case we check whether any callback is left and only
       * if that is not the case, tell the engine that we are done.
       * The latter indirectly sets the fd into the done state.  */
      if (err)
        {
          _gpgme_cancel_with_err (serial, err, 0);
          return TRACE_ERR (err);
        }
      else if (iocb_data.op_err)
        {
          /* An operational error occurred.  Cancel the current
           * operation but not the session, and signal it. */
          _gpgme_cancel_with_err (serial, 0, iocb_data.op_err);
          /* NOTE: This relies on the operational error being
           * generated after the operation really has completed, for
           * example after no further status line output is generated.
           * Otherwise the following I/O will spill over into the next
           * operation. */
          if (r_op_err)
            *r_op_err = iocb_data.op_err;
          return TRACE_ERR (0);
        }
      else if (!cb_count && actx)
        {
          /* No callbacks left for this context - tell the engine.  */
          struct gpgme_io_event_done_data data = { 0, 0 };
          _gpgme_engine_io_event (actx->engine, GPGME_EVENT_DONE, &data);
        }
    }

  return TRACE_ERR (0);
}
/* Retrieve a list of file descriptors owned by OWNER, or with OWNER
 * being 0 of all fds, and store that list as a new array at R_FDS.
 * Return the number of FDS in that list or 0 if none were selected.
 * The caller must free the array stored at R_FDS; on a memory error
 * 0 is returned with ERRNO set, on success ERRNO is cleared.
 * FLAGS give further selection flags:
 *   FDTABLE_FLAG_ACTIVE       - Only those with the active flag set.
 *   FDTABLE_FLAG_DONE         - Only those with the done flag set.
 *   FDTABLE_FLAG_FOR_READ     - Only the readable FDs.
 *   FDTABLE_FLAG_FOR_WRITE    - Only the writable FDs.
 *   FDTABLE_FLAG_SIGNALED     - Only those with the signaled flag set;
 *                               the flag is reported cleared in the copy.
 *   FDTABLE_FLAG_NOT_SIGNALED - Only those with the signaled flag cleared.
 * NOTE(review): FDTABLE_FLAG_CLEAR is documented in the header but is
 * not evaluated here -- confirm whether it is still needed.
 */
unsigned int
_gpgme_fdtable_get_fds (io_select_t *r_fds, uint64_t owner, unsigned int flags)
{
  int idx;
  unsigned int count = 0;
  io_select_t fds;

  *r_fds = NULL;
  gpg_err_set_errno (0);
  /* We take an easy approach and allocate the array at the size of
   * the entire fdtable. */
  fds = calloc (fdtablesize, sizeof *fds);
  if (!fds)
    return 0;

  LOCK (fdtable_lock);
  for (idx=0; idx < fdtablesize; idx++)
    if (fdtable[idx].fd != -1 && (!owner || fdtable[idx].owner == owner))
      {
        if ((flags & FDTABLE_FLAG_ACTIVE) && !fdtable[idx].active)
          continue;
        if ((flags & FDTABLE_FLAG_DONE) && !fdtable[idx].done)
          continue;
        if ((flags & FDTABLE_FLAG_FOR_READ) && !fdtable[idx].for_read)
          continue;
        if ((flags & FDTABLE_FLAG_FOR_WRITE) && !fdtable[idx].for_write)
          continue;
        if ((flags & FDTABLE_FLAG_SIGNALED) && !fdtable[idx].signaled)
          continue;
        if ((flags & FDTABLE_FLAG_NOT_SIGNALED) && fdtable[idx].signaled)
          continue;
        if (fdtable[idx].io_cb_running || fdtable[idx].closing)
          continue; /* The callback has not yet finished or we are
                     * already closing.  Does not make sense to allow
                     * selecting on it. */
        fds[count].fd = fdtable[idx].fd;
        fds[count].for_read = fdtable[idx].for_read;
        fds[count].for_write = fdtable[idx].for_write;
        /* When filtering on SIGNALED the flag is reported cleared.  */
        fds[count].signaled =
          (flags & FDTABLE_FLAG_SIGNALED)? 0 : fdtable[idx].signaled;
        count++;
      }
  UNLOCK (fdtable_lock);

  *r_fds = fds;
  TRACE (DEBUG_SYSIO, __func__, NULL, "ctx=%lu count=%u",
         (unsigned long)owner, count);
  return count;
}
/* If OWNER is 0 return the status info of the first fd with the done
 * flag set.  If OWNER is not 0 search for a matching owner with the
 * done flag set and return its status info.  R_STATUS and R_OP_ERR
 * receive the recorded values; they are set only when an entry was
 * found.  Returns the serial number of the context found or 0.  */
uint64_t
_gpgme_fdtable_get_done (uint64_t owner,
                         gpg_error_t *r_status, gpg_error_t *r_op_err)
{
  uint64_t serial = 0;
  int idx;

  TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "ctx=%lu", (unsigned long)owner);

  LOCK (fdtable_lock);
  for (idx=0; idx < fdtablesize; idx++)
    if (fdtable[idx].fd != -1 && (!owner || fdtable[idx].owner == owner)
        && fdtable[idx].done)
      {
        /* Found.  If an owner has been given also clear the done
         * flags from all other fds of this owner.  Note that they
         * have the same status info anyway. */
        *r_status = fdtable[idx].done_status;
        *r_op_err = fdtable[idx].done_op_err;
        fdtable[idx].done = 0;
        serial = fdtable[idx].owner;
        if (owner)
          {
            /* Starting at the current index is sufficient because
             * this was the first entry with the done flag set.  */
            for (; idx < fdtablesize; idx++)
              if (fdtable[idx].fd != -1 && fdtable[idx].owner == owner)
                fdtable[idx].done = 0;
          }
        break;
      }
  UNLOCK (fdtable_lock);

  TRACE_SUC ("ctx=%lu", (unsigned long)serial);
  return serial;
}

View File

@ -21,6 +21,18 @@
#ifndef GPGME_FDTABLE_H
#define GPGME_FDTABLE_H
#include "priv-io.h"
/* Flags used by _gpgme_fdtable_get_fds. */
#define FDTABLE_FLAG_ACTIVE 1 /* Only those with the active flag set. */
#define FDTABLE_FLAG_DONE 2 /* Only those with the done flag set. */
#define FDTABLE_FLAG_FOR_READ 4 /* Only those with the for_read flag set. */
#define FDTABLE_FLAG_FOR_WRITE 8 /* Only those with the for_write flag set. */
#define FDTABLE_FLAG_SIGNALED 16 /* Only those with the signaled flag set. */
#define FDTABLE_FLAG_NOT_SIGNALED 32 /* Ditto reversed. */
#define FDTABLE_FLAG_CLEAR 128 /* Clear the signaled flag. */
/* The handler type associated with an FD. It is called with the FD
* and the registered pointer. The handler may return an error code
* but there is no guarantee that this code is used; in particular
@ -35,9 +47,35 @@ gpg_error_t _gpgme_fdtable_insert (int fd);
gpg_error_t _gpgme_fdtable_add_close_notify (int fd,
fdtable_handler_t handler,
void *value);
/* Set or remove the I/O callback. */
gpg_error_t _gpgme_fdtable_set_io_cb (int fd, uint64_t owner, int direction,
gpgme_io_cb_t cb, void *cb_value);
/* Set all FDs of OWNER into the active state. */
gpg_error_t _gpgme_fdtable_set_active (uint64_t owner);
/* Set all FDs of OWNER into the done state. */
gpg_error_t _gpgme_fdtable_set_done (uint64_t owner,
gpg_error_t status, gpg_error_t op_err);
/* Walk over all FDS and copy the signaled flag if set. */
void _gpgme_fdtable_set_signaled (io_select_t fds, unsigned int nfds);
/* Remove FD from the table. This also runs the close handlers. */
gpg_error_t _gpgme_fdtable_remove (int fd);
/* Return the number of active I/O callbacks for OWNER. */
unsigned int _gpgme_fdtable_io_cb_count (uint64_t owner);
/* Run all the signaled IO callbacks of OWNER. */
gpg_error_t _gpgme_fdtable_run_io_cbs (uint64_t owner, gpg_error_t *r_op_err);
/* Return a list of FDs matching the OWNER and FLAGS. */
unsigned int _gpgme_fdtable_get_fds (io_select_t *r_fds,
uint64_t owner, unsigned int flags);
/* Return the status info for the entry of OWNER. */
uint64_t _gpgme_fdtable_get_done (uint64_t owner, gpg_error_t *r_status,
gpg_error_t *r_op_err);
#endif /*GPGME_FDTABLE_H*/

View File

@ -135,7 +135,6 @@ gpgme_new (gpgme_ctx_t *r_ctx)
ctx->include_certs = GPGME_INCLUDE_CERTS_DEFAULT;
ctx->protocol = GPGME_PROTOCOL_OpenPGP;
ctx->sub_protocol = GPGME_PROTOCOL_DEFAULT;
_gpgme_fd_table_init (&ctx->fdt);
LOCK (context_list_lock);
if (def_lc_ctype)
@ -218,36 +217,43 @@ _gpgme_get_ctx (uint64_t serial, gpgme_ctx_t *r_ctx)
*r_ctx = ctx;
return err;
}
/* Cancel the context indetified with SERIAL. Pass CTX_ERR or OP_ERR
* down to the engine. */
gpgme_error_t
_gpgme_cancel_with_err (gpgme_ctx_t ctx, gpg_error_t ctx_err,
_gpgme_cancel_with_err (uint64_t serial, gpg_error_t ctx_err,
gpg_error_t op_err)
{
gpgme_error_t err;
gpgme_ctx_t ctx;
struct gpgme_io_event_done_data data;
TRACE_BEG (DEBUG_CTX, "_gpgme_cancel_with_err", NULL,
"ctx=%lu ctx_err=%i op_err=%i",
CTXSERIAL (ctx), ctx_err, op_err);
(unsigned long)serial, ctx_err, op_err);
if (ctx_err)
{
err = _gpgme_engine_cancel (ctx->engine);
if (err)
return TRACE_ERR (err);
}
LOCK (context_list_lock);
for (ctx = context_list; ctx; ctx = ctx->next_ctx)
if (ctx->serial == serial)
break;
UNLOCK (context_list_lock);
if (!ctx)
err = gpg_error (GPG_ERR_NO_OBJ);
else if (ctx_err)
err = _gpgme_engine_cancel (ctx->engine);
else
err = _gpgme_engine_cancel_op (ctx->engine);
if (!err)
{
err = _gpgme_engine_cancel_op (ctx->engine);
if (err)
return TRACE_ERR (err);
data.err = ctx_err;
data.op_err = op_err;
_gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &data);
}
data.err = ctx_err;
data.op_err = op_err;
_gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &data);
return TRACE_ERR (0);
return TRACE_ERR (err);
}
@ -262,7 +268,7 @@ gpgme_cancel (gpgme_ctx_t ctx)
if (!ctx)
return TRACE_ERR (gpg_error (GPG_ERR_INV_VALUE));
err = _gpgme_cancel_with_err (ctx, gpg_error (GPG_ERR_CANCELED), 0);
err = _gpgme_cancel_with_err (ctx->serial, gpg_error (GPG_ERR_CANCELED), 0);
return TRACE_ERR (err);
}
@ -317,7 +323,7 @@ gpgme_release (gpgme_ctx_t ctx)
_gpgme_engine_release (ctx->engine);
ctx->engine = NULL;
_gpgme_fd_table_deinit (&ctx->fdt);
/* FIXME: Remove stale FDs belonging to us? */
_gpgme_release_result (ctx);
_gpgme_signers_clear (ctx);
_gpgme_sig_notation_clear (ctx);
@ -972,17 +978,17 @@ gpgme_set_io_cbs (gpgme_ctx_t ctx, gpgme_io_cbs_t io_cbs)
CTXSERIAL (ctx),
io_cbs, io_cbs->add, io_cbs->add_priv, io_cbs->remove,
io_cbs->event, io_cbs->event_priv);
ctx->io_cbs = *io_cbs;
ctx->user_io_cbs = *io_cbs;
}
else
{
TRACE (DEBUG_CTX, "gpgme_set_io_cbs", NULL,
"ctx=%lu io_cbs=%p (default)", CTXSERIAL (ctx), io_cbs);
ctx->io_cbs.add = NULL;
ctx->io_cbs.add_priv = NULL;
ctx->io_cbs.remove = NULL;
ctx->io_cbs.event = NULL;
ctx->io_cbs.event_priv = NULL;
ctx->user_io_cbs.add = NULL;
ctx->user_io_cbs.add_priv = NULL;
ctx->user_io_cbs.remove = NULL;
ctx->user_io_cbs.event = NULL;
ctx->user_io_cbs.event_priv = NULL;
}
}
@ -1054,7 +1060,7 @@ gpgme_get_io_cbs (gpgme_ctx_t ctx, gpgme_io_cbs_t io_cbs)
io_cbs, io_cbs->add, io_cbs->add_priv, io_cbs->remove,
io_cbs->event, io_cbs->event_priv);
*io_cbs = ctx->io_cbs;
*io_cbs = ctx->user_io_cbs;
}

View File

@ -173,7 +173,7 @@ _gpgme_op_reset (gpgme_ctx_t ctx, int type)
return err;
}
if (type == 1 || (type == 2 && !ctx->io_cbs.add))
if (type == 1 || (type == 2 && !ctx->user_io_cbs.add))
{
/* Use private event loop. */
io_cbs.add = _gpgme_add_io_cb;
@ -182,7 +182,7 @@ _gpgme_op_reset (gpgme_ctx_t ctx, int type)
io_cbs.event = _gpgme_wait_private_event_cb;
io_cbs.event_priv = ctx;
}
else if (! ctx->io_cbs.add)
else if (!ctx->user_io_cbs.add)
{
/* Use global event loop. */
io_cbs.add = _gpgme_add_io_cb;
@ -194,9 +194,9 @@ _gpgme_op_reset (gpgme_ctx_t ctx, int type)
else
{
/* Use user event loop. */
io_cbs.add = _gpgme_wait_user_add_io_cb;
io_cbs.add = _gpgme_add_io_cb_user;
io_cbs.add_priv = ctx;
io_cbs.remove = _gpgme_wait_user_remove_io_cb;
io_cbs.remove = _gpgme_remove_io_cb_user;
io_cbs.event = _gpgme_wait_user_event_cb;
io_cbs.event_priv = ctx;
}

View File

@ -622,10 +622,14 @@ _gpgme_io_spawn (const char *path, char *const argv[], unsigned int flags,
}
/* Select on the list of fds. Returns: -1 = error, 0 = timeout or
nothing to select, > 0 = number of signaled fds. */
/* Select on the list of fds.
*
* Returns: -1 = error,
* 0 = timeout or nothing to select,
* > 0 = number of signaled fds.
*/
int
_gpgme_io_select (struct io_select_fd_s *fds, size_t nfds, int nonblock)
_gpgme_io_select (io_select_t fds, unsigned int nfds, int nonblock)
{
fd_set readfds;
fd_set writefds;
@ -638,7 +642,7 @@ _gpgme_io_select (struct io_select_fd_s *fds, size_t nfds, int nonblock)
struct timeval timeout = { 1, 0 };
void *dbg_help = NULL;
TRACE_BEG (DEBUG_SYSIO, "_gpgme_io_select", NULL,
"nfds=%zu, nonblock=%u", nfds, nonblock);
"nfds=%u, nonblock=%u", nfds, nonblock);
FD_ZERO (&readfds);
FD_ZERO (&writefds);
@ -736,6 +740,7 @@ _gpgme_io_select (struct io_select_fd_s *fds, size_t nfds, int nonblock)
return TRACE_SYSRES (count);
}
int
_gpgme_io_recvmsg (int fd, struct msghdr *msg, int flags)

View File

@ -51,14 +51,15 @@ struct spawn_fd_item_s
int arg_loc;
};
struct io_select_fd_s
struct io_select_s
{
int fd;
int for_read;
int for_write;
int signaled;
void *opaque;
unsigned int for_read:1;
unsigned int for_write:1;
unsigned int signaled:1;
};
typedef struct io_select_s *io_select_t;
/* These function are either defined in posix-io.c or w32-io.c. */
void _gpgme_io_subsystem_init (void);
@ -89,7 +90,7 @@ int _gpgme_io_spawn (const char *path, char *const argv[], unsigned int flags,
void (*atfork) (void *opaque, int reserved),
void *atforkvalue, pid_t *r_pid);
int _gpgme_io_select (struct io_select_fd_s *fds, size_t nfds, int nonblock);
int _gpgme_io_select (io_select_t fds, unsigned int nfds, int nonblock);
/* Write the printable version of FD to the buffer BUF of length
BUFLEN. The printable version is the representation on the command

View File

@ -911,7 +911,7 @@ parse_error (gpgme_signature_t sig, char *args, int set_status)
&& gpg_err_code (err) == GPG_ERR_BAD_DATA)
{
/* This indicates a double plaintext. The only solid way to
handle this is by failing the oepration. */
handle this is by failing the operation. */
return gpg_error (GPG_ERR_BAD_DATA);
}
else if (!set_status)

View File

@ -1,401 +0,0 @@
/* wait-global.c
* Copyright (C) 2000 Werner Koch (dd9jn)
* Copyright (C) 2001, 2002, 2003, 2004, 2005 g10 Code GmbH
*
* This file is part of GPGME.
*
* GPGME is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* GPGME is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://gnu.org/licenses/>.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <errno.h>
#include "gpgme.h"
#include "sema.h"
#include "util.h"
#include "context.h"
#include "wait.h"
#include "priv-io.h"
#include "ops.h"
#include "debug.h"
/* The global event loop is used for all asynchronous operations
(except key listing) for which no user I/O callbacks are specified.
A context sets up its initial I/O callbacks and then sends the
GPGME_EVENT_START event. After that, it is added to the global
list of active contexts.
The gpgme_wait function contains a select() loop over all file
descriptors in all active contexts. If an error occurs, it closes
all fds in that context and moves the context to the global done
list. Likewise, if a context has removed all I/O callbacks, it is
moved to the global done list.
All contexts in the global done list are eligible for being
returned by gpgme_wait if requested by the caller. */
/* The ctx_list_lock protects the list of active and done contexts.
Insertion into any of these lists is only allowed when the lock is
held. This allows a multi-threaded program to loop over gpgme_wait
and in parallel start asynchronous gpgme operations.
However, the fd tables in the contexts are not protected by this
lock. They are only allowed to change either before the context is
added to the active list (ie, before the start event is signalled)
or in a callback handler. */
DEFINE_STATIC_LOCK (ctx_list_lock);
/* A ctx_list_item is an item in the global list of active or done
contexts. */
struct ctx_list_item
{
/* Every ctx_list_item is an element in a doubly linked list. The
list pointers are protected by the ctx_list_lock. */
struct ctx_list_item *next;
struct ctx_list_item *prev;
gpgme_ctx_t ctx;
/* The status is set when the ctx is moved to the done list. */
gpgme_error_t status;
gpgme_error_t op_err;
};
/* The active list contains all contexts that are in the global event
loop, have active I/O callbacks, and have already seen the start
event. */
static struct ctx_list_item *ctx_active_list;
/* The done list contains all contexts that have previously been
active but now are not active any longer, either because they
finished successfully or an I/O callback returned an error. The
status field in the list item contains the error value (or 0 if
successful). */
static struct ctx_list_item *ctx_done_list;
/* Put CTX onto the global list of active contexts.  Returns an error
 * on memory exhaustion, 0 otherwise.  */
static gpgme_error_t
ctx_active (gpgme_ctx_t ctx)
{
  struct ctx_list_item *item;

  item = malloc (sizeof *item);
  if (!item)
    return gpg_error_from_syserror ();
  item->ctx = ctx;

  LOCK (ctx_list_lock);
  /* Link ITEM in front of the active list.  */
  item->prev = NULL;
  item->next = ctx_active_list;
  if (item->next)
    item->next->prev = item;
  ctx_active_list = item;
  UNLOCK (ctx_list_lock);
  return 0;
}
/* Enter the context CTX into the done list with status STATUS and
 * operational error OP_ERR.  CTX must currently be on the active
 * list; this is asserted.  */
static void
ctx_done (gpgme_ctx_t ctx, gpgme_error_t status, gpgme_error_t op_err)
{
  struct ctx_list_item *li;

  LOCK (ctx_list_lock);
  /* Find CTX on the active list.  */
  li = ctx_active_list;
  while (li && li->ctx != ctx)
    li = li->next;
  assert (li);

  /* Remove LI from active list. */
  if (li->next)
    li->next->prev = li->prev;
  if (li->prev)
    li->prev->next = li->next;
  else
    ctx_active_list = li->next;

  li->status = status;
  li->op_err = op_err;

  /* Add LI to done list. */
  li->next = ctx_done_list;
  li->prev = NULL;
  if (ctx_done_list)
    ctx_done_list->prev = li;
  ctx_done_list = li;
  UNLOCK (ctx_list_lock);
}
/* Find finished context CTX (or any context if CTX is NULL) and
return its status in STATUS after removing it from the done list.
If a matching context could be found, return it. Return NULL if no
context could be found. */
/* Pop a finished context from the done list.  If CTX is not NULL only
   that specific context matches; otherwise any done context does.  On
   a match, its result codes are stored in *STATUS and *OP_ERR (if
   given), the list item is released, and the context is returned.
   Returns NULL when no matching context is on the done list.  */
static gpgme_ctx_t
ctx_wait (gpgme_ctx_t ctx, gpgme_error_t *status, gpgme_error_t *op_err)
{
  struct ctx_list_item *item;

  LOCK (ctx_list_lock);
  item = ctx_done_list;
  if (ctx)
    {
      /* A specific context is requested - scan for it.  */
      while (item && item->ctx != ctx)
        item = item->next;
    }
  if (!item)
    {
      UNLOCK (ctx_list_lock);
      return NULL;
    }

  ctx = item->ctx;
  if (status)
    *status = item->status;
  if (op_err)
    *op_err = item->op_err;

  /* Unlink ITEM from the done list and release it.  */
  if (item->next)
    item->next->prev = item->prev;
  if (item->prev)
    item->prev->next = item->next;
  else
    ctx_done_list = item->next;
  free (item);

  UNLOCK (ctx_list_lock);
  return ctx;
}
/* Internal I/O callback functions. */
/* The add_io_cb and remove_io_cb handlers are shared with the private
event loops. */
/* I/O event handler for the global event loop.  DATA is the context
   that registered the handler.  START events enter the context into
   the active list; DONE events move it to the done list together with
   its result codes.  Key and trust item listing events must never be
   routed through the global loop.  */
void
_gpgme_wait_global_event_cb (void *data, gpgme_event_io_t type,
                             void *type_data)
{
  gpgme_ctx_t ctx = (gpgme_ctx_t) data;

  assert (ctx);
  switch (type)
    {
    case GPGME_EVENT_START:
      {
        gpgme_error_t err = ctx_active (ctx);

        if (err)
          /* An error occurred.  Close all fds in this context, and
             send the error in a done event.  */
          _gpgme_cancel_with_err (ctx, err, 0);
      }
      break;

    case GPGME_EVENT_DONE:
      {
        gpgme_io_event_done_data_t done_data =
          (gpgme_io_event_done_data_t) type_data;

        /* Record the result codes and move the context to the done
           list so that gpgme_wait can return it.  */
        ctx_done (ctx, done_data->err, done_data->op_err);
      }
      break;

    case GPGME_EVENT_NEXT_KEY:
      assert (!"Unexpected event GPGME_EVENT_NEXT_KEY");
      break;

    case GPGME_EVENT_NEXT_TRUSTITEM:
      assert (!"Unexpected event GPGME_EVENT_NEXT_TRUSTITEM");
      break;

    default:
      assert (!"Unexpected event");
      break;
    }
}
/* Perform asynchronous operations in the global event loop (ie, any
asynchronous operation except key listing and trustitem listing
operations). If CTX is not a null pointer, the function will
return if the asynchronous operation in the context CTX finished.
Otherwise the function will return if any asynchronous operation
finished. If HANG is zero, the function will not block for a long
time. Otherwise the function does not return until an operation
matching CTX finished.
If a matching context finished, it is returned, and *STATUS is set
to the error value of the operation in that context. Otherwise, if
the timeout expires, NULL is returned and *STATUS is 0. If an
error occurs, NULL is returned and *STATUS is set to the error
value. */
gpgme_ctx_t
gpgme_wait_ext (gpgme_ctx_t ctx, gpgme_error_t *status,
                gpgme_error_t *op_err, int hang)
{
  do
    {
      unsigned int i = 0;
      struct ctx_list_item *li;
      struct fd_table fdt;
      int nr;

      /* Collect the file descriptors of all active contexts into one
         temporary table for the select call.  */
      LOCK (ctx_list_lock);
      for (li = ctx_active_list; li; li = li->next)
        i += li->ctx->fdt.size;
      fdt.fds = malloc (i * sizeof (struct io_select_fd_s));
      if (!fdt.fds)
        {
          int saved_err = gpg_error_from_syserror ();
          UNLOCK (ctx_list_lock);
          if (status)
            *status = saved_err;
          if (op_err)
            *op_err = 0;
          return NULL;
        }
      fdt.size = i;
      i = 0;
      for (li = ctx_active_list; li; li = li->next)
        {
          memcpy (&fdt.fds[i], li->ctx->fdt.fds,
                  li->ctx->fdt.size * sizeof (struct io_select_fd_s));
          i += li->ctx->fdt.size;
        }
      UNLOCK (ctx_list_lock);

      nr = _gpgme_io_select (fdt.fds, fdt.size, 0);
      if (nr < 0)
        {
          int saved_err = gpg_error_from_syserror ();
          free (fdt.fds);
          if (status)
            *status = saved_err;
          if (op_err)
            *op_err = 0;
          return NULL;
        }

      /* Run the I/O callback of every signaled fd.  */
      for (i = 0; i < fdt.size && nr; i++)
        {
          if (fdt.fds[i].fd != -1 && fdt.fds[i].signaled)
            {
              gpgme_ctx_t ictx;
              gpgme_error_t err = 0;
              gpgme_error_t local_op_err = 0;
              struct wait_item_s *item;

              assert (nr);
              nr--;

              item = (struct wait_item_s *) fdt.fds[i].opaque;
              assert (item);
              ictx = item->ctx;
              assert (ictx);

              /* Check cancellation on the context owning this fd
                 (ICTX), not on the caller-supplied CTX: CTX may be
                 NULL here (wait for any context), and the callback
                 being run belongs to ICTX anyway.  */
              LOCK (ictx->lock);
              if (ictx->canceled)
                err = gpg_error (GPG_ERR_CANCELED);
              UNLOCK (ictx->lock);

              if (!err)
                err = _gpgme_run_io_cb (&fdt.fds[i], 0, &local_op_err);
              if (err || local_op_err)
                {
                  /* An error occurred.  Close all fds in this context,
                     and signal it.  */
                  _gpgme_cancel_with_err (ictx, err, local_op_err);
                  /* Break out of the loop, and retry the select()
                     from scratch, because now all fds should be
                     gone.  */
                  break;
                }
            }
        }
      free (fdt.fds);

      /* Now some contexts might have finished successfully.  */
      LOCK (ctx_list_lock);
    retry:
      for (li = ctx_active_list; li; li = li->next)
        {
          gpgme_ctx_t actx = li->ctx;

          /* A context with no fd left in its table is finished.  */
          for (i = 0; i < actx->fdt.size; i++)
            if (actx->fdt.fds[i].fd != -1)
              break;
          if (i == actx->fdt.size)
            {
              struct gpgme_io_event_done_data data;

              data.err = 0;
              data.op_err = 0;

              /* FIXME: This does not perform too well.  We have to
                 release the lock because the I/O event handler
                 acquires it to remove the context from the active
                 list.  Two alternative strategies are worth
                 considering: Either implement the DONE event handler
                 here in a lock-free manner, or save a list of all
                 contexts to be released and call the DONE events
                 afterwards.  */
              UNLOCK (ctx_list_lock);
              _gpgme_engine_io_event (actx->engine, GPGME_EVENT_DONE, &data);
              LOCK (ctx_list_lock);
              goto retry;
            }
        }
      UNLOCK (ctx_list_lock);

      {
        gpgme_ctx_t dctx = ctx_wait (ctx, status, op_err);

        if (dctx)
          {
            /* A matching context finished; stop looping.  */
            ctx = dctx;
            hang = 0;
          }
        else if (!hang)
          {
            /* Not told to hang around and nothing is done yet.  */
            ctx = NULL;
            if (status)
              *status = 0;
            if (op_err)
              *op_err = 0;
          }
      }
    }
  while (hang);

  return ctx;
}
/* Convenience wrapper around gpgme_wait_ext for callers that are not
   interested in operational errors.  */
gpgme_ctx_t
gpgme_wait (gpgme_ctx_t ctx, gpgme_error_t *status, int hang)
{
  gpgme_ctx_t result;

  result = gpgme_wait_ext (ctx, status, NULL, hang);
  return result;
}

View File

@ -1,180 +0,0 @@
/* wait-private.c
* Copyright (C) 2000 Werner Koch (dd9jn)
* Copyright (C) 2001, 2002, 2003, 2004, 2005 g10 Code GmbH
*
* This file is part of GPGME.
*
* GPGME is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* GPGME is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://gnu.org/licenses/>.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif
#include <assert.h>
#include <errno.h>
#include "gpgme.h"
#include "context.h"
#include "wait.h"
#include "ops.h"
#include "priv-io.h"
#include "util.h"
#include "debug.h"
/* The private event loops are used for all blocking operations, and
for the key and trust item listing operations. They are completely
separated from each other. */
/* Internal I/O callback functions. */
/* The add_io_cb and remove_io_cb handlers are shared with the global
event loops. */
/* I/O event handler used by the private event loops.  Only the
   NEXT_KEY and NEXT_TRUSTITEM events require an action; START and
   DONE are no-ops because the wait routine itself drives the
   operation to completion.  */
void
_gpgme_wait_private_event_cb (void *data, gpgme_event_io_t type,
                              void *type_data)
{
  if (type == GPGME_EVENT_NEXT_KEY)
    _gpgme_op_keylist_event_cb (data, type, type_data);
  else if (type == GPGME_EVENT_NEXT_TRUSTITEM)
    _gpgme_op_trustlist_event_cb (data, type, type_data);
  /* GPGME_EVENT_START and GPGME_EVENT_DONE need no handling here, as
     the wait routine is called after the initialization is finished
     and collects the result itself.  */
}
/* If COND is a null pointer, wait until the blocking operation in CTX
finished and return its error value. Otherwise, wait until COND is
satisfied or the operation finished. */
/* Drive the private event loop for CTX.  If COND is a null pointer,
   loop until the blocking operation in CTX finished and return its
   error value.  Otherwise, loop until *COND becomes true or the
   operation finished.  Operational errors are stored in *OP_ERR_P if
   that is non-NULL; in that case 0 is returned.  */
gpgme_error_t
_gpgme_wait_on_condition (gpgme_ctx_t ctx, volatile int *cond,
                          gpgme_error_t *op_err_p)
{
  gpgme_error_t err = 0;
  int hang = 1;

  if (op_err_p)
    *op_err_p = 0;

  do
    {
      /* Block in select on all fds registered for this context.  */
      int nr = _gpgme_io_select (ctx->fdt.fds, ctx->fdt.size, 0);
      unsigned int i;

      if (nr < 0)
        {
          /* An error occurred.  Close all fds in this context, and
             signal it.  */
          err = gpg_error_from_syserror ();
          _gpgme_cancel_with_err (ctx, err, 0);
          return err;
        }

      /* Run the callback for each signaled fd; NR counts the
         remaining signaled entries so we can stop early.  */
      for (i = 0; i < ctx->fdt.size && nr; i++)
        {
          if (ctx->fdt.fds[i].fd != -1 && ctx->fdt.fds[i].signaled)
            {
              gpgme_error_t op_err = 0;

              ctx->fdt.fds[i].signaled = 0;
              assert (nr);
              nr--;

              /* A canceled context must not run further callbacks.  */
              LOCK (ctx->lock);
              if (ctx->canceled)
                err = gpg_error (GPG_ERR_CANCELED);
              UNLOCK (ctx->lock);

              if (!err)
                err = _gpgme_run_io_cb (&ctx->fdt.fds[i], 0, &op_err);
              if (err)
                {
                  /* An error occurred.  Close all fds in this context,
                     and signal it.  */
                  _gpgme_cancel_with_err (ctx, err, 0);
                  return err;
                }
              else if (op_err)
                {
                  /* An operational error occurred.  Cancel the current
                     operation but not the session, and signal it.  */
                  _gpgme_cancel_with_err (ctx, 0, op_err);

                  /* NOTE: This relies on the operational error being
                     generated after the operation really has
                     completed, for example after no further status
                     line output is generated.  Otherwise the
                     following I/O will spill over into the next
                     operation.  */
                  if (op_err_p)
                    *op_err_p = op_err;
                  return 0;
                }
            }
        }

      /* When no fd is left in the table, the operation is complete
         and we emit the DONE event.  */
      for (i = 0; i < ctx->fdt.size; i++)
        if (ctx->fdt.fds[i].fd != -1)
          break;
      if (i == ctx->fdt.size)
        {
          struct gpgme_io_event_done_data data;

          data.err = 0;
          data.op_err = 0;
          _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &data);
          hang = 0;
        }
      if (cond && *cond)
        hang = 0;
    }
  while (hang);

  return 0;
}
/* Wait until the blocking operation in context CTX has finished and
return the error value. This variant can not be used for
session-based protocols. */
gpgme_error_t
_gpgme_wait_one (gpgme_ctx_t ctx)
{
  /* No condition and no interest in operational errors.  */
  return _gpgme_wait_on_condition (ctx, NULL, NULL);
}
/* Wait until the blocking operation in context CTX has finished and
return the error value. This is the right variant to use for
session-based protocols. */
gpgme_error_t
_gpgme_wait_one_ext (gpgme_ctx_t ctx, gpgme_error_t *op_err)
{
  /* Like _gpgme_wait_one but also reports operational errors, as
     needed by session-based protocols.  */
  return _gpgme_wait_on_condition (ctx, NULL, op_err);
}

View File

@ -1,133 +0,0 @@
/* wait-user.c
* Copyright (C) 2000 Werner Koch (dd9jn)
* Copyright (C) 2001, 2002, 2003, 2004, 2005 g10 Code GmbH
*
* This file is part of GPGME.
*
* GPGME is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* GPGME is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://gnu.org/licenses/>.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif
#include <assert.h>
#include "gpgme.h"
#include "context.h"
#include "priv-io.h"
#include "wait.h"
#include "ops.h"
#include "debug.h"
/* The user event loops are used for all asynchronous operations for
which a user callback is defined. */
/* Internal I/O Callbacks. */
/* I/O callback wrapper used with user-provided event loops.  DATA is
   the tag created by _gpgme_wait_user_add_io_cb; FD is unused because
   the tag already records the slot in the context's fd table.  Runs
   the real I/O callback and, once no fd is left in the table, emits
   the DONE event.  Always returns 0; errors are delivered via
   _gpgme_cancel_with_err instead.  */
gpgme_error_t
_gpgme_user_io_cb_handler (void *data, int fd)
{
  gpgme_error_t err = 0;
  gpgme_error_t op_err = 0;
  struct tag *tag = (struct tag *) data;
  gpgme_ctx_t ctx;

  (void)fd;

  assert (data);
  ctx = tag->ctx;
  assert (ctx);

  /* A canceled context must not run further callbacks.  */
  LOCK (ctx->lock);
  if (ctx->canceled)
    err = gpg_error (GPG_ERR_CANCELED);
  UNLOCK (ctx->lock);

  if (! err)
    err = _gpgme_run_io_cb (&ctx->fdt.fds[tag->idx], 0, &op_err);

  if (err || op_err)
    _gpgme_cancel_with_err (ctx, err, op_err);
  else
    {
      unsigned int i;

      /* If no fd is left in the table, the operation has finished and
         we send the DONE event.  */
      for (i = 0; i < ctx->fdt.size; i++)
        if (ctx->fdt.fds[i].fd != -1)
          break;

      if (i == ctx->fdt.size)
        {
          struct gpgme_io_event_done_data done_data;

          done_data.err = 0;
          done_data.op_err = 0;
          _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &done_data);
        }
    }
  return 0;
}
/* Register the file descriptor FD with the handler FNC (which gets
FNC_DATA as its first argument) for the direction DIR. DATA should
be the context for which the fd is added. R_TAG will hold the tag
that can be used to remove the fd. */
gpgme_error_t
_gpgme_wait_user_add_io_cb (void *data, int fd, int dir, gpgme_io_cb_t fnc,
                            void *fnc_data, void **r_tag)
{
  gpgme_ctx_t ctx = (gpgme_ctx_t) data;
  struct tag *tag;
  gpgme_error_t err;

  assert (ctx);

  /* First enter FD into our own table ...  */
  err = _gpgme_add_io_cb (data, fd, dir, fnc, fnc_data, r_tag);
  if (err)
    return err;
  tag = *r_tag;
  assert (tag);

  /* ... then register it with the user's event loop, remembering the
     user's tag so we can deregister it later.  */
  err = (*ctx->io_cbs.add) (ctx->io_cbs.add_priv, fd, dir,
                            _gpgme_user_io_cb_handler, *r_tag,
                            &tag->user_tag);
  if (err)
    /* Registration with the user's loop failed - roll back our own
       entry so we don't leak it.  */
    _gpgme_remove_io_cb (*r_tag);
  return err;
}
/* Remove an I/O callback that was registered with
   _gpgme_wait_user_add_io_cb.  DATA is the tag returned by that
   function.  */
void
_gpgme_wait_user_remove_io_cb (void *data)
{
  struct tag *tag = (struct tag *) data;
  gpgme_ctx_t ctx;

  assert (tag);
  ctx = tag->ctx;

  /* Deregister from the user's event loop first, then drop our own
     table entry.  NOTE(review): TAG appears to be owned and released
     by _gpgme_remove_io_cb - confirm before using TAG afterwards.  */
  (*ctx->io_cbs.remove) (tag->user_tag);
  _gpgme_remove_io_cb (data);
}
/* Forward an I/O event to the user's event handler, if one is set.  */
void
_gpgme_wait_user_event_cb (void *data, gpgme_event_io_t type, void *type_data)
{
  gpgme_ctx_t ctx = (gpgme_ctx_t) data;

  if (!ctx->io_cbs.event)
    return;
  ctx->io_cbs.event (ctx->io_cbs.event_priv, type, type_data);
}

View File

@ -38,190 +38,440 @@
#include "priv-io.h"
#include "engine.h"
#include "debug.h"
#include "fdtable.h"
void
_gpgme_fd_table_init (fd_table_t fdt)
/* Wrapper for the user wait handler to match the exported prototype.
* This is used by _gpgme_add_io_cb_user. */
static gpg_error_t
user_io_cb_handler (void *data, int fd)
{
fdt->fds = NULL;
fdt->size = 0;
}
struct io_cb_tag_s *tag = data;
gpg_error_t err;
uint64_t serial;
gpgme_ctx_t ctx;
gpg_error_t op_err;
void
_gpgme_fd_table_deinit (fd_table_t fdt)
{
if (fdt->fds)
free (fdt->fds);
}
(void)fd;
assert (data);
serial = tag->serial;
assert (serial);
/* XXX We should keep a marker and roll over for speed. */
static gpgme_error_t
fd_table_put (fd_table_t fdt, int fd, int dir, void *opaque, int *idx)
{
unsigned int i, j;
struct io_select_fd_s *new_fds;
for (i = 0; i < fdt->size; i++)
err = _gpgme_fdtable_run_io_cbs (serial, &op_err);
if (err || op_err)
;
else if (!_gpgme_fdtable_io_cb_count (serial))
{
if (fdt->fds[i].fd == -1)
break;
/* No more active callbacks - emit a DONE. */
struct gpgme_io_event_done_data done_data = { 0, 0 };
_gpgme_get_ctx (serial, &ctx);
if (ctx)
_gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &done_data);
}
if (i == fdt->size)
{
#define FDT_ALLOCSIZE 10
new_fds = realloc (fdt->fds, (fdt->size + FDT_ALLOCSIZE)
* sizeof (*new_fds));
if (!new_fds)
return gpg_error_from_syserror ();
fdt->fds = new_fds;
fdt->size += FDT_ALLOCSIZE;
for (j = 0; j < FDT_ALLOCSIZE; j++)
fdt->fds[i + j].fd = -1;
}
fdt->fds[i].fd = fd;
fdt->fds[i].for_read = (dir == 1);
fdt->fds[i].for_write = (dir == 0);
fdt->fds[i].signaled = 0;
fdt->fds[i].opaque = opaque;
*idx = i;
return 0;
}
/* Register the file descriptor FD with the handler FNC (which gets
FNC_DATA as its first argument) for the direction DIR. DATA should
be the context for which the fd is added. R_TAG will hold the tag
that can be used to remove the fd. */
that can be used to remove the fd. This function is used for the
global and the private wait loops. */
gpgme_error_t
_gpgme_add_io_cb (void *data, int fd, int dir, gpgme_io_cb_t fnc,
void *fnc_data, void **r_tag)
{
gpgme_error_t err;
gpgme_ctx_t ctx = (gpgme_ctx_t) data;
fd_table_t fdt;
struct wait_item_s *item;
struct tag *tag;
struct io_cb_tag_s *tag;
TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "ctx=%lu fd=%d, dir %d",
CTXSERIAL (ctx), fd, dir);
if (!fnc)
return gpg_error (GPG_ERR_INV_ARG);
assert (fnc);
assert (ctx);
fdt = &ctx->fdt;
assert (fdt);
tag = malloc (sizeof *tag);
tag = calloc (1, sizeof *tag);
if (!tag)
return gpg_error_from_syserror ();
tag->ctx = ctx;
tag->serial = ctx->serial;
tag->fd = fd;
/* Allocate a structure to hold information about the handler. */
item = calloc (1, sizeof *item);
if (!item)
{
free (tag);
return gpg_error_from_syserror ();
}
item->ctx = ctx;
item->dir = dir;
item->handler = fnc;
item->handler_value = fnc_data;
err = fd_table_put (fdt, fd, dir, item, &tag->idx);
err = _gpgme_fdtable_set_io_cb (fd, ctx->serial, dir, fnc, fnc_data);
if (err)
{
free (tag);
free (item);
return err;
return TRACE_ERR (err);
}
TRACE (DEBUG_CTX, "_gpgme_add_io_cb", NULL,
"ctx=%lu fd=%d dir=%d -> tag=%p", CTXSERIAL (ctx), fd, dir, tag);
*r_tag = tag;
TRACE_SUC ("tag=%p", tag);
return 0;
}
/* Register the file descriptor FD with the handler FNC (which gets
FNC_DATA as its first argument) for the direction DIR. DATA should
be the context for which the fd is added. R_TAG will hold the tag
that can be used to remove the fd. This function is used for the
user wait loops. */
gpg_error_t
_gpgme_add_io_cb_user (void *data, int fd, int dir, gpgme_io_cb_t fnc,
void *fnc_data, void **r_tag)
{
gpgme_ctx_t ctx = (gpgme_ctx_t) data;
struct io_cb_tag_s *tag;
gpgme_error_t err;
TRACE_BEG (DEBUG_SYSIO, __func__, NULL, "ctx=%lu fd=%d, dir %d",
CTXSERIAL (ctx), fd, dir);
assert (ctx);
err = _gpgme_add_io_cb (data, fd, dir, fnc, fnc_data, r_tag);
if (err)
return TRACE_ERR (err);
tag = *r_tag;
assert (tag);
err = ctx->user_io_cbs.add (ctx->user_io_cbs.add_priv, fd, dir,
user_io_cb_handler, *r_tag,
&tag->user_tag);
if (err)
_gpgme_remove_io_cb (*r_tag);
return TRACE_ERR (err);
}
/* This function is used for the global and the private wait loops. */
void
_gpgme_remove_io_cb (void *data)
{
struct tag *tag = data;
gpgme_ctx_t ctx;
fd_table_t fdt;
int idx;
struct io_cb_tag_s *tag = data;
gpg_error_t err;
assert (tag);
ctx = tag->ctx;
assert (ctx);
fdt = &ctx->fdt;
assert (fdt);
idx = tag->idx;
TRACE (DEBUG_CTX, "_gpgme_remove_io_cb", NULL,
"ctx=%lu setting fd=%d (item=%p data=%p) done",
CTXSERIAL (ctx),
fdt->fds[idx].fd,
fdt->fds[idx].opaque, data);
free (fdt->fds[idx].opaque);
err = _gpgme_fdtable_set_io_cb (tag->fd, tag->serial, 0, NULL, NULL);
if (err)
{
TRACE (DEBUG_CTX, __func__, NULL, "tag=%p (ctx=%lu fd=%d) failed: %s",
tag, tag->serial, tag->fd, gpg_strerror (err));
}
else
{
TRACE (DEBUG_CTX, __func__, NULL, "tag=%p (ctx=%lu fd=%d) done",
tag, tag->serial, tag->fd);
}
free (tag);
/* Free the table entry. */
fdt->fds[idx].fd = -1;
fdt->fds[idx].for_read = 0;
fdt->fds[idx].for_write = 0;
fdt->fds[idx].opaque = NULL;
}
/* This function is used for the user wait loops. */
void
_gpgme_remove_io_cb_user (void *data)
{
struct io_cb_tag_s *tag = data;
gpgme_ctx_t ctx;
assert (tag);
_gpgme_get_ctx (tag->serial, &ctx);
if (ctx)
ctx->user_io_cbs.remove (tag->user_tag);
_gpgme_remove_io_cb (data);
}
/* This is slightly embarrassing. The problem is that running an I/O
callback _may_ influence the status of other file descriptors. Our
own event loops could compensate for that, but the external event
loops cannot. FIXME: We may still want to optimize this a bit when
we are called from our own event loops. So if CHECKED is 1, the
check is skipped. FIXME: Give an example on how the status of other
fds can be influenced. */
gpgme_error_t
_gpgme_run_io_cb (struct io_select_fd_s *an_fds, int checked,
gpgme_error_t *op_err)
/* The internal I/O callback function used for the global event loop.
That loop is used for all asynchronous operations (except key
listing) for which no user I/O callbacks are specified.
A context sets up its initial I/O callbacks and then sends the
GPGME_EVENT_START event. After that, it is added to the global
list of active contexts.
The gpgme_wait function contains a select() loop over all file
descriptors in all active contexts. If an error occurs, it closes
all fds in that context and moves the context to the global done
list. Likewise, if a context has removed all I/O callbacks, it is
moved to the global done list.
All contexts in the global done list are eligible for being
returned by gpgme_wait if requested by the caller. */
void
_gpgme_wait_global_event_cb (void *data, gpgme_event_io_t type,
void *type_data)
{
struct wait_item_s *item;
struct io_cb_data iocb_data;
gpgme_error_t err;
gpgme_ctx_t ctx = (gpgme_ctx_t) data;
gpg_error_t err;
item = (struct wait_item_s *) an_fds->opaque;
assert (item);
assert (ctx);
if (!checked)
switch (type)
{
int nr;
struct io_select_fd_s fds;
case GPGME_EVENT_START:
{
err = _gpgme_fdtable_set_active (ctx->serial);
if (err)
/* An error occurred. Close all fds in this context, and
send the error in a done event. */
_gpgme_cancel_with_err (ctx->serial, err, 0);
}
break;
TRACE (DEBUG_CTX, "_gpgme_run_io_cb", item, "need to check");
fds = *an_fds;
fds.signaled = 0;
/* Just give it a quick poll. */
nr = _gpgme_io_select (&fds, 1, 1);
assert (nr <= 1);
if (nr < 0)
return gpg_error_from_syserror ();
else if (nr == 0)
case GPGME_EVENT_DONE:
{
gpgme_io_event_done_data_t done_data =
(gpgme_io_event_done_data_t) type_data;
_gpgme_fdtable_set_done (ctx->serial,
done_data->err, done_data->op_err);
}
break;
case GPGME_EVENT_NEXT_KEY:
assert (!"Unexpected event GPGME_EVENT_NEXT_KEY");
break;
case GPGME_EVENT_NEXT_TRUSTITEM:
assert (!"Unexpected event GPGME_EVENT_NEXT_TRUSTITEM");
break;
default:
assert (!"Unexpected event");
break;
}
}
/* The internal I/O callback function used for private event loops.
* The private event loops are used for all blocking operations, and
* for the key and trust item listing operations. They are completely
* separated from each other. */
void
_gpgme_wait_private_event_cb (void *data, gpgme_event_io_t type,
                              void *type_data)
{
  switch (type)
    {
    case GPGME_EVENT_START:
      /* Nothing to do here, as the wait routine is called after the
         initialization is finished.  */
      break;

    case GPGME_EVENT_DONE:
      /* The wait routine collects the result itself; nothing to do.  */
      break;

    case GPGME_EVENT_NEXT_KEY:
      /* Forward to the keylist operation's event handler.  */
      _gpgme_op_keylist_event_cb (data, type, type_data);
      break;

    case GPGME_EVENT_NEXT_TRUSTITEM:
      /* Forward to the trustlist operation's event handler.  */
      _gpgme_op_trustlist_event_cb (data, type, type_data);
      break;
    }
}
/* The internal I/O callback function used for user event loops. User
* event loops are used for all asynchronous operations for which a
* user callback is defined. */
/* Forward an I/O event to the user's event handler, if one is set.  */
void
_gpgme_wait_user_event_cb (void *data, gpgme_event_io_t type, void *type_data)
{
  gpgme_ctx_t ctx = (gpgme_ctx_t) data;

  if (!ctx->user_io_cbs.event)
    return;
  ctx->user_io_cbs.event (ctx->user_io_cbs.event_priv, type, type_data);
}
/* Perform asynchronous operations in the global event loop (ie, any
asynchronous operation except key listing and trustitem listing
operations). If CTX is not a null pointer, the function will
return if the asynchronous operation in the context CTX finished.
Otherwise the function will return if any asynchronous operation
finished. If HANG is zero, the function will not block for a long
time. Otherwise the function does not return until an operation
matching CTX finished.
If a matching context finished, it is returned, and *STATUS is set
to the error value of the operation in that context. Otherwise, if
the timeout expires, NULL is returned and *STATUS is 0. If an
error occurs, NULL is returned and *STATUS is set to the error
value. */
gpgme_ctx_t
gpgme_wait_ext (gpgme_ctx_t ctx, gpgme_error_t *status,
gpgme_error_t *op_err, int hang)
{
gpg_error_t err;
io_select_t fds = NULL;
unsigned int nfds;
int nr;
uint64_t serial;
do
{
/* Get all fds of CTX (or all if CTX is NULL) we want to wait
* for and which are in the active state. */
free (fds);
nfds = _gpgme_fdtable_get_fds (&fds, ctx? ctx->serial : 0,
( FDTABLE_FLAG_ACTIVE
| FDTABLE_FLAG_CLEAR));
if (!nfds)
{
/* The status changed in the meantime, there is nothing left
* to do. */
return 0;
err = gpg_error_from_syserror ();
if (gpg_err_code (err) != GPG_ERR_MISSING_ERRNO)
{
if (status)
*status = err;
if (op_err)
*op_err = 0;
free (fds);
return NULL;
}
/* Nothing to select. Run the select anyway, so that we use
* its timeout. */
}
nr = _gpgme_io_select (fds, nfds, 0);
if (nr < 0)
{
if (status)
*status = gpg_error_from_syserror ();
if (op_err)
*op_err = 0;
free (fds);
return NULL;
}
_gpgme_fdtable_set_signaled (fds, nfds);
_gpgme_fdtable_run_io_cbs (ctx? ctx->serial : 0, NULL);
serial = _gpgme_fdtable_get_done (ctx? ctx->serial : 0, status, op_err);
if (serial)
{
_gpgme_get_ctx (serial, &ctx);
hang = 0;
}
else if (!hang)
{
ctx = NULL;
if (status)
*status = 0;
if (op_err)
*op_err = 0;
}
}
while (hang);
TRACE (DEBUG_CTX, "_gpgme_run_io_cb", item, "handler (%p, %d)",
item->handler_value, an_fds->fd);
iocb_data.handler_value = item->handler_value;
iocb_data.op_err = 0;
err = item->handler (&iocb_data, an_fds->fd);
*op_err = iocb_data.op_err;
return err;
free (fds);
return ctx;
}
gpgme_ctx_t
gpgme_wait (gpgme_ctx_t ctx, gpgme_error_t *status, int hang)
{
  /* Same as gpgme_wait_ext but without reporting operational
     errors.  */
  return gpgme_wait_ext (ctx, status, NULL, hang);
}
/* If COND is a null pointer, wait until the blocking operation in CTX
finished and return its error value. Otherwise, wait until COND is
satisfied or the operation finished. */
/* Drive the private event loop for CTX using the fdtable.  If COND is
   a null pointer, loop until the blocking operation in CTX finished
   and return its error value; otherwise loop until *COND becomes true
   or the operation finished.  Operational errors are stored in
   *R_OP_ERR if that is non-NULL.  */
gpgme_error_t
_gpgme_wait_on_condition (gpgme_ctx_t ctx, volatile int *cond,
                          gpgme_error_t *r_op_err)
{
  gpgme_error_t err = 0;
  int hang = 1;
  io_select_t fds = NULL;
  unsigned int nfds;
  int op_err;  /* NOTE(review): appears unused; R_OP_ERR is passed
                  straight to _gpgme_fdtable_run_io_cbs - confirm and
                  remove.  */
  int nr;

  if (r_op_err)
    *r_op_err = 0;
  if (!ctx)
    return gpg_error (GPG_ERR_INV_VALUE);

  do
    {
      /* Get all fds of CTX we want to wait for.  */
      free (fds);
      nfds = _gpgme_fdtable_get_fds (&fds, ctx->serial,
                                     FDTABLE_FLAG_CLEAR);
      if (!nfds)
        {
          err = gpg_error_from_syserror ();
          if (gpg_err_code (err) != GPG_ERR_MISSING_ERRNO)
            {
              free (fds);
              return err;
            }
          /* Nothing to select.  Run the select anyway, so that we use
           * its timeout.  */
        }
      nr = _gpgme_io_select (fds, nfds, 0);
      if (nr < 0)
        {
          /* An error occurred.  Close all fds in this context, and
             signal it.  */
          err = gpg_error_from_syserror ();
          _gpgme_cancel_with_err (ctx->serial, err, 0);
          free (fds);
          return err;
        }
      /* Mark the signaled fds in the fdtable and run their I/O
         callbacks.  */
      _gpgme_fdtable_set_signaled (fds, nfds);

      err = _gpgme_fdtable_run_io_cbs (ctx->serial, r_op_err);
      if (err || (r_op_err && *r_op_err))
        {
          free (fds);
          return err;
        }

      /* With no active I/O callbacks left the operation is done.  */
      if (!_gpgme_fdtable_io_cb_count (ctx->serial))
        {
          struct gpgme_io_event_done_data data = {0, 0};

          _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &data);
          hang = 0;
        }
      if (cond && *cond)
        hang = 0;
    }
  while (hang);
  free (fds);
  return 0;
}
/* Wait until the blocking operation in context CTX has finished and
return the error value. This variant can not be used for
session-based protocols. */
gpgme_error_t
_gpgme_wait_one (gpgme_ctx_t ctx)
{
  /* No condition and no interest in operational errors.  */
  return _gpgme_wait_on_condition (ctx, NULL, NULL);
}
/* Wait until the blocking operation in context CTX has finished and
return the error value. This is the right variant to use for
session-based protocols. */
gpgme_error_t
_gpgme_wait_one_ext (gpgme_ctx_t ctx, gpgme_error_t *op_err)
{
  /* Like _gpgme_wait_one but also reports operational errors, as
     needed by session-based protocols.  */
  return _gpgme_wait_on_condition (ctx, NULL, op_err);
}

View File

@ -1,6 +1,6 @@
/* wait.h - Definitions for the wait queue interface.
Copyright (C) 2000 Werner Koch (dd9jn)
Copyright (C) 2001, 2002, 2003, 2004 g10 Code GmbH
Copyright (C) 2001, 2002, 2003, 2004, 2019 g10 Code GmbH
This file is part of GPGME.
@ -25,58 +25,46 @@
#include "gpgme.h"
#include "sema.h"
struct fd_table
/* A registered fd handler can be removed using the tag that
* identifies it.  In the public API that tag is an opaque
* pointer. */
struct io_cb_tag_s
{
struct io_select_fd_s *fds;
size_t size;
};
typedef struct fd_table *fd_table_t;
/* The s/n of the context for which the fd was registered. */
uint64_t serial;
/* Wait items are hooked into the io_select_fd_s to connect an fd with
a callback handler. */
struct wait_item_s
{
gpgme_ctx_t ctx;
gpgme_io_cb_t handler;
void *handler_value;
int dir;
};
/* A registered fd handler is removed later using the tag that
identifies it. */
struct tag
{
/* The context for which the fd was registered. */
gpgme_ctx_t ctx;
/* The index into the fd table for this context. */
int idx;
/* The actual fd. */
int fd;
/* This is used by the wrappers for the user event loop. */
void *user_tag;
/* A string used describing the data. This is used for tracing. */
const char *desc;
};
void _gpgme_fd_table_init (fd_table_t fdt);
void _gpgme_fd_table_deinit (fd_table_t fdt);
gpgme_error_t _gpgme_add_io_cb (void *data, int fd, int dir,
gpgme_io_cb_t fnc, void *fnc_data, void **r_tag);
gpgme_io_cb_t fnc, void *fnc_data,
void **r_tag);
gpgme_error_t _gpgme_add_io_cb_user (void *data, int fd, int dir,
gpgme_io_cb_t fnc, void *fnc_data,
void **r_tag);
void _gpgme_remove_io_cb (void *tag);
void _gpgme_wait_private_event_cb (void *data, gpgme_event_io_t type,
void *type_data);
void _gpgme_remove_io_cb_user (void *tag);
void _gpgme_wait_global_event_cb (void *data, gpgme_event_io_t type,
void *type_data);
gpgme_error_t _gpgme_wait_user_add_io_cb (void *data, int fd, int dir,
gpgme_io_cb_t fnc, void *fnc_data,
void **r_tag);
void _gpgme_wait_user_remove_io_cb (void *tag);
void _gpgme_wait_private_event_cb (void *data, gpgme_event_io_t type,
void *type_data);
void _gpgme_wait_user_event_cb (void *data, gpgme_event_io_t type,
void *type_data);
gpgme_error_t _gpgme_run_io_cb (struct io_select_fd_s *an_fds, int checked,
gpgme_error_t *err);
/* Session based interfaces require to make a distinction between IPC