Reworked the process code.

Werner Koch 2001-02-20 13:31:56 +00:00
parent 7bd07d62f3
commit 72af6e5349
9 changed files with 308 additions and 189 deletions

TODO

@@ -4,5 +4,3 @@
 * Allow to use GTK's main loop instead of the select stuff in
   wait.c
-* need to close a lot of handles in w32-io.c

configure.in

@@ -13,7 +13,7 @@ AM_MAINTAINER_MODE
 # AGE, set REVISION to 0.
 # 3. Interfaces removed (BAD, breaks upward compatibility): Increment
 # CURRENT, set AGE and REVISION to 0.
-AM_INIT_AUTOMAKE(gpgme,0.1.4a)
+AM_INIT_AUTOMAKE(gpgme,0.1.4b)
 LIBGPGME_LT_CURRENT=2
 LIBGPGME_LT_AGE=2
 LIBGPGME_LT_REVISION=0

ChangeLog

@@ -1,5 +1,21 @@
+2001-02-20 Werner Koch <wk@gnupg.org>
+
+        * w32-io.c (destroy_reader,kill_reader): New.
+        (create_reader, reader): Add a new event to stop the thread.
+        (_gpgme_io_close): Kill the reader thread.
+
+        * posix-io.c (_gpgme_io_select): Handle frozen fds here.
+        * w32-io.c (_gpgme_io_select): Ditto. Removed a bunch of unused code.
+
+        * wait.c: Reworked the whole thing.
+
+        * rungpg.c (_gpgme_gpg_new): Init pid to -1.
+        (_gpgme_gpg_release): Remove the process from the wait queue.
+
 2001-02-19 Werner Koch <wk@gnupg.org>
 
+        * w32-io.c (_gpgme_io_set_close_notify): New.
+        (_gpgme_io_close): Do the notification.
+
         * posix-io.c (_gpgme_io_select): Use a 1 sec timeout and not 200
         microseconds.
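
The 2001-02-19 entry above names the new close-notification hook; its implementation appears in the w32-io.c hunks further down. A minimal caller-side sketch, assuming only the two prototypes copied from that hunk (my_close_cb, my_state and watch_and_close are invented names):

    #include <stdio.h>

    /* Prototypes as they appear in the w32-io.c hunk further down.  */
    int _gpgme_io_set_close_notify (int fd, void (*handler)(int, void*),
                                    void *value);
    int _gpgme_io_close (int fd);

    /* Hypothetical callback: runs from inside _gpgme_io_close just before
       the underlying handle is destroyed.  */
    static void
    my_close_cb (int fd, void *value)
    {
        fprintf (stderr, "fd %d is going away (state=%p)\n", fd, value);
    }

    static int
    watch_and_close (int fd, void *my_state)
    {
        /* Registration returns 0 on success, -1 once all 256
           notify_table slots are in use.  */
        if (_gpgme_io_set_close_notify (fd, my_close_cb, my_state))
            return -1;
        return _gpgme_io_close (fd);   /* invokes my_close_cb (fd, my_state) */
    }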

gpgme.h

@@ -43,7 +43,7 @@ extern "C" {
  * let autoconf (using the AM_PATH_GPGME macro) check that this
  * header matches the installed library.
  * Warning: Do not edit the next line. configure will do that for you! */
-#define GPGME_VERSION "0.1.4a"
+#define GPGME_VERSION "0.1.4b"

posix-io.c

@@ -267,7 +267,10 @@ _gpgme_io_select ( struct io_select_fd_s *fds, size_t nfds )
     for ( i=0; i < nfds; i++ ) {
         if ( fds[i].fd == -1 )
             continue;
-        if ( fds[i].for_read ) {
+        if ( fds[i].frozen ) {
+            DEBUG_ADD1 (dbg_help, "f%d ", fds[i].fd );
+        }
+        else if ( fds[i].for_read ) {
             assert ( !FD_ISSET ( fds[i].fd, &readfds ) );
             FD_SET ( fds[i].fd, &readfds );
             if ( fds[i].fd > max_fd )

rungpg.c

@@ -197,6 +197,8 @@ _gpgme_gpg_new ( GpgObject *r_gpg )
     gpg->colon.fd[1] = -1;
     gpg->cmd.fd = -1;
+    gpg->pid = -1;
+
     /* allocate the read buffer for the status pipe */
     gpg->status.bufsize = 1024;
     gpg->status.readpos = 0;
@@ -250,6 +252,8 @@ _gpgme_gpg_release ( GpgObject gpg )
     free_argv (gpg->argv);
     xfree (gpg->cmd.keyword);
+    if (gpg->pid != -1)
+        _gpgme_remove_proc_from_wait_queue ( gpg->pid );
     if (gpg->status.fd[0] != -1 )
         _gpgme_io_close (gpg->status.fd[0]);
     if (gpg->status.fd[1] != -1 )

w32-io.c

@@ -52,11 +52,21 @@
 #define READBUF_SIZE 4096
 
+static struct {
+    int inuse;
+    int fd;
+    void (*handler)(int,void*);
+    void *value;
+} notify_table[256];
+DEFINE_STATIC_LOCK (notify_table_lock);
+
 struct reader_context_s {
     HANDLE file_hd;
     HANDLE thread_hd;
     DECLARE_LOCK (mutex);
 
+    int stop_me;
     int eof;
     int eof_shortcut;
     int error;
@@ -64,6 +74,7 @@ struct reader_context_s {
 
     HANDLE have_data_ev;  /* manually reset */
     HANDLE have_space_ev; /* auto reset */
+    HANDLE stopped;
     size_t readpos, writepos;
     char buffer[READBUF_SIZE];
 };
@@ -109,7 +120,7 @@ reader (void *arg)
     DEBUG2 ("reader thread %p for file %p started", c->thread_hd, c->file_hd );
     for (;;) {
         LOCK (c->mutex);
-        /* leave a one byte gap so that we can see wheter it is empty or full*/
+        /* leave a 1 byte gap so that we can see whether it is empty or full*/
         if ((c->writepos + 1) % READBUF_SIZE == c->readpos) {
             /* wait for space */
             if (!ResetEvent (c->have_space_ev) )
@@ -120,6 +131,10 @@ reader (void *arg)
             DEBUG1 ("reader thread %p: got space", c->thread_hd );
             LOCK (c->mutex);
         }
+        if ( c->stop_me ) {
+            UNLOCK (c->mutex);
+            break;
+        }
         nbytes = (c->readpos + READBUF_SIZE - c->writepos-1) % READBUF_SIZE;
         if ( nbytes > READBUF_SIZE - c->writepos )
             nbytes = READBUF_SIZE - c->writepos;
@@ -149,6 +164,10 @@ reader (void *arg)
         DEBUG2 ("reader thread %p: got %d bytes", c->thread_hd, (int)nread );
 
         LOCK (c->mutex);
+        if (c->stop_me) {
+            UNLOCK (c->mutex);
+            break;
+        }
         c->writepos = (c->writepos + nread) % READBUF_SIZE;
         if ( !SetEvent (c->have_data_ev) )
             DEBUG1 ("SetEvent failed: ec=%d", (int)GetLastError ());
@@ -158,6 +177,7 @@ reader (void *arg)
     if ( !SetEvent (c->have_data_ev) )
         DEBUG1 ("SetEvent failed: ec=%d", (int)GetLastError ());
     DEBUG1 ("reader thread %p ended", c->thread_hd );
+    SetEvent (c->stopped);
 
     return 0;
 }
@@ -182,12 +202,15 @@ create_reader (HANDLE fd)
     c->file_hd = fd;
     c->have_data_ev = CreateEvent (&sec_attr, TRUE, FALSE, NULL);
     c->have_space_ev = CreateEvent (&sec_attr, FALSE, TRUE, NULL);
-    if (!c->have_data_ev || !c->have_space_ev) {
+    c->stopped = CreateEvent (&sec_attr, TRUE, FALSE, NULL);
+    if (!c->have_data_ev || !c->have_space_ev || !c->stopped ) {
         DEBUG1 ("** CreateEvent failed: ec=%d\n", (int)GetLastError ());
         if (c->have_data_ev)
             CloseHandle (c->have_data_ev);
         if (c->have_space_ev)
             CloseHandle (c->have_space_ev);
+        if (c->stopped)
+            CloseHandle (c->stopped);
         xfree (c);
         return NULL;
     }
@@ -204,6 +227,8 @@ create_reader (HANDLE fd)
             CloseHandle (c->have_data_ev);
         if (c->have_space_ev)
             CloseHandle (c->have_space_ev);
+        if (c->stopped)
+            CloseHandle (c->stopped);
         xfree (c);
         return NULL;
     }
@@ -211,6 +236,27 @@ create_reader (HANDLE fd)
     return c;
 }
 
+static void
+destroy_reader (struct reader_context_s *c)
+{
+    if (c->have_space_ev)
+        SetEvent (c->have_space_ev);
+
+    DEBUG1 ("waiting for thread %p termination ...", c->thread_hd );
+    WaitForSingleObject (c->stopped, INFINITE);
+    DEBUG1 ("thread %p has terminated", c->thread_hd );
+
+    if (c->stopped)
+        CloseHandle (c->stopped);
+    if (c->have_data_ev)
+        CloseHandle (c->have_data_ev);
+    if (c->have_space_ev)
+        CloseHandle (c->have_space_ev);
+    CloseHandle (c->thread_hd);
+    DESTROY_LOCK (c->mutex);
+    xfree (c);
+}
+
 /*
  * Find a reader context or create a new one
@@ -243,6 +289,24 @@ find_reader (int fd, int start_it)
 }
 
+static void
+kill_reader (int fd)
+{
+    int i;
+
+    LOCK (reader_table_lock);
+    for (i=0; i < reader_table_size; i++ ) {
+        if (reader_table[i].used && reader_table[i].fd == fd ) {
+            destroy_reader (reader_table[i].context);
+            reader_table[i].context = NULL;
+            reader_table[i].used = 0;
+            break;
+        }
+    }
+    UNLOCK (reader_table_lock);
+}
+
 int
 _gpgme_io_read ( int fd, void *buffer, size_t count )
@@ -375,11 +439,29 @@ _gpgme_io_pipe ( int filedes[2], int inherit_idx )
 int
 _gpgme_io_close ( int fd )
 {
+    int i;
+    void (*handler)(int, void*) = NULL;
+    void *value = NULL;
+
     if ( fd == -1 )
         return -1;
 
     DEBUG1 ("** closing handle for fd %d\n", fd);
-    /* fixme: destroy thread */
+    kill_reader (fd);
+    LOCK (notify_table_lock);
+    for ( i=0; i < DIM (notify_table); i++ ) {
+        if (notify_table[i].inuse && notify_table[i].fd == fd) {
+            handler = notify_table[i].handler;
+            value = notify_table[i].value;
+            notify_table[i].handler = NULL;
+            notify_table[i].value = NULL;
+            notify_table[i].inuse = 0;
+            break;
+        }
+    }
+    UNLOCK (notify_table_lock);
+    if (handler)
+        handler (fd, value);
 
     if ( !CloseHandle (fd_to_handle (fd)) ) {
         DEBUG2 ("CloseHandle for fd %d failed: ec=%d\n",
@@ -390,6 +472,37 @@ _gpgme_io_close ( int fd )
     return 0;
 }
 
+int
+_gpgme_io_set_close_notify (int fd, void (*handler)(int, void*), void *value)
+{
+    int i;
+
+    assert (fd != -1);
+
+    LOCK (notify_table_lock);
+    for (i=0; i < DIM (notify_table); i++ ) {
+        if ( notify_table[i].inuse && notify_table[i].fd == fd )
+            break;
+    }
+    if ( i == DIM (notify_table) ) {
+        for (i=0; i < DIM (notify_table); i++ ) {
+            if ( !notify_table[i].inuse )
+                break;
+        }
+    }
+    if ( i == DIM (notify_table) ) {
+        UNLOCK (notify_table_lock);
+        return -1;
+    }
+    notify_table[i].fd = fd;
+    notify_table[i].handler = handler;
+    notify_table[i].value = value;
+    notify_table[i].inuse = 1;
+    UNLOCK (notify_table_lock);
+    DEBUG2 ("set notification for fd %d (idx=%d)", fd, i );
+    return 0;
+}
+
 int
 _gpgme_io_set_nonblocking ( int fd )
@@ -563,7 +676,8 @@ int
 _gpgme_io_waitpid ( int pid, int hang, int *r_status, int *r_signal )
 {
     HANDLE proc = fd_to_handle (pid);
-    int code, exc, ret = 0;
+    int code, ret = 0;
+    DWORD exc;
 
     *r_status = 0;
     *r_signal = 0;
@@ -619,7 +733,6 @@ _gpgme_io_kill ( int pid, int hard )
 int
 _gpgme_io_select ( struct io_select_fd_s *fds, size_t nfds )
 {
-#if 1
     HANDLE waitbuf[MAXIMUM_WAIT_OBJECTS];
     int waitidx[MAXIMUM_WAIT_OBJECTS];
     int code, nwait;
@@ -631,11 +744,16 @@ _gpgme_io_select ( struct io_select_fd_s *fds, size_t nfds )
     DEBUG_BEGIN (dbg_help, "select on [ ");
     any = any_write = 0;
     nwait = 0;
+    count = 0;
     for ( i=0; i < nfds; i++ ) {
         if ( fds[i].fd == -1 )
             continue;
+        fds[i].signaled = 0;
         if ( fds[i].for_read || fds[i].for_write ) {
-            if ( fds[i].for_read ) {
+            if ( fds[i].frozen ) {
+                DEBUG_ADD1 (dbg_help, "f%d ", fds[i].fd );
+            }
+            else if ( fds[i].for_read ) {
                 struct reader_context_s *c = find_reader (fds[i].fd,1);
 
                 if (!c) {
@@ -650,28 +768,24 @@
                     waitidx[nwait] = i;
                     waitbuf[nwait++] = c->have_data_ev;
                 }
+                DEBUG_ADD1 (dbg_help, "r%d ", fds[i].fd );
+                any = 1;
+            }
+            else if ( fds[i].for_write ) {
+                DEBUG_ADD1 (dbg_help, "w%d ", fds[i].fd );
+                any = 1;
+                /* no way to see whether a handle is ready for writing,
+                 * so we signal them all */
+                fds[i].signaled = 1;
+                any_write =1;
+                count++;
             }
-            DEBUG_ADD2 (dbg_help, "%c%d ",
-                        fds[i].for_read? 'r':'w',fds[i].fd );
-            any = 1;
         }
-        fds[i].signaled = 0;
     }
     DEBUG_END (dbg_help, "]");
     if (!any)
         return 0;
 
-    count = 0;
-    /* no way to see whether a handle is ready for writing, signal all */
-    for ( i=0; i < nfds; i++ ) {
-        if ( fds[i].fd == -1 )
-            continue;
-        if ( fds[i].for_write ) {
-            fds[i].signaled = 1;
-            any_write =1;
-            count++;
-        }
-    }
     code = WaitForMultipleObjects ( nwait, waitbuf, 0, any_write? 200:1000);
     if ( code >= WAIT_OBJECT_0 && code < WAIT_OBJECT_0 + nwait ) {
         /* This WFMO is a really silly function: It does return either
@@ -735,79 +849,6 @@
     }
 
     return count;
-#else /* This is the code we use */
-    int i, any, count;
-    int once_more = 0;
-
-    DEBUG_SELECT ((stderr, "gpgme:fakedselect on [ "));
-    any = 0;
-    for ( i=0; i < nfds; i++ ) {
-        if ( fds[i].fd == -1 )
-            continue;
-        if ( fds[i].for_read || fds[i].for_write ) {
-            DEBUG_SELECT ((stderr, "%c%d ",
-                           fds[i].for_read? 'r':'w',fds[i].fd ));
-            any = 1;
-        }
-        fds[i].signaled = 0;
-    }
-    DEBUG_SELECT ((stderr, "]\n" ));
-    if (!any)
-        return 0;
-
- restart:
-    count = 0;
-    /* no way to see whether a handle is ready fro writing, signal all */
-    for ( i=0; i < nfds; i++ ) {
-        if ( fds[i].fd == -1 )
-            continue;
-        if ( fds[i].for_write ) {
-            fds[i].signaled = 1;
-            count++;
-        }
-    }
-
-    /* now peek on all read handles */
-    for ( i=0; i < nfds; i++ ) {
-        if ( fds[i].fd == -1 )
-            continue;
-        if ( fds[i].for_read ) {
-            int navail;
-
-            if ( !PeekNamedPipe (fd_to_handle (fds[i].fd),
-                                 NULL, 0, NULL, &navail, NULL) ) {
-                DEBUG1 ("select: PeekFile failed: ec=%d\n",
-                        (int)GetLastError ());
-            }
-            else if ( navail ) {
-                DEBUG2 ("fd %d has %d bytes to read\n", fds[i].fd, navail );
-                fds[i].signaled = 1;
-                count++;
-            }
-        }
-    }
-    if ( !once_more && !count ) {
-        /* once more but after relinquishing our timeslot */
-        once_more = 1;
-        Sleep (0);
-        goto restart;
-    }
-
-    if ( count ) {
-        DEBUG_SELECT ((stderr, "gpgme: signaled [ "));
-        for ( i=0; i < nfds; i++ ) {
-            if ( fds[i].fd == -1 )
-                continue;
-            if ( (fds[i].for_read || fds[i].for_write) && fds[i].signaled ) {
-                DEBUG_SELECT ((stderr, "%c%d ",
-                               fds[i].for_read? 'r':'w',fds[i].fd ));
-            }
-        }
-        DEBUG_SELECT ((stderr, "]\n" ));
-    }
-    return count;
-#endif
 }
 
 #endif /*HAVE_DOSISH_SYSTEM*/
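
The reader thread shown above deliberately leaves a one-byte gap in its ring buffer, so readpos == writepos always means "empty". A small worked example of its free-space arithmetic, with READBUF_SIZE shrunk to 8 and the positions chosen arbitrarily for illustration:

    #include <stdio.h>

    #define READBUF_SIZE 8   /* 4096 in w32-io.c; tiny here for the example */

    int
    main (void)
    {
        size_t readpos = 2, writepos = 5;

        /* total free space, keeping the one-byte gap unused */
        size_t nbytes = (readpos + READBUF_SIZE - writepos - 1) % READBUF_SIZE;
        printf ("free space: %u\n", (unsigned)nbytes);          /* prints 4 */

        /* clamp to the contiguous chunk so one ReadFile never wraps */
        if (nbytes > READBUF_SIZE - writepos)
            nbytes = READBUF_SIZE - writepos;
        printf ("read at most %u bytes at offset %u\n",
                (unsigned)nbytes, (unsigned)writepos);          /* 3 at 5 */
        return 0;
    }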

wait.c

@@ -31,85 +31,105 @@
 #include "context.h"
 #include "ops.h"
 #include "wait.h"
+#include "sema.h"
 #include "io.h"
 
-/* Fixme: implement the following stuff to make the code MT safe.
- * To avoid the need to link against a specific threads lib, such
- * an implementation should require the caller to register a function
- * which does this task.
- * enter_crit() and leave_crit() are used to embrace an area of code
- * which should be executed only by one thread at a time.
- * lock_xxxx() and unlock_xxxx() protect access to an data object.
- * */
-#define enter_crit() do { } while (0)
-#define leave_crit() do { } while (0)
-#define lock_table() do { } while (0)
-#define unlock_table() do { } while (0)
-
-struct wait_item_s {
-    volatile int active;
-    int (*handler)(void*,int,int);
-    void *handler_value;
-    int pid;
-    int inbound;       /* this is an inbound data handler fd */
-    GpgmeCtx ctx;
-};
+struct wait_item_s;
+struct proc_s;
+
+static struct proc_s *proc_queue;
+DEFINE_STATIC_LOCK (proc_queue_lock);
 
 static int fd_table_size;
 static struct io_select_fd_s *fd_table;
+DEFINE_STATIC_LOCK (fd_table_lock);
 
 static void (*idle_function) (void);
 
+struct proc_s {
+    struct proc_s *next;
+    int pid;
+    GpgmeCtx ctx;
+    struct wait_item_s *handler_list;
+    int ready;
+};
+
+struct wait_item_s {
+    struct wait_item_s *next;
+    int (*handler)(void*,int,int);
+    void *handler_value;
+    int inbound;       /* this is an inbound data handler fd */
+    struct proc_s *proc; /* backlink */
+    int ready;
+    int frozen; /* copy of the frozen flag from the fd_table */
+};
+
 static int do_select ( void );
 static void run_idle (void);
 
-static struct wait_item_s *
-queue_item_from_context ( GpgmeCtx ctx )
-{
-    struct wait_item_s *q;
-    int i;
-
-    for (i=0; i < fd_table_size; i++ ) {
-        if ( fd_table[i].fd != -1 && (q=fd_table[i].opaque) && q->ctx == ctx )
-            return q;
-    }
-    return NULL;
-}
-
+/* only to be called with a locked proc_queue */
 static int
-count_active_and_thawed_fds ( int pid )
+count_running_fds ( struct proc_s *proc )
 {
     struct wait_item_s *q;
-    int i, count = 0;
+    int count = 0;
 
-    for (i=0; i < fd_table_size; i++ ) {
-        if ( fd_table[i].fd != -1 && (q=fd_table[i].opaque)
-             && q->active && !fd_table[i].frozen && q->pid == pid )
+    for (q=proc->handler_list; q; q=q->next) {
+        if ( !q->frozen && !q->ready )
             count++;
     }
     return count;
 }
 
-/* remove the given process from the queue */
-/* FIXME: We should do this on demand from rungpg.c */
+/* only to be called with a locked proc_queue */
 static void
-remove_process ( int pid )
+set_process_ready ( struct proc_s *proc )
 {
-    struct wait_item_s *q;
+    struct wait_item_s *q, *q2;
     int i;
 
-    for (i=0; i < fd_table_size; i++ ) {
-        if (fd_table[i].fd != -1 && (q=fd_table[i].opaque) && q->pid == pid ) {
-            xfree (q);
-            fd_table[i].opaque = NULL;
-            fd_table[i].fd = -1;
+    assert (proc);
+    DEBUG2 ("set_process_ready(%p) pid=%d", proc, proc->pid );
+    LOCK (fd_table_lock);
+    for (q = proc->handler_list; q; q=q2) {
+        q2 = q->next;
+        for (i=0; i < fd_table_size; i++ ) {
+            if (fd_table[i].fd != -1 && q == fd_table[i].opaque ) {
+                fd_table[i].opaque = NULL;
+                fd_table[i].fd = -1;
+            }
         }
+        xfree (q);
     }
+    UNLOCK (fd_table_lock);
+    proc->handler_list = NULL;
+    proc->ready = 1;
 }
 
+void
+_gpgme_remove_proc_from_wait_queue ( int pid )
+{
+    struct proc_s *proc, *last;
+
+    DEBUG1 ("removing process %d", pid );
+    LOCK (proc_queue_lock);
+    for (last=NULL, proc=proc_queue; proc; last = proc, proc = proc->next ) {
+        if (proc->pid == pid ) {
+            set_process_ready (proc);
+            if (!last)
+                proc_queue = proc->next;
+            else
+                last->next = proc->next;
+            xfree (proc);
+            break;
+        }
+    }
+    UNLOCK (proc_queue_lock);
+}
+
 /**
@@ -134,28 +154,31 @@ gpgme_wait ( GpgmeCtx c, int hang )
 GpgmeCtx
 _gpgme_wait_on_condition ( GpgmeCtx c, int hang, volatile int *cond )
 {
-    struct wait_item_s *q;
+    DEBUG3 ("waiting... ctx=%p hang=%d cond=%p", c, hang, cond );
     do {
         int did_work = do_select();
+        int any = 0;
+        struct proc_s *proc;
 
         if ( cond && *cond )
             hang = 0;
-        if ( !did_work ) {
-            /* We did no read/write - see whether the process is still
-             * alive */
-            assert (c); /* !c is not yet implemented */
-            q = queue_item_from_context ( c );
-            if (q) {
-                if ( !count_active_and_thawed_fds (q->pid) ) {
-                    remove_process (q->pid);
-                    hang = 0;
+        else {
+            LOCK (proc_queue_lock);
+            for (proc=proc_queue; proc; proc = proc->next ) {
+                if ( !proc->ready && !count_running_fds (proc) ) {
+                    set_process_ready (proc);
                 }
+                if (c && proc->ready && proc->ctx == c)
+                    hang = 0;
+                if ( !proc->ready )
+                    any = 1;
             }
-            else
+            UNLOCK (proc_queue_lock);
+            if (!any)
                 hang = 0;
         }
+        /* fixme: we should check here for hanging processes */
+
         if (hang)
             run_idle ();
     } while (hang && !c->cancel );
@@ -177,7 +200,6 @@ _gpgme_wait_on_condition ( GpgmeCtx c, int hang, volatile int *cond )
 static int
 do_select ( void )
 {
-    struct wait_item_s *q;
     int i, n;
     int any=0;
@@ -188,18 +210,27 @@ do_select ( void )
     for (i=0; i < fd_table_size && n; i++ ) {
         if ( fd_table[i].fd != -1 && fd_table[i].signaled
              && !fd_table[i].frozen ) {
-            q = fd_table[i].opaque;
+            struct wait_item_s *q;
+
             assert (n);
             n--;
-            if ( q->active )
-                any = 1;
-            if ( q->active && q->handler (q->handler_value,
-                                          q->pid, fd_table[i].fd ) ) {
-                DEBUG1 ("setting fd %d inactive", fd_table[i].fd );
-                q->active = 0;
+
+            q = fd_table[i].opaque;
+            assert ( q );
+            assert ( q->proc );
+            assert ( !q->ready );
+            any = 1;
+            if ( q->handler (q->handler_value,
+                             q->proc->pid, fd_table[i].fd ) ) {
+                DEBUG2 ("setting fd %d (q=%p) ready", fd_table[i].fd, q );
+                q->ready = 1;
+                /* free the table entry*/
+                LOCK (fd_table_lock);
                 fd_table[i].for_read = 0;
                 fd_table[i].for_write = 0;
                 fd_table[i].fd = -1;
+                fd_table[i].opaque = NULL;
+                UNLOCK (fd_table_lock);
             }
         }
     }
@@ -220,22 +251,42 @@ _gpgme_register_pipe_handler ( void *opaque,
 {
     GpgmeCtx ctx = opaque;
     struct wait_item_s *q;
+    struct proc_s *proc;
     int i;
 
     assert (opaque);
     assert (handler);
+    /* Allocate a structure to hold info about the handler */
     q = xtrycalloc ( 1, sizeof *q );
     if ( !q )
         return mk_error (Out_Of_Core);
     q->inbound = inbound;
     q->handler = handler;
     q->handler_value = handler_value;
-    q->pid = pid;
-    q->ctx = ctx;
-    q->active = 1;
 
-    lock_table ();
+    /* Put this into the process queue */
+    LOCK (proc_queue_lock);
+    for (proc=proc_queue; proc && proc->pid != pid; proc = proc->next)
+        ;
+    if (!proc) { /* a new process */
+        proc = xtrycalloc ( 1, sizeof *proc );
+        if (!proc) {
+            UNLOCK (proc_queue_lock);
+            return mk_error (Out_Of_Core);
+        }
+        proc->pid = pid;
+        proc->ctx = ctx;
+        proc->next = proc_queue;
+        proc_queue = proc;
+    }
+    assert (proc->ctx == ctx);
+    q->proc = proc;
+    q->next = proc->handler_list;
+    proc->handler_list = q;
+    UNLOCK (proc_queue_lock);
+
+    LOCK (fd_table_lock);
  again:
     for (i=0; i < fd_table_size; i++ ) {
         if ( fd_table[i].fd == -1 ) {
@@ -245,7 +296,7 @@ _gpgme_register_pipe_handler ( void *opaque,
             fd_table[i].signaled = 0;
             fd_table[i].frozen = 0;
             fd_table[i].opaque = q;
-            unlock_table ();
+            UNLOCK (fd_table_lock);
             return 0;
         }
     }
@@ -264,8 +315,9 @@ _gpgme_register_pipe_handler ( void *opaque,
         }
     }
-    unlock_table ();
+    UNLOCK (fd_table_lock);
     xfree (q);
+    /* FIXME: remove the proc table entry */
     return mk_error (Too_Many_Procs);
 }
 
@@ -275,15 +327,19 @@ _gpgme_freeze_fd ( int fd )
 {
     int i;
 
-    lock_table ();
+    LOCK (fd_table_lock);
     for (i=0; i < fd_table_size; i++ ) {
         if ( fd_table[i].fd == fd ) {
+            struct wait_item_s *q;
+
             fd_table[i].frozen = 1;
-            DEBUG1 ("fd %d frozen", fd );
+            if ( (q=fd_table[i].opaque) )
+                q->frozen = 1;
+            DEBUG2 ("fd %d frozen (q=%p)", fd, q );
             break;
         }
    }
-    unlock_table ();
+    UNLOCK (fd_table_lock);
 }
 
 void
@@ -291,15 +347,19 @@ _gpgme_thaw_fd ( int fd )
 {
     int i;
 
-    lock_table ();
+    LOCK (fd_table_lock);
    for (i=0; i < fd_table_size; i++ ) {
        if ( fd_table[i].fd == fd ) {
+            struct wait_item_s *q;
+
            fd_table[i].frozen = 0;
-            DEBUG1 ("fd %d thawed", fd );
+            if ( (q=fd_table[i].opaque) )
+                q->frozen = 0;
+            DEBUG2 ("fd %d thawed (q=%p)", fd, q );
            break;
        }
    }
-    unlock_table ();
+    UNLOCK (fd_table_lock);
 }
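
For orientation, here is a comment-annotated copy of the two bookkeeping structures the reworked wait.c introduces; the declarations are taken verbatim from the hunk above and only the comments are added:

    #include "gpgme.h"   /* for GpgmeCtx */

    /* One proc_s per spawned gpg process, kept on the global, lock-protected
       proc_queue until rungpg.c calls _gpgme_remove_proc_from_wait_queue.  */
    struct proc_s {
        struct proc_s *next;              /* next process on proc_queue */
        int pid;
        GpgmeCtx ctx;                     /* context this process belongs to */
        struct wait_item_s *handler_list; /* all pipe handlers of this process */
        int ready;                        /* no handler of it is running any more */
    };

    /* One wait_item_s per registered pipe handler; fd_table[i].opaque points
       back at the item that owns file descriptor i.  */
    struct wait_item_s {
        struct wait_item_s *next;         /* next handler of the same process */
        int (*handler)(void*,int,int);    /* called as handler (value, pid, fd) */
        void *handler_value;
        int inbound;       /* this is an inbound data handler fd */
        struct proc_s *proc; /* backlink to the owning process */
        int ready;           /* handler returned non-zero: done with this fd */
        int frozen; /* copy of the frozen flag from the fd_table */
    };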

wait.h

@@ -23,17 +23,14 @@
 #include "gpgme.h"
 
-#define SIZEOF_WAIT_QUEUE 10
+void _gpgme_remove_proc_from_wait_queue ( int pid );
 
-GpgmeError _gpgme_register_pipe_handler(
+GpgmeError _gpgme_register_pipe_handler (
                         void *opaque,
                         int (*handler)(void*,int,int),
                         void *handler_value,
                         int pid, int fd, int inbound );
 
 #endif /* WAIT_H */
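
A hypothetical caller-side sketch of the declarations above, following the contract visible in wait.c's do_select(): the handler is called as handler (handler_value, pid, fd) whenever the fd is signaled and not frozen, and a non-zero return marks the handler as done with that fd. The names my_data, my_status_handler and hook_up_status_fd are invented; rungpg.c is the real caller:

    #include "gpgme.h"
    #include "wait.h"

    struct my_data { int seen_eof; };      /* made-up per-handler state */

    static int
    my_status_handler (void *value, int pid, int fd)
    {
        struct my_data *d = value;
        (void)pid;
        /* read and parse data from fd here; set d->seen_eof at EOF */
        return d->seen_eof;                /* non-zero: done with this fd */
    }

    static GpgmeError
    hook_up_status_fd (GpgmeCtx ctx, struct my_data *d, int pid, int status_fd)
    {
        /* inbound = 1 because we read from this fd; the ctx is passed as the
           opaque pointer, exactly as wait.c expects.  */
        return _gpgme_register_pipe_handler (ctx, my_status_handler, d,
                                             pid, status_fd, 1);
    }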