scd: a race condition between scd_command_handler and handle_tick

NIIBE Yutaka gniibe at fsij.org
Wed Jun 6 07:30:27 CEST 2012


On 2012-04-04 at 10:43 +0900, Niibe Yutaka wrote:
> This is a bug report for GnuPG 2.0.19 (or 2.0.18).  I have not yet
> checked whether this bug is also present in the development version.

The bug is in the master branch too.

> For the BUG CASE, there is a race condition between two threads: the
> command handler thread and the handle_tick thread.
> 
> COMMAND HANDLER THREAD's call-chain is like:
> -> scd_command_handler
>   -> cmd_serialno
>     -> open_card
>       -> apdu_connect
> 
> HANDLE_TICK THREAD's call-chain is like:
> -> handle_tick
>   -> scd_update_reader_status_file
>     -> update_reader_status_file
>       -> get_reader_slot
>         -> apdu_open_reader
>           -> open_pcsc_reader
>             -> open_pcsc_reader_wrapper
>               -> new_reader_slot
>               Then, forking gnupg-pcsc-wrapper...


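To make the window concrete, here is a minimal, self-contained sketch
of the hazard (plain pthreads instead of npth, and made-up names
rather than the real scd data structures): one thread picks a free
slot, marks it used, and only finishes setting it up after a slow step
(in scd, forking gnupg-pcsc-wrapper), while the other thread can
already see the slot and act on the half-initialized entry.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Toy reader slot; not the real scd data structure.  */
struct toy_slot
{
  int used;         /* slot has been handed out */
  int initialized;  /* slow setup has finished */
};

static struct toy_slot table[1];

/* Loosely plays the role of the handle_tick thread's
   open_pcsc_reader/new_reader_slot path.  */
static void *
slow_opener (void *arg)
{
  (void)arg;
  table[0].used = 1;        /* slot becomes visible to other threads... */
  sleep (1);                /* ...while the slow setup is still running  */
  table[0].initialized = 1;
  return NULL;
}

/* Loosely plays the role of the command handler thread's
   open_card/apdu_connect path.  */
static void *
concurrent_user (void *arg)
{
  (void)arg;
  usleep (100 * 1000);
  if (table[0].used && !table[0].initialized)
    printf ("race: operating on a half-initialized slot\n");
  return NULL;
}

int
main (void)
{
  pthread_t a, b;

  pthread_create (&a, NULL, slow_opener, NULL);
  pthread_create (&b, NULL, concurrent_user, NULL);
  pthread_join (a, NULL);
  pthread_join (b, NULL);
  return 0;
}

(Build with "gcc -pthread".  The accesses to the two flags are
deliberately unsynchronized; that is exactly the bug being
illustrated.)
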
I think that a patch like the following is needed, at least at the
lower layer.

That is, on success, new_reader_slot returns with the slot's lock
held.  The lock is then released by the caller.
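
In other words, each open_*_reader function is expected to follow
this pattern (an illustrative sketch only; open_xyz_reader is not a
real function, it just stands for any caller of new_reader_slot):

/* Sketch of the intended protocol, not actual scd code.  */
static int
open_xyz_reader (void)
{
  int slot;

  slot = new_reader_slot ();   /* On success, returns with the slot locked.  */
  if (slot == -1)
    return -1;

  /* Set up reader_table[slot] here.  The other thread cannot operate
     on this slot until the lock is released.  */

  unlock_slot (slot);          /* Initialization done; publish the slot.  */
  return slot;
}

As the patch below shows, every error path of the real open_*_reader
functions has to release the lock as well.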

I haven't tested it yet.

diff --git a/scd/apdu.c b/scd/apdu.c
index 7641e91..f88047a 100644
--- a/scd/apdu.c
+++ b/scd/apdu.c
@@ -346,8 +346,56 @@ static int pcsc_keypad_modify (int slot, int class, int ins, int p0, int p1,
  */
 

+static int
+lock_slot (int slot)
+{
+#ifdef USE_NPTH
+  int err;
+
+  err = npth_mutex_lock (&reader_table[slot].lock);
+  if (err)
+    {
+      log_error ("failed to acquire apdu lock: %s\n", strerror (err));
+      return SW_HOST_LOCKING_FAILED;
+    }
+#endif /*USE_NPTH*/
+  return 0;
+}
+
+static int
+trylock_slot (int slot)
+{
+#ifdef USE_NPTH
+  int err;
+
+  err = npth_mutex_trylock (&reader_table[slot].lock);
+  if (err == EBUSY)
+    return SW_HOST_BUSY;
+  else if (err)
+    {
+      log_error ("failed to acquire apdu lock: %s\n", strerror (err));
+      return SW_HOST_LOCKING_FAILED;
+    }
+#endif /*USE_NPTH*/
+  return 0;
+}
+
+static void
+unlock_slot (int slot)
+{
+#ifdef USE_NPTH
+  int err;
+
+  err = npth_mutex_unlock (&reader_table[slot].lock);
+  if (err)
+    log_error ("failed to release apdu lock: %s\n", strerror (errno));
+#endif /*USE_NPTH*/
+}
+
+
 /* Find an unused reader slot for PORTSTR and put it into the reader
-   table.  Return -1 on error or the index into the reader table. */
+   table.  Return -1 on error or the index into the reader table.
+   On success, the slot's lock is held; the caller must unlock it. */
 static int
 new_reader_slot (void)
 {
@@ -375,6 +423,12 @@ new_reader_slot (void)
         }
       reader_table[reader].lock_initialized = 1;
     }
+
+  if (lock_slot (reader))
+    {
+      /* lock_slot already logged the error.  */
+      return -1;
+    }
 #endif /*USE_NPTH*/
   reader_table[reader].connect_card = NULL;
   reader_table[reader].disconnect_card = NULL;
@@ -660,6 +714,7 @@ open_ct_reader (int port)
       log_error ("apdu_open_ct_reader failed on port %d: %s\n",
                  port, ct_error_string (rc));
       reader_table[reader].used = 0;
+      unlock_slot (reader);
       return -1;
     }
 
@@ -681,6 +736,7 @@ open_ct_reader (int port)
   reader_table[reader].keypad_modify = NULL;
 
   dump_reader_status (reader);
+  unlock_slot (reader);
   return reader;
 }
 
@@ -1693,6 +1749,7 @@ open_pcsc_reader_direct (const char *portstr)
       reader_table[slot].used = 0;
       if (err == 0x8010001d)
         pcsc_no_service = 1;
+      unlock_slot (slot);
       return -1;
     }
   pcsc_no_service = 0;
@@ -1707,6 +1764,7 @@ open_pcsc_reader_direct (const char *portstr)
           log_error ("error allocating memory for reader list\n");
           pcsc_release_context (reader_table[slot].pcsc.context);
           reader_table[slot].used = 0;
+	  unlock_slot (slot);
           return -1 /*SW_HOST_OUT_OF_CORE*/;
         }
       err = pcsc_list_readers (reader_table[slot].pcsc.context,
@@ -1719,6 +1777,7 @@ open_pcsc_reader_direct (const char *portstr)
       pcsc_release_context (reader_table[slot].pcsc.context);
       reader_table[slot].used = 0;
       xfree (list);
+      unlock_slot (slot);
       return -1;
     }
 
@@ -1745,6 +1804,7 @@ open_pcsc_reader_direct (const char *portstr)
       log_error ("error allocating memory for reader name\n");
       pcsc_release_context (reader_table[slot].pcsc.context);
       reader_table[slot].used = 0;
+      unlock_slot (slot);
       return -1;
     }
   strcpy (reader_table[slot].rdrname, portstr? portstr : list);
@@ -1764,6 +1824,7 @@ open_pcsc_reader_direct (const char *portstr)
   reader_table[slot].dump_status_reader = dump_pcsc_reader_status;
 
   dump_reader_status (slot);
+  unlock_slot (slot);
   return slot;
 }
 #endif /*!NEED_PCSC_WRAPPER */
@@ -1810,6 +1871,7 @@ open_pcsc_reader_wrapped (const char *portstr)
     {
       log_error ("error creating a pipe: %s\n", strerror (errno));
       slotp->used = 0;
+      unlock_slot (slot);
       return -1;
     }
   if (pipe (wp) == -1)
@@ -1818,6 +1880,7 @@ open_pcsc_reader_wrapped (const char *portstr)
       close (rp[0]);
       close (rp[1]);
       slotp->used = 0;
+      unlock_slot (slot);
       return -1;
     }
 
@@ -1830,6 +1893,7 @@ open_pcsc_reader_wrapped (const char *portstr)
       close (wp[0]);
       close (wp[1]);
       slotp->used = 0;
+      unlock_slot (slot);
       return -1;
     }
   slotp->pcsc.pid = pid;
@@ -1963,6 +2027,7 @@ open_pcsc_reader_wrapped (const char *portstr)
   pcsc_get_status (slot, &dummy_status);
 
   dump_reader_status (slot);
+  unlock_slot (slot);
   return slot;
 
  command_failed:
@@ -1974,6 +2039,7 @@ open_pcsc_reader_wrapped (const char *portstr)
     kill (slotp->pcsc.pid, SIGTERM);
   slotp->pcsc.pid = (pid_t)(-1);
   slotp->used = 0;
+  unlock_slot (slot);
   /* There is no way to return SW. */
   return -1;
 
@@ -2412,6 +2478,7 @@ open_ccid_reader (const char *portstr)
   if (err)
     {
       slotp->used = 0;
+      unlock_slot (slot);
       return -1;
     }
 
@@ -2446,6 +2513,7 @@ open_ccid_reader (const char *portstr)
   reader_table[slot].is_t0 = 0;
 
   dump_reader_status (slot);
+  unlock_slot (slot);
   return slot;
 }
 
@@ -2684,6 +2752,7 @@ open_rapdu_reader (int portno,
   if (!slotp->rapdu.handle)
     {
       slotp->used = 0;
+      unlock_slot (slot);
       return -1;
     }
 
@@ -2738,12 +2807,14 @@ open_rapdu_reader (int portno,
 
   dump_reader_status (slot);
   rapdu_msg_release (msg);
+  unlock_slot (slot);
   return slot;
 
  failure:
   rapdu_msg_release (msg);
   rapdu_release (slotp->rapdu.handle);
   slotp->used = 0;
+  unlock_slot (slot);
   return -1;
 }
 
@@ -2756,53 +2827,6 @@ open_rapdu_reader (int portno,
  */
 

-static int
-lock_slot (int slot)
-{
-#ifdef USE_NPTH
-  int err;
-
-  err = npth_mutex_lock (&reader_table[slot].lock);
-  if (err)
-    {
-      log_error ("failed to acquire apdu lock: %s\n", strerror (err));
-      return SW_HOST_LOCKING_FAILED;
-    }
-#endif /*USE_NPTH*/
-  return 0;
-}
-
-static int
-trylock_slot (int slot)
-{
-#ifdef USE_NPTH
-  int err;
-
-  err = npth_mutex_trylock (&reader_table[slot].lock);
-  if (err == EBUSY)
-    return SW_HOST_BUSY;
-  else if (err)
-    {
-      log_error ("failed to acquire apdu lock: %s\n", strerror (err));
-      return SW_HOST_LOCKING_FAILED;
-    }
-#endif /*USE_NPTH*/
-  return 0;
-}
-
-static void
-unlock_slot (int slot)
-{
-#ifdef USE_NPTH
-  int err;
-
-  err = npth_mutex_unlock (&reader_table[slot].lock);
-  if (err)
-    log_error ("failed to release apdu lock: %s\n", strerror (errno));
-#endif /*USE_NPTH*/
-}
-
-
 /* Open the reader and return an internal slot number or -1 on
    error. If PORTSTR is NULL we default to a suitable port (for ctAPI:
    the first USB reader.  For PC/SC the first listed reader). */