abstract_ui.cc

/*
    Copyright (C) 2012 Paul Davis

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <unistd.h>
#include <iostream>

#include "pbd/stacktrace.h"
#include "pbd/abstract_ui.h"
#include "pbd/pthread_utils.h"
#include "pbd/failed_constructor.h"
#include "pbd/debug.h"

#include "i18n.h"

#ifdef COMPILER_MSVC
#include <ardourext/misc.h>  // Needed for 'DECLARE_DEFAULT_COMPARISONS'. Objects in an STL container can be
                             // searched and sorted. Thus, when instantiating the container, MSVC complains
                             // if the type of object being contained has no appropriate comparison operators
                             // defined (specifically, if operators '<' and '==' are undefined). This seems
                             // to be the case with ptw32 'pthread_t' which is a simple struct.
DECLARE_DEFAULT_COMPARISONS(ptw32_handle_t)
#endif

using namespace std;

template<typename RequestBuffer> void
cleanup_request_buffer (void* ptr)
{
	RequestBuffer* rb = (RequestBuffer*) ptr;

	/* this is called when the thread for which this request buffer was
	 * allocated dies. That could be before or after the end of the UI
	 * event loop for which this request buffer provides communication.
	 *
	 * We are not modifying the UI's thread/buffer map, just marking it
	 * dead. If the UI is currently processing the buffers and misses
	 * this "dead" signal, it will find it the next time it receives
	 * a request. If the UI has finished processing requests, then
	 * we will leak this buffer object.
	 */

	rb->dead = true;
}

template<typename R>
Glib::Threads::Private<typename AbstractUI<R>::RequestBuffer> AbstractUI<R>::per_thread_request_buffer (cleanup_request_buffer<AbstractUI<R>::RequestBuffer>);
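
/* Illustrative note (not in the original source): Glib::Threads::Private
 * wraps thread-local storage. The constructor argument above is a cleanup
 * hook that glib invokes with the stored pointer when a thread that called
 * set() exits. A minimal sketch of the lifecycle (buffer size arbitrary):
 *
 *   // worker thread, after registering with the UI:
 *   //   per_thread_request_buffer.set (new RequestBuffer (1024, ui));
 *   // ...worker thread exits...
 *   //   -> cleanup_request_buffer<RequestBuffer> (stored pointer) runs,
 *   //      which only sets rb->dead = true; the buffer itself is freed
 *   //      later by handle_ui_requests() in the UI thread.
 */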

template <typename RequestObject>
AbstractUI<RequestObject>::AbstractUI (const string& name)
	: BaseUI (name)
{
	void (AbstractUI<RequestObject>::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;

	/* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and
	   register_thread() is thread safe anyway.
	*/

	PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4));
}
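
/* Illustrative note (not in the original source): register_thread() runs
 * whenever some thread emits PBD::ThreadCreatedWithRequestSize; the emitting
 * code lives outside this file. A minimal sketch of a worker thread
 * announcing itself to an event loop named "gui" (name, thread label and
 * request count here are arbitrary examples):
 *
 *   PBD::ThreadCreatedWithRequestSize ("gui", pthread_self(), "Worker", 1024);
 *
 * Every AbstractUI instance is connected to this signal (see the constructor
 * above); only the instance whose name() matches the first argument will
 * create a request buffer for the calling thread.
 */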

template <typename RequestObject> void
AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests)
{
	/* the calling thread wants to register with the thread that runs this
	 * UI's event loop, so that it will have its own per-thread queue of
	 * requests. this means that when it makes a request to this UI it can
	 * do so in a realtime-safe manner (no locks).
	 */

	if (target_gui != name()) {
		/* this UI is not the UI that the calling thread is trying to
		   register with
		*/
		return;
	}

	/* the per_thread_request_buffer is a thread-private variable.
	   See pthreads documentation for more on these, but the key
	   thing is that it is a variable that has a unique value for
	   each thread, guaranteed.
	*/

	RequestBuffer* b = per_thread_request_buffer.get();

	if (b) {
		/* thread already registered with this UI
		 */
		return;
	}

	/* create a new request queue/ringbuffer */

	b = new RequestBuffer (num_requests, *this);

	{
		/* add the new request queue (ringbuffer) to our map
		   so that we can iterate over it when the time is right.
		   This step is not RT-safe, but is assumed to be called
		   only at thread initialization time, not repeatedly,
		   and so this is of little consequence.
		*/
		Glib::Threads::Mutex::Lock lm (request_buffer_map_lock);
		request_buffers[thread_id] = b;
	}

	/* set this thread's per_thread_request_buffer to this new
	   queue/ringbuffer. remember that only this thread will
	   get this queue when it calls per_thread_request_buffer.get()

	   the cleanup function passed to per_thread_request_buffer's
	   constructor will be called when the thread exits, and ensures
	   that the buffer is marked dead. it will then be deleted during
	   a call to handle_ui_requests()
	*/

	per_thread_request_buffer.set (b);
}
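
/* Illustrative note (not in the original source): after registration the same
 * RequestBuffer is reachable from two places, which is what makes the
 * lock-free request path work:
 *
 *   request_buffers[thread_id]      -- read by the UI thread in
 *                                      handle_ui_requests()
 *   per_thread_request_buffer.get() -- read by the registered thread in
 *                                      get_request()/send_request()
 *
 * The registered thread is the only writer and the UI thread is the only
 * reader of that ringbuffer, so no locking is needed once it exists.
 */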

template <typename RequestObject> RequestObject*
AbstractUI<RequestObject>::get_request (RequestType rt)
{
	RequestBuffer* rbuf = per_thread_request_buffer.get ();
	RequestBufferVector vec;

	/* see comments in ::register_thread() above for an explanation of
	   the per_thread_request_buffer variable
	*/

	if (rbuf != 0) {

		/* the calling thread has registered with this UI and therefore
		 * we have a per-thread request queue/ringbuffer. use it. this
		 * "allocation" of a request is RT-safe.
		 */

		rbuf->get_write_vector (&vec);

		if (vec.len[0] == 0) {
			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: no space in per thread pool for request of type %2\n", name(), rt));
			return 0;
		}

		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated per-thread request of type %2, caller %3\n", name(), rt, pthread_name()));

		vec.buf[0]->type = rt;
		vec.buf[0]->valid = true;
		return vec.buf[0];
	}

	/* calling thread has not registered, so just allocate a new request on
	 * the heap. the lack of registration implies that realtime constraints
	 * are not at work.
	 */

	DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated normal heap request of type %2, caller %3\n", name(), rt, pthread_name()));

	RequestObject* req = new RequestObject;
	req->type = rt;

	return req;
}
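
/* Illustrative note (not in the original source): a per-thread request is
 * claimed and published in two steps. A minimal sketch from a registered
 * (non-UI) thread, assuming a UI object `ui` of some concrete AbstractUI
 * subclass:
 *
 *   SomeRequest* req = ui.get_request (SomeRequestType);  // slot in the ring, or heap
 *   if (req) {
 *           // ...fill in request-specific fields...
 *           ui.send_request (req);   // advances the ring's write pointer
 *   }                                // (or queues the heap request) and
 *                                    // wakes the UI event loop
 *
 * `SomeRequest` and `SomeRequestType` are placeholders; each concrete UI
 * defines its own request object and request types.
 */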

template <typename RequestObject> void
AbstractUI<RequestObject>::handle_ui_requests ()
{
	RequestBufferMapIterator i;
	RequestBufferVector vec;

	/* check all registered per-thread buffers first */

	request_buffer_map_lock.lock ();

	for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {

		while (true) {

			/* we must process requests 1 by 1 because
			   the request may run a recursive main
			   event loop that will itself call
			   handle_ui_requests. when we return
			   from the request handler, we cannot
			   expect that the state of queued requests
			   is even remotely consistent with
			   the condition before we called it.
			*/

			i->second->get_read_vector (&vec);

			if (vec.len[0] == 0) {
				break;
			} else {
				if (vec.buf[0]->valid) {
					request_buffer_map_lock.unlock ();
					do_request (vec.buf[0]);
					request_buffer_map_lock.lock ();
					if (vec.buf[0]->invalidation) {
						vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
					}
					delete vec.buf[0];
					i->second->increment_read_ptr (1);
				}
			}
		}
	}

	/* clean up any dead request buffers (their thread has exited) */

	for (i = request_buffers.begin(); i != request_buffers.end(); ) {
		if ((*i).second->dead) {
			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 deleting dead per-thread request buffer for %3\n",
			                                                     name(), pthread_name(), i->second));
			delete (*i).second;
			RequestBufferMapIterator tmp = i;
			++tmp;
			request_buffers.erase (i);
			i = tmp;
		} else {
			++i;
		}
	}

	request_buffer_map_lock.unlock ();

	/* and now, the generic request buffer. same rules as above apply */

	Glib::Threads::Mutex::Lock lm (request_list_lock);

	while (!request_list.empty()) {
		RequestObject* req = request_list.front ();
		request_list.pop_front ();

		/* We need to use this lock, because it's the one
		   returned by slot_invalidation_mutex() and protects
		   against request invalidation.
		*/

		request_buffer_map_lock.lock ();
		if (!req->valid) {
			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 handling invalid heap request, type %3, deleting\n", name(), pthread_name(), req->type));
			delete req;
			request_buffer_map_lock.unlock ();
			continue;
		}

		/* we're about to execute this request, so it's
		   too late for any invalidation. mark
		   the request as "done" before we start.
		*/

		if (req->invalidation) {
			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 remove request from its invalidation list\n", name(), pthread_name()));

			/* after this call, if the object referenced by the
			 * invalidation record is deleted, it will no longer
			 * try to mark the request as invalid.
			 */

			req->invalidation->requests.remove (req);
		}

		/* at this point, an object involved in a functor could be
		 * deleted before we actually execute the functor. so there is
		 * a race condition that makes the invalidation architecture
		 * somewhat pointless.
		 *
		 * really, we should only allow functors containing shared_ptr
		 * references to objects to enter into the request queue.
		 */

		request_buffer_map_lock.unlock ();

		/* unlock the request lock while we execute the request, so
		 * that we don't needlessly block other threads (note: not RT
		 * threads since they have their own queue) from making requests.
		 */

		lm.release ();

		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 execute request type %3\n", name(), pthread_name(), req->type));

		/* and let's do it ... this is a virtual call so that each
		 * specific type of UI can have its own set of requests without
		 * some kind of central request type registration logic
		 */

		do_request (req);

		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 delete heap request type %3\n", name(), pthread_name(), req->type));
		delete req;

		/* re-acquire the list lock so that we check again */

		lm.acquire();
	}
}
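
/* Illustrative note (not in the original source): handle_ui_requests() is
 * not called directly by request senders. send_request() (below) calls
 * signal_new_request(), which (in BaseUI, not shown in this file) wakes the
 * thread running this UI's event loop; that thread then ends up back here
 * to drain both the per-thread ringbuffers and the locked request_list.
 */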

template <typename RequestObject> void
AbstractUI<RequestObject>::send_request (RequestObject *req)
{
	/* This is called to ask a given UI to carry out a request. It may be
	 * called from the same thread that runs the UI's event loop (see the
	 * caller_is_self() case below), or from any other thread.
	 */

	if (base_instance() == 0) {
		return; /* XXX is this the right thing to do ? */
	}

	if (caller_is_self ()) {
		/* the thread that runs this UI's event loop is sending itself
		   a request: we dispatch it immediately and inline.
		*/
		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of request type %3\n", name(), pthread_name(), req->type));
		do_request (req);
		delete req;
	} else {

		/* If called from a different thread, we first check to see if
		 * the calling thread is registered with this UI. If so, there
		 * is a per-thread ringbuffer of requests that ::get_request()
		 * just set up a new request in. In that case, all we need do
		 * here is to advance the write ptr in that ringbuffer so that
		 * the next request by this calling thread will use the next
		 * slot in the ringbuffer. The ringbuffer has
		 * single-reader/single-writer semantics because the calling
		 * thread is the only writer, and the UI event loop is the only
		 * reader.
		 */

		RequestBuffer* rbuf = per_thread_request_buffer.get ();

		if (rbuf != 0) {
			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send per-thread request type %3\n", name(), pthread_name(), req->type));
			rbuf->increment_write_ptr (1);
		} else {
			/* no per-thread buffer, so just use a list with a lock so that it
			   preserves single-reader/single-writer semantics
			*/
			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send heap request type %3\n", name(), pthread_name(), req->type));
			Glib::Threads::Mutex::Lock lm (request_list_lock);
			request_list.push_back (req);
		}

		/* send the UI event loop thread a wakeup so that it will look
		   at the per-thread and generic request lists.
		*/

		signal_new_request ();
	}
}
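
/* Illustrative note (not in the original source): a request therefore takes
 * one of three paths through send_request():
 *
 *   caller is the UI thread -> executed inline via do_request(), deleted here
 *   caller registered       -> already sits in the caller's ringbuffer;
 *                              only the write pointer is advanced
 *   caller not registered   -> heap-allocated request appended to
 *                              request_list under request_list_lock
 *
 * The latter two paths finish with signal_new_request() so the UI thread
 * runs handle_ui_requests() soon afterwards.
 */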

template<typename RequestObject> void
AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
{
	if (caller_is_self()) {
		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of call slot via functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation));
		f ();
		return;
	}

	RequestObject *req = get_request (BaseUI::CallSlot);

	if (req == 0) {
		return;
	}

	DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 queue call-slot using functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation));

	/* copy semantics: copy the functor into the request object */

	req->the_slot = f;

	/* the invalidation record is an object which will carry out
	 * invalidation of any requests associated with it when it is
	 * destroyed. it can be null. if it's not null, associate this
	 * request with the invalidation record. this allows us to
	 * "cancel" requests submitted to the UI because they involved
	 * a functor that uses an object that is being deleted.
	 */

	req->invalidation = invalidation;

	if (invalidation) {
		invalidation->requests.push_back (req);
		invalidation->event_loop = this;
	}

	send_request (req);
}
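
/* Illustrative note (not in the original source): call_slot() is the usual
 * entry point for cross-thread work. A minimal sketch, assuming a UI object
 * `ui` and passing a null invalidation record (meaning the request can never
 * be cancelled, so the functor must not reference objects that might be
 * deleted first):
 *
 *   ui.call_slot (0, boost::bind (&SomeObject::update, some_object_ptr));
 *
 * `SomeObject`, `update` and `some_object_ptr` are placeholders. With a real
 * EventLoop::InvalidationRecord, destroying the record marks any still-queued
 * requests invalid, so handle_ui_requests() discards them instead of running
 * the functor.
 */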