Mercurial > projects > ldc
comparison tango/lib/common/tango/stdc/posix/pthread_darwin.d @ 132:1700239cab2e trunk
[svn r136] MAJOR UNSTABLE UPDATE!!!
Initial commit after moving to Tango instead of Phobos.
Lots of bugfixes...
This build is not suitable for most things.
author | lindquist |
---|---|
date | Fri, 11 Jan 2008 17:57:40 +0100 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
131:5825d48b27d1 | 132:1700239cab2e |
---|---|
1 /** | |
2 * D header file for POSIX. | |
3 * | |
4 * Copyright: Public Domain | |
5 * License: Public Domain | |
6 * Authors: Sean Kelly | |
7 * Standards: The Open Group Base Specifications Issue 6, IEEE Std 1003.1, 2004 Edition | |
8 */ | |
9 module tango.stdc.posix.pthread; | |
10 | |
11 public import tango.stdc.posix.sys.types; | |
12 public import tango.stdc.posix.sched; | |
13 public import tango.stdc.posix.time; | |
14 private import tango.stdc.stdlib; | |
15 | |
16 extern (C): | |
17 | |
18 // | |
19 // Required | |
20 // | |
21 | |
version( darwin )
{
    // Condition variables.  pthread_cond_signal and pthread_cond_timedwait
    // are commented out here -- presumably declared elsewhere for this
    // platform; TODO confirm against the surrounding tango headers.
    int pthread_cond_broadcast(pthread_cond_t*);
    int pthread_cond_destroy(pthread_cond_t*);
    int pthread_cond_init(pthread_cond_t*, pthread_condattr_t*);
    //int pthread_cond_signal(pthread_cond_t*);
    //int pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, timespec*);
    int pthread_cond_wait(pthread_cond_t*, pthread_mutex_t*);

    // Mutexes.
    int pthread_mutex_destroy(pthread_mutex_t*);
    int pthread_mutex_init(pthread_mutex_t*, pthread_mutexattr_t*);
    int pthread_mutex_lock(pthread_mutex_t*);
    int pthread_mutex_trylock(pthread_mutex_t*);
    int pthread_mutex_unlock(pthread_mutex_t*);

    // Read-write locks: only the try* variants are declared here; the
    // blocking/lifecycle variants are commented out -- presumably
    // declared elsewhere for this platform.
    //int pthread_rwlock_destroy(pthread_rwlock_t*);
    //int pthread_rwlock_init(pthread_rwlock_t*, pthread_rwlockattr_t*);
    //int pthread_rwlock_rdlock(pthread_rwlock_t*);
    int pthread_rwlock_tryrdlock(pthread_rwlock_t*);
    int pthread_rwlock_trywrlock(pthread_rwlock_t*);
    //int pthread_rwlock_unlock(pthread_rwlock_t*);
    //int pthread_rwlock_wrlock(pthread_rwlock_t*);
}
45 | |
46 // | |
47 // Barrier (BAR) | |
48 // | |
49 /* | |
50 PTHREAD_BARRIER_SERIAL_THREAD | |
51 | |
52 int pthread_barrier_destroy(pthread_barrier_t*); | |
53 int pthread_barrier_init(pthread_barrier_t*, pthread_barrierattr_t*, uint); | |
54 int pthread_barrier_wait(pthread_barrier_t*); | |
55 int pthread_barrierattr_destroy(pthread_barrierattr_t*); | |
56 int pthread_barrierattr_getpshared(pthread_barrierattr_t*, int*); (BAR|TSH) | |
57 int pthread_barrierattr_init(pthread_barrierattr_t*); | |
58 int pthread_barrierattr_setpshared(pthread_barrierattr_t*, int); (BAR|TSH) | |
59 */ | |
60 | |
version( darwin )
{
    // Returned by pthread_barrier_wait() to exactly one thread per
    // barrier cycle; all other waiters receive 0.
    const PTHREAD_BARRIER_SERIAL_THREAD = -1;

    // defined in tango.stdc.posix.pthread and redefined here
    // NOTE(review): this module appears to BE tango.stdc.posix.pthread
    // (see the module declaration above) -- verify these values match
    // any other definition site to avoid a conflict.
    enum
    {
        PTHREAD_PROCESS_PRIVATE,
        PTHREAD_PROCESS_SHARED
    }
72 int pthread_barrier_destroy( pthread_barrier_t* barrier ) | |
73 { | |
74 if( barrier is null ) | |
75 return EINVAL; | |
76 if( barrier.b_waiters > 0 ) | |
77 return EBUSY; | |
78 int mret = pthread_mutex_destroy( &barrier.b_lock ); | |
79 int cret = pthread_cond_destroy( &barrier.b_cond ); | |
80 free( barrier ); | |
81 return mret ? mret : cret; | |
82 } | |
83 | |
84 int pthread_barrier_init( pthread_barrier_t* barrier, | |
85 pthread_barrierattr_t* attr, | |
86 uint count ) | |
87 { | |
88 if( barrier is null || count <= 0 ) | |
89 return EINVAL; | |
90 | |
91 pthread_barrier_t* newbarrier = cast(pthread_barrier_t*) | |
92 malloc( pthread_barrier_t.sizeof ); | |
93 if( newbarrier is null ) | |
94 return ENOMEM; | |
95 | |
96 int ret; | |
97 if( ( ret = pthread_mutex_init( &newbarrier.b_lock, null ) ) != 0 ) | |
98 { | |
99 free( newbarrier ); | |
100 return ret; | |
101 } | |
102 if( ( ret = pthread_cond_init( &newbarrier.b_cond, null ) ) != 0 ) | |
103 { | |
104 pthread_mutex_destroy( &newbarrier.b_lock ); | |
105 free( newbarrier ); | |
106 return ret; | |
107 } | |
108 newbarrier.b_waiters = 0; | |
109 newbarrier.b_count = count; | |
110 newbarrier.b_generation = 0; | |
111 *barrier = *newbarrier; | |
112 | |
113 return 0; | |
114 } | |
115 | |
    /**
     * Blocks until b_count threads have reached the barrier, then starts
     * a new generation.  Exactly one thread per cycle (the last arrival)
     * returns PTHREAD_BARRIER_SERIAL_THREAD; the rest return 0.
     *
     * Returns: EINVAL for a null barrier, a mutex/cond error code on
     *          failure, otherwise 0 or PTHREAD_BARRIER_SERIAL_THREAD.
     */
    int pthread_barrier_wait( pthread_barrier_t* barrier )
    {
        if( barrier is null )
            return EINVAL;

        int ret;
        if( ( ret = pthread_mutex_lock( &barrier.b_lock ) ) != 0 )
            return ret;

        if( ++barrier.b_waiters == barrier.b_count )
        {
            // current thread is the last to arrive: advance the
            // generation, reset the waiter count, and wake everyone
            barrier.b_generation++;
            barrier.b_waiters = 0;
            if( ( ret = pthread_cond_broadcast( &barrier.b_cond ) ) == 0 )
                ret = PTHREAD_BARRIER_SERIAL_THREAD;
        }
        else
        {
            // wait until the generation changes; re-checking it guards
            // against spurious wakeups from pthread_cond_wait
            int gen = barrier.b_generation;
            do
            {
                ret = pthread_cond_wait( &barrier.b_cond, &barrier.b_lock );
                // test generation to avoid bogus wakeup
            } while( ret == 0 && gen == barrier.b_generation );
        }
        pthread_mutex_unlock( &barrier.b_lock );
        return ret;
    }
145 | |
146 int pthread_barrierattr_destroy( pthread_barrierattr_t* attr ) | |
147 { | |
148 if( attr is null ) | |
149 return EINVAL; | |
150 free( attr ); | |
151 return 0; | |
152 } | |
153 | |
154 int pthread_barrierattr_getpshared( pthread_barrierattr_t* attr, int* pshared ) | |
155 { | |
156 if( attr is null ) | |
157 return EINVAL; | |
158 *pshared = attr.pshared; | |
159 return 0; | |
160 } | |
161 | |
162 int pthread_barrierattr_init( pthread_barrierattr_t* attr ) | |
163 { | |
164 if( attr is null ) | |
165 return EINVAL; | |
166 if( ( attr = cast(pthread_barrierattr_t*) | |
167 malloc( pthread_barrierattr_t.sizeof ) ) is null ) | |
168 return ENOMEM; | |
169 attr.pshared = PTHREAD_PROCESS_PRIVATE; | |
170 return 0; | |
171 } | |
172 | |
173 int pthread_barrierattr_setpshared( pthread_barrierattr_t* attr, int pshared ) | |
174 { | |
175 if( attr is null ) | |
176 return EINVAL; | |
177 // only PTHREAD_PROCESS_PRIVATE is supported | |
178 if( pshared != PTHREAD_PROCESS_PRIVATE ) | |
179 return EINVAL; | |
180 attr.pshared = pshared; | |
181 return 0; | |
182 } | |
183 } | |
184 | |
185 // | |
186 // Timeouts (TMO) | |
187 // | |
188 /* | |
189 int pthread_mutex_timedlock(pthread_mutex_t*, timespec*); | |
190 int pthread_rwlock_timedrdlock(pthread_rwlock_t*, timespec*); | |
191 int pthread_rwlock_timedwrlock(pthread_rwlock_t*, timespec*); | |
192 */ | |
193 | |
version( darwin )
{
    private
    {
        import tango.stdc.errno;
        import tango.stdc.posix.unistd;
        import tango.stdc.posix.sys.time;

        // D translations of the BSD <sys/time.h> timer macros, used by
        // the timed-lock emulations below.
        extern (D)
        {
            // Zero both fields of a timeval.
            void timerclear( timeval* tvp )
            {
                tvp.tv_sec = tvp.tv_usec = 0;
            }

            // True if either field of the timeval is non-zero.
            bool timerisset( timeval* tvp )
            {
                return tvp.tv_sec || tvp.tv_usec;
            }

            // True if *tvp <= *uvp.  When the seconds differ, comparing
            // with <= is equivalent to < since equality is excluded.
            bool timer_cmp_leq( timeval* tvp, timeval* uvp )
            {
                return tvp.tv_sec == uvp.tv_sec ?
                       tvp.tv_usec <= uvp.tv_usec :
                       tvp.tv_sec <= uvp.tv_sec;
            }

            // *vvp = *tvp + *uvp, normalizing tv_usec into [0, 1000000).
            // Assumes both inputs are already normalized.
            void timeradd( timeval* tvp, timeval* uvp, timeval* vvp )
            {
                vvp.tv_sec = tvp.tv_sec + uvp.tv_sec;
                vvp.tv_usec = tvp.tv_usec + uvp.tv_usec;
                if( vvp.tv_usec >= 1000000 )
                {
                    vvp.tv_sec++;
                    vvp.tv_usec -= 1000000;
                }
            }

            // *vvp = *tvp - *uvp, borrowing from tv_sec when the
            // microsecond difference goes negative.
            void timersub( timeval* tvp, timeval* uvp, timeval* vvp )
            {
                vvp.tv_sec = tvp.tv_sec - uvp.tv_sec;
                vvp.tv_usec = tvp.tv_usec - uvp.tv_usec;
                if( vvp.tv_usec < 0 )
                {
                    vvp.tv_sec--;
                    vvp.tv_usec += 1000000;
                }
            }

            // Convert timeval -> timespec (microseconds to nanoseconds).
            void TIMEVAL_TO_TIMESPEC( timeval* tv, timespec* ts )
            {
                ts.tv_sec = tv.tv_sec;
                ts.tv_nsec = tv.tv_usec * 1000;
            }

            // Convert timespec -> timeval (nanoseconds to microseconds,
            // truncating sub-microsecond precision).
            void TIMESPEC_TO_TIMEVAL( timeval* tv, timespec* ts )
            {
                tv.tv_sec = ts.tv_sec;
                tv.tv_usec = ts.tv_nsec / 1000;
            }
        }
    }
256 | |
257 int pthread_mutex_timedlock( pthread_mutex_t* m, timespec* t ) | |
258 { | |
259 timeval currtime; | |
260 timeval maxwait; | |
261 TIMESPEC_TO_TIMEVAL( &maxwait, t ); | |
262 timeval waittime; | |
263 waittime.tv_usec = 100; | |
264 | |
265 while( timer_cmp_leq( &currtime, &maxwait ) ) | |
266 { | |
267 int ret = pthread_mutex_trylock( m ); | |
268 switch( ret ) | |
269 { | |
270 case 0: // locked successfully | |
271 return ret; | |
272 case EBUSY: // waiting | |
273 timeradd( &currtime, &waittime, &currtime ); | |
274 break; | |
275 default: | |
276 return ret; | |
277 } | |
278 usleep( waittime.tv_usec ); | |
279 } | |
280 return ETIMEDOUT; | |
281 } | |
282 | |
283 int pthread_rwlock_timedrdlock( pthread_rwlock_t *rwlock, timespec* t ) | |
284 { | |
285 timeval currtime; | |
286 timeval maxwait; | |
287 TIMESPEC_TO_TIMEVAL( &maxwait, t ); | |
288 timeval waittime; | |
289 waittime.tv_usec = 100; | |
290 | |
291 while( timer_cmp_leq( &currtime, &maxwait ) ) | |
292 { | |
293 int ret = pthread_rwlock_tryrdlock( rwlock ); | |
294 switch( ret ) | |
295 { | |
296 case 0: // locked successfully | |
297 return ret; | |
298 case EBUSY: // waiting | |
299 timeradd( &currtime, &waittime, &currtime ); | |
300 break; | |
301 default: | |
302 return ret; | |
303 } | |
304 usleep( waittime.tv_usec ); | |
305 } | |
306 return ETIMEDOUT; | |
307 } | |
308 | |
309 int pthread_rwlock_timedwrlock( pthread_rwlock_t* l, timespec* t ) | |
310 { | |
311 timeval currtime; | |
312 timeval maxwait; | |
313 TIMESPEC_TO_TIMEVAL( &maxwait, t ); | |
314 timeval waittime; | |
315 waittime.tv_usec = 100; | |
316 | |
317 while( timer_cmp_leq( &currtime, &maxwait ) ) | |
318 { | |
319 int ret = pthread_rwlock_trywrlock( l ); | |
320 switch( ret ) | |
321 { | |
322 case 0: // locked successfully | |
323 return ret; | |
324 case EBUSY: // waiting | |
325 timeradd( &currtime, &waittime, &currtime ); | |
326 break; | |
327 default: | |
328 return ret; | |
329 } | |
330 usleep( waittime.tv_usec ); | |
331 } | |
332 return ETIMEDOUT; | |
333 } | |
334 } |