void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
   Thread * Self = THREAD ;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
       // We don't need to hold _mutex for this transition.
       // Non-null to Non-null is safe as long as all readers can
       // tolerate either flavor.
       assert (_recursions == 0, "invariant") ;
       _owner = THREAD ;
       _recursions = 0 ;
       OwnerIsThread = 1 ;
     } else {
       // NOTE: we need to handle unbalanced monitor enter/exit
       // in native code by throwing an exception.
       // TODO: Throw an IllegalMonitorStateException ?
       TEVENT (Exit - Throw IMSX) ;
       assert(false, "Non-balanced monitor enter/exit!");
       if (false) {
          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
       }
       return;
     }
   }

Reentrancy handling: if _recursions is non-zero this is a recursive (re-entrant) exit, so the count is simply decremented and the method returns immediately; only the outermost exit runs the succession protocol below. (Note this is the recursion counter for reentrant monitor enters, not biased-locking logic.)

   if (_recursions != 0) {
     _recursions--;        // this is simple recursive enter
     TEVENT (Inflated exit - recursive) ;
     return ;
   }
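
For orientation, this recursion counter is what makes Java's synchronized reentrant. A minimal Java example that drives this path, assuming the monitor has already been inflated (for example by earlier contention or a wait()); otherwise a thin or biased lock is handled before this C++ code is ever reached:

public class ReentrantExitDemo {
    private static final Object LOCK = new Object();

    static void outer() {
        synchronized (LOCK) {        // first monitorenter: _recursions == 0
            inner();
        }                            // outermost monitorexit: runs the full exit protocol
    }

    static void inner() {
        synchronized (LOCK) {        // same owner re-enters: _recursions becomes 1
            // reentrant critical section
        }                            // monitorexit here only does _recursions-- and returns
    }

    public static void main(String[] args) {
        outer();
    }
}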

   // Invariant: after setting Responsible=null a thread must execute
   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }

#if INCLUDE_TRACE
   // get the owner's thread id for the MonitorEnter event
   // if it is enabled and the thread isn't suspended
   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
   }
#endif

   for (;;) {
      assert (THREAD == _owner, "invariant") ;

      if (Knob_ExitPolicy == 0) {
         // release semantics: prior loads and stores from within the critical section
         // must not float (reorder) past the following store that drops the lock.
         // On SPARC that requires MEMBAR #loadstore|#storestore.
         // But of course in TSO #loadstore|#storestore is not required.
         // I'd like to write one of the following:
         // A.  OrderAccess::release() ; _owner = NULL
         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
         // store into a _dummy variable.  That store is not needed, but can result
         // in massive wasteful coherency traffic on classic SMP systems.
         // Instead, I use release_store(), which is implemented as just a simple
         // ST on x64, x86 and SPARC.
         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
         OrderAccess::storeload() ;                         // See if we need to wake a successor
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            TEVENT (Inflated exit - simple egress) ;
            return ;
         }
         TEVENT (Inflated exit - complex egress) ;
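
This fast path is the classic 1-0 unlock: release the lock with a plain store, fence, then re-check whether anyone needs waking. A simplified Java analogue of the same store-then-recheck shape (a sketch only, with invented class and field names, not HotSpot's API):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

// Illustrative 1-0 unlock: drop the lock with a store, then re-check
// for waiters and wake at most one.
final class TinyMonitor {
    private final AtomicReference<Thread> owner = new AtomicReference<>();
    private final ConcurrentLinkedQueue<Thread> waiters = new ConcurrentLinkedQueue<>();

    void lock() {
        Thread self = Thread.currentThread();
        if (owner.compareAndSet(null, self)) return;     // uncontended fast path
        waiters.add(self);                               // analogous to pushing onto _cxq
        while (!owner.compareAndSet(null, self)) {
            LockSupport.park(this);                      // park until an exiting thread unparks us
        }
        waiters.remove(self);
    }

    void unlock() {
        owner.set(null);                // drop the lock; a volatile store, so the
                                        // volatile read below cannot be reordered
                                        // above it (the role OrderAccess::storeload()
                                        // plays in the C++ above)
        Thread heir = waiters.peek();   // re-check for a successor AFTER the store
        if (heir != null) {
            LockSupport.unpark(heir);   // wake one "heir presumptive"
        }
    }
}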

         // Normally the exiting thread is responsible for ensuring succession,
         // but if other successors are ready or other entering threads are spinning
         // then this thread can simply store NULL into _owner and exit without
         // waking a successor.  The existence of spinners or ready successors
         // guarantees proper succession (liveness).  Responsibility passes to the
         // ready or running successors.  The exiting thread delegates the duty.
         // More precisely, if a successor already exists this thread is absolved
         // of the responsibility of waking (unparking) one.
         //
         // The _succ variable is critical to reducing futile wakeup frequency.
         // _succ identifies the "heir presumptive" thread that has been made
         // ready (unparked) but that has not yet run.  We need only one such
         // successor thread to guarantee progress.
         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
         // section 3.3 "Futile Wakeup Throttling" for details.
         //
         // Note that spinners in Enter() also set _succ non-null.
         // In the current implementation spinners opportunistically set
         // _succ so that exiting threads might avoid waking a successor.
         // Another less appealing alternative would be for the exiting thread
         // to drop the lock and then spin briefly to see if a spinner managed
         // to acquire the lock.  If so, the exiting thread could exit
         // immediately without waking a successor, otherwise the exiting
         // thread would need to dequeue and wake a successor.
         // (Note that we'd need to make the post-drop spin short, but no
         // shorter than the worst-case round-trip cache-line migration time.
         // The dropped lock needs to become visible to the spinner, and then
         // the acquisition of the lock by the spinner must become visible to
         // the exiting thread).
         //

         // It appears that an heir-presumptive (successor) must be made ready.
         // Only the current lock owner can manipulate the EntryList or
         // drain _cxq, so we need to reacquire the lock.  If we fail
         // to reacquire the lock the responsibility for ensuring succession
         // falls to the new owner.
         //
         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
            return ;
         }
         TEVENT (Exit - Reacquired) ;
      } else {
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
            OrderAccess::storeload() ;
            // Ratify the previously observed values.
            if (_cxq == NULL || _succ != NULL) {
                TEVENT (Inflated exit - simple egress) ;
                return ;
            }

            // inopportune interleaving -- the exiting thread (this thread)
            // in the fast-exit path raced an entering thread in the slow-enter
            // path.
            // We have two choices:
            // A.  Try to reacquire the lock.
            //     If the CAS() fails return immediately, otherwise
            //     we either restart/rerun the exit operation, or simply
            //     fall-through into the code below which wakes a successor.
            // B.  If the elements forming the EntryList|cxq are TSM
            //     we could simply unpark() the lead thread and return
            //     without having set _succ.
            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
               TEVENT (Inflated exit - reacquired succeeded) ;
               return ;
            }
            TEVENT (Inflated exit - reacquired failed) ;
         } else {
            TEVENT (Inflated exit - complex egress) ;
         }
      }

      guarantee (_owner == THREAD, "invariant") ;

      ObjectWaiter * w = NULL ;
      int QMode = Knob_QMode ;

QMode == 2 and _cxq non-empty: take the ObjectWaiter at the head of the _cxq queue and hand it to ExitEpilog, which wakes that waiter's thread; exit() returns immediately here and none of the code below runs.

      if (QMode == 2 && _cxq != NULL) {
          // QMode == 2 : cxq has precedence over EntryList.
          // Try to directly wake a successor from the cxq.
          // If successful, the successor will need to unlink itself from cxq.
          w = _cxq ;
          assert (w != NULL, "invariant") ;
          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
          ExitEpilog (Self, w) ;
          return ;
      }
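
ExitEpilog itself is not shown in this listing; in JDK 8 it essentially records the wakee as _succ, drops the lock with a release store, and unparks the wakee. A Java-flavored sketch of that hand-off, with invented names rather than a real API:

import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

// Sketch of the ExitEpilog hand-off: publish the successor hint, drop the
// lock, then unpark exactly one thread, which must still win the CAS itself.
final class HandOff {
    private final AtomicReference<Thread> owner = new AtomicReference<>(Thread.currentThread());
    private volatile Thread succ;             // the "heir presumptive" (_succ)

    void exitEpilog(Thread wakee) {
        succ = wakee;                         // roughly _succ = Wakee->_thread
        owner.set(null);                      // release store of NULL into _owner
        LockSupport.unpark(wakee);            // wake the chosen successor
    }
}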

QMode == 3 and _cxq non-empty: detach the entire _cxq chain and append it to the tail of _EntryList (the whole chain, not just the first element), then fall through to wake a successor from _EntryList.

      if (QMode == 3 && _cxq != NULL) {
          // Aggressively drain cxq into EntryList at the first opportunity.
          // This policy ensures that recently-run threads live at the head of EntryList.
          // Drain _cxq into EntryList - bulk transfer.
          // First, detach _cxq.
          // The following loop is tantamount to: w = swap (&cxq, NULL)
          w = _cxq ;
          for (;;) {
             assert (w != NULL, "Invariant") ;
             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
             if (u == w) break ;
             w = u ;
          }
          assert (w != NULL              , "invariant") ;

          ObjectWaiter * q = NULL ;
          ObjectWaiter * p ;
          for (p = w ; p != NULL ; p = p->_next) {
              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
              p->TState = ObjectWaiter::TS_ENTER ;
              p->_prev = q ;
              q = p ;
          }

          // Append the RATs to the EntryList
          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
          ObjectWaiter * Tail ;
          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
          if (Tail == NULL) {
              _EntryList = w ;
          } else {
              Tail->_next = w ;
              w->_prev = Tail ;
          }

          // Fall thru into code that tries to wake a successor from EntryList
      }

QMode == 4 and _cxq non-empty: detach the entire _cxq chain and prepend it to the head of _EntryList, then fall through to wake a successor from _EntryList.

      if (QMode == 4 && _cxq != NULL) {
          // Aggressively drain cxq into EntryList at the first opportunity.
          // This policy ensures that recently-run threads live at the head of EntryList.

          // Drain _cxq into EntryList - bulk transfer.
          // First, detach _cxq.
          // The following loop is tantamount to: w = swap (&cxq, NULL)
          w = _cxq ;
          for (;;) {
             assert (w != NULL, "Invariant") ;
             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
             if (u == w) break ;
             w = u ;
          }
          assert (w != NULL              , "invariant") ;

          ObjectWaiter * q = NULL ;
          ObjectWaiter * p ;
          for (p = w ; p != NULL ; p = p->_next) {
              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
              p->TState = ObjectWaiter::TS_ENTER ;
              p->_prev = q ;
              q = p ;
          }

          // Prepend the RATs to the EntryList
          if (_EntryList != NULL) {
              q->_next = _EntryList ;
              _EntryList->_prev = q ;
          }
          _EntryList = w ;

          // Fall thru into code that tries to wake a successor from EntryList
      }

      w = _EntryList  ;
      if (w != NULL) {
          // I'd like to write: guarantee (w->_thread != Self).
          // But in practice an exiting thread may find itself on the EntryList.
          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
          // then calls exit().  Exit releases the lock by setting O._owner to NULL.
          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
          // releases the lock "O".  T1 resumes immediately after the ST of null into
          // _owner, above.  T1 notices that the EntryList is populated, so it
          // reacquires the lock and then finds itself on the EntryList.
          // Given all that, we have to tolerate the circumstance where "w" is
          // associated with Self.
          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
          ExitEpilog (Self, w) ;
          return ;
      }
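
The waitset-to-EntryList transfer described in that comment is just the ordinary wait/notify flow. A minimal Java sketch of the interleaving (the stall in T1's exit cannot be forced deterministically; the sleep is a crude stand-in for scheduling):

public class WaitNotifyDemo {
    private static final Object O = new Object();

    public static void main(String[] args) throws InterruptedException {
        Thread t1 = new Thread(() -> {
            synchronized (O) {                 // T1 owns O
                try {
                    O.wait();                  // enqueue T1 on O's waitset, then exit() the monitor
                } catch (InterruptedException ignored) { }
                // here T1 has been moved to the EntryList by notify() and has
                // re-acquired O before returning from wait()
            }
        });
        t1.start();
        Thread.sleep(100);                     // crude way to let T1 reach wait()
        synchronized (O) {                     // T2 (main) acquires O
            O.notify();                        // move T1 from waitset to EntryList
        }                                      // T2 exits; the code above picks a successor
        t1.join();
    }
}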

      // If we find that both _cxq and EntryList are null then just
      // re-run the exit protocol from the top.
      w = _cxq ;
      if (w == NULL) continue ;

      // Drain _cxq into EntryList - bulk transfer.
      // First, detach _cxq.
      // The following loop is tantamount to: w = swap (&cxq, NULL)
      for (;;) {
          assert (w != NULL, "Invariant") ;
          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
          if (u == w) break ;
          w = u ;
      }
      TEVENT (Inflated exit - drain cxq into EntryList) ;
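
The detach loop ("tantamount to: w = swap(&cxq, NULL)") emulates an atomic swap with a CAS retry loop, since this HotSpot vintage did not assume a swap primitive on every platform. In Java the same detach is a single getAndSet. A sketch with a Treiber-style push for the enter side (names are illustrative):

import java.util.concurrent.atomic.AtomicReference;

// cxq modeled as a lock-free LIFO stack: enter() pushes, exit() detaches the whole chain.
final class Cxq {
    static final class Node {
        final Thread thread = Thread.currentThread();  // the enqueued thread
        Node next;
    }

    private final AtomicReference<Node> head = new AtomicReference<>();

    // Enter side: push self onto cxq with a CAS loop.
    void push(Node n) {
        Node h;
        do {
            h = head.get();
            n.next = h;
        } while (!head.compareAndSet(h, n));
    }

    // Exit side: detach the entire chain in one atomic step,
    // equivalent to HotSpot's CAS loop above.
    Node detachAll() {
        return head.getAndSet(null);
    }
}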

      assert (w != NULL              , "invariant") ;
      assert (_EntryList  == NULL    , "invariant") ;

      // Convert the LIFO SLL anchored by _cxq into a DLL.
      // The list reorganization step operates in O(LENGTH(w)) time.
      // It's critical that this step operate quickly as
      // "Self" still holds the outer-lock, restricting parallelism
      // and effectively lengthening the critical section.
      // Invariant: s chases t chases u.
      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
      // we have faster access to the tail.

      if (QMode == 1) {
         // QMode == 1 : drain cxq to EntryList, reversing order
         // We also reverse the order of the list.
         ObjectWaiter * s = NULL ;
         ObjectWaiter * t = w ;
         ObjectWaiter * u = NULL ;
         while (t != NULL) {
             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
             t->TState = ObjectWaiter::TS_ENTER ;
             u = t->_next ;
             t->_prev = u ;
             t->_next = s ;
             s = t;
             t = u ;
         }
         _EntryList  = s ;
         assert (s != NULL, "invariant") ;
      } else {
         // QMode == 0 or QMode == 2
         _EntryList = w ;
         ObjectWaiter * q = NULL ;
         ObjectWaiter * p ;
         for (p = w ; p != NULL ; p = p->_next) {
             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
             p->TState = ObjectWaiter::TS_ENTER ;
             p->_prev = q ;
             q = p ;
         }
      }

      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().

      // See if we can abdicate to a spinner instead of waking a thread.
      // A primary goal of the implementation is to reduce the
      // context-switch rate.
      if (_succ != NULL) continue;

      w = _EntryList  ;
      if (w != NULL) {
          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
          ExitEpilog (Self, w) ;
          return ;
      }
   }
}

To summarize the lock-release (exit) path:

1. Fix up ownership if _owner still holds a BasicLock pointer, and handle reentrancy: if _recursions != 0 this is a recursive exit, so just decrement the count and return;

2. Depending on QMode, pick an ObjectWaiter from _cxq or _EntryList and wake it via ExitEpilog (QMode 2 wakes the head of _cxq directly; QMode 3/4 first drain _cxq to the tail/head of _EntryList; QMode 0/1 drain _cxq only once _EntryList is empty, with QMode 1 reversing the order);

3. The woken thread resumes right where it parked; as analyzed earlier, once awake it competes for the lock again via CAS, as the sketch below illustrates.
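
A minimal Java sketch of what the woken thread does, modeled loosely on the retry loop in ObjectMonitor::EnterI (all names here are illustrative). The key point is that an unpark is only a hint, not a transfer of ownership: the wakee must still win the CAS.

import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

final class Waiter {
    static final AtomicReference<Thread> owner = new AtomicReference<>();

    static void enter() {
        Thread self = Thread.currentThread();
        while (!owner.compareAndSet(null, self)) {  // CAS on the _owner analogue
            LockSupport.park(owner);                // wait for exit() to unpark us
            // loop: another thread may have snatched the lock first,
            // in which case we park again and wait for the next wakeup
        }
        // owner == self here: lock acquired
    }
}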
