001/*******************************************************************************
002 * Copyright (c) 2024, 2026, Olivier Ayache.  All rights reserved.
003 *
004 * This file is part of AVPKit.
005 *
006 * AVPKit is free software: you can redistribute it and/or modify
007 * it under the terms of the GNU Lesser General Public License as published by
008 * the Free Software Foundation, either version 3 of the License, or
009 * (at your option) any later version.
010 *
011 * AVPKit is distributed in the hope that it will be useful,
012 * but WITHOUT ANY WARRANTY; without even the implied warranty of
013 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
014 * GNU Lesser General Public License for more details.
015 *
016 * You should have received a copy of the GNU Lesser General Public License
017 * along with AVPKit.  If not, see <http://www.gnu.org/licenses/>.
018 *******************************************************************************/
019
020package com.avpkit.ferry;
021
022
023import java.lang.ref.ReferenceQueue;
024import java.nio.ByteBuffer;
025import java.util.concurrent.atomic.AtomicBoolean;
026import java.util.concurrent.locks.Lock;
027import java.util.concurrent.locks.ReentrantLock;
028
029import org.slf4j.Logger;
030import org.slf4j.LoggerFactory;
031
032/**
033 * Manages the native memory that Ferry objects allocate and destroy.
034 * <p>
035 * Native memory isn't nicely garbage collected like Java memory is, and
036 * managing it can be challenging to those not familiar with it. Ferry is all
037 * about making native objects behave nicely in Java, and in order to have Ferry
038 * make objects <i>look</i> like Java objects, the {@link JNIMemoryManager} does
039 * some black magic to ensure native memory is released behind the scenes.
040 * </p>
041 * <p>
042 * To do this by default Ferry uses a Robust mechanism for ensuring native
043 * memory is released, but that comes at the expense of some Speed. This
044 * approach is tunable though.
045 * </p>
046 * <p>
047 * If you run a Java Profiler and see your application is spending a lot of time
048 * copying on incremental collections, or you need to eke out a few more
049 * microseconds of speed, or you're bored, then it's time to experiment with
050 * different {@link MemoryModel} configurations that Ferry supports by calling
051 * {@link #setMemoryModel(MemoryModel)}. This is pretty advanced stuff though,
052 * so be warned.
053 * </p>
054 * <p>
055 * Read {@link MemoryModel} for more.
056 * </p>
057 * 
058 * @see MemoryModel
059 * @author aclarke
060 * 
061 */
062public final class JNIMemoryManager
063{
064  /**
065   * The different types of native memory allocation models Ferry supports. <h2>
066   * Memory Model Performance Implications</h2> Choosing the {@link MemoryModel}
067   * you use in Ferry libraries can have a big effect. Some models emphasize
068   * code that will work "as you expect" (Robustness), but sacrifice some
069   * execution speed to make that happen. Other models value speed first, and
070   * assume you know what you're doing and can manage your own memory.
071   * <p>
072   * In our experience the set of people who need robust software is larger than
073   * the set of people who need the (small) speed price paid, and so we default
074   * to the most robust model.
075   * </p>
076   * <p>
077   * Also in our experience, the set of people who really should just use the
078   * robust model, but instead think they need speed is much larger than the set
079   * of people who actually know what they're doing with java memory management,
080   * so please, <strong>we strongly recommend you start with a robust model and
081   * only change the {@link MemoryModel} if your performance testing shows you
082   * need speed.</strong> Don't say we didn't warn you.
083   * </p>
084   * 
085   * <table>
086   * <tr>
087   * <th>Model</th>
088   * <th>Robustness</th>
089   * <th>Speed</th>
090   * </tr>
091   * 
092   * <tr>
093   * <td> {@link #JAVA_STANDARD_HEAP} (default)</td>
094   * <td>+++++</td>
095   * <td>+</td>
096   * </tr>
097   * 
098   * <tr>
099   * <td> {@link #JAVA_DIRECT_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION}</td>
100   * <td>+++</td>
101   * <td>++</td>
102   * </tr>
103   * 
104   * <tr>
105   * <td> {@link #NATIVE_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION}</td>
106   * <td>+++</td>
107   * <td>+++</td>
108   * </tr>
109   * 
110   * <tr>
111   * <td> {@link #JAVA_DIRECT_BUFFERS} (not recommended)</td>
112   * <td>+</td>
113   * <td>++++</td>
114   * </tr>
115   * 
116   * <tr>
117   * <td> {@link #NATIVE_BUFFERS}</td>
118   * <td>+</td>
119   * <td>+++++</td>
120   * </tr>
121   * 
122   * </table>
123   * <h2>What is &quot;Robustness&quot;?</h2>
124   * <p>
125   * Ferry objects have to allocate native memory to do their job -- it's the
126   * reason for Ferry's existence. And native memory management is very
127   * different than Java memory management (for example, native C++ code doesn't
128   * have a garbage collector). To make things easier for our Java friends,
129   * Ferry tries to make Ferry objects look like Java objects.
130   * </p>
131   * <p>
132   * Which leads us to robustness. The more of these criteria we can hit with a
133   * {@link MemoryModel} the more robust it is.
134   * </p>
135   * <ol>
136   * 
137   * <li><strong>Allocation</strong>: Calls to <code>make()</code> must
138   * correctly allocate memory that can be accessed from native or Java code and
139   * calls to <code>delete()</code> must release that memory immediately.</li>
140   * 
141   * <li><strong>Collection</strong>: Objects no longer referenced in Java
142   * should have their underlying native memory released in a timely fashion.</li>
143   * 
144   * <li><strong>Low Memory</strong>: New allocation in low memory conditions
145   * should first have the Java garbage collector release any old objects.</li>
146   * 
147   * </ol>
148   * <h2>What is &quot;Speed&quot;?</h2>
149   * <p>
150   * Speed is how fast code executes under normal operating conditions. This is
151   * more subjective than it sounds, as how do you define normal operation
152   * conditions? But in general, we define it as &quot;generally plenty of heap
153   * space available&quot;.
154   * </p>
155   * 
156   * <h2>How Does JNIMemoryManager Work?</h2>
157   * <p>
158   * Every object that is exposed from native code inherits from
159   * {@link com.avpkit.ferry.RefCounted}.
160   * </p>
161   * <p>
162   * Ferry works by implementing a reference-counted memory management scheme
163   * in native code that is then manipulated from Java so you don't have to
164   * (usually) think about when to release native memory. Every time an object
165   * is created in native memory it has its reference count incremented by one;
166   * and everywhere inside the code we take care to release a reference when
167   * we're done.
168   * </p>
169   * <p>
170   * This maps nicely to the Java model of memory management, but with the
171   * benefit that Java does all the releasing behind the scenes. When you pass
172   * an object from Native code to Java, Ferry makes sure it has a reference
173   * count incremented, and then when the Java Virtual Machine collects the
174   * instance, Ferry automatically decrements the reference count in native code.
175   * </p>
176   * <p>
177   * In fact, in theory all you need to do is make a finalize() method on the
178   * Java object that decrements the reference count in the native code and
179   * everyone goes home happy.
180   * </p>
181   * <p>
182   * So far so good, but it brings up a big problem:
183   * <ul>
184   * <li>
185   * Turns out that video, audio and packets can be fairly large objects. For
186   * example, a 640x480 YUV420P decoded video frame object will take up around
187   * 500K of memory. If those are allocated from native code, Java has no idea
188   * it got allocated; in fact the corresponding Java object will seem to only
189   * take up a few bytes of memory. Keep enough video frames around, and your
190   * Java process (that you expect to run in 64 Megs of heap memory) starts to
191   * consume large amounts of native memory. Not good.</li>
192   * <li>
193   * The Java virtual machine only collects garbage when it thinks it needs the
194   * space. However, because native code allocated the large chunks of memory,
195   * Java doesn't know that memory is being used. So it doesn't collect unused
196   * references, which if Ferry just used "finalize()" would mean that lots of
197   * unused memory might exist that clog up your system.</li>
198   * <li>
199   * Lastly, even if Java does do a garbage collection, it must make sure that
200   * all objects that have a finalize() method are first collected and put in a
201   * "safe-area" that awaits a second collection. On the second collection call,
202   * it starts calling finalize() on all those objects, but (don't ask why just
203   * trust us) it needs to dedicate a separate finalizer thread to this process.
204   * The result of this is if you allocate a lot of objects quickly, the
205   * finalizer thread can start to fall very far behind.</li>
206   * </ul>
207   * Now, aren't you sorry you asked. Here's the good news: the
208   * {@link com.avpkit.ferry.RefCounted} implementation solves all these
209   * problems for you.
210   * <p>
211   * How you ask:
212   * </p>
213   * <ul>
214   * <li>
215   * We use Java Weak References to determine if a native object is no longer
216   * used in Java. Ferry objects allocated from native code do not have finalizers.
217   * </li>
218   * <li>
219   * Then every-time you create a new Ferry object, we first make sure we do a
220   * mini-collection of all unused Ferry objects and release that native
221   * memory.</li>
222   * <li>
223   * Then, each Ferry object also maintains a reference to another object that
224   * DOES have a finalize() method and the only thing that method does is make
225   * sure another mini-collection is done. That way we can make sure memory is
226   * freed even if you never do another Ferry allocation.</li>
227   * <li>
228   * Lastly, we make sure that whenever we need large chunks of memory (for
229   * IPacket, IFrame and IAudioSamples interfaces) we can allocate those objects
230   * from Java, so Java ALWAYS knows just how much memory it's using.</li>
231   * </ul>
232   * The end result: you usually don't need to worry.
233   * <p>
234   * In the event you need to manage memory more explicitly, every Ferry object
235   * has a "copyReference()" method that will create a new Java object that
236   * points to the same underlying native object.
237   * <p>
238   * And in the unlikely event you want to control EXACTLY when a native object
239   * is released, each Ferry object has a {@link RefCounted#delete()} method
240   * that you can use. Once you call "delete()", you must ENSURE your object is
241   * never referenced again from that Java object -- Ferry tries to help you
242   * avoid crashes if you accidentally use an object after deletion, but we
243   * cannot offer 100% protection (specifically if another thread is
244   * accessing that object EXACTLY when you {@link RefCounted#delete()} it). If
245   * you don't call {@link RefCounted#delete()}, we will call it at some point
246   * in the future, but you can't depend on when (and depending on the
247   * {@link MemoryModel} you are using, we may not be able to do it promptly).
248   * </p>
249   * <h2>What does all of this mean?</h2>
250   * <p>
251   * Well, it means if you're first writing code, don't worry about this. If
252   * you're instead trying to optimize for performance, first measure where your
253   * problems are, and if fingers are pointing at allocation in Ferry then start
254   * trying different models.
255   * </p>
256   * <p>
257   * But before you switch models, be sure to read the caveats and restrictions
258   * on each of the non {@link #JAVA_STANDARD_HEAP} models, and make sure you
259   * have a good understanding of how <a
260   * href="http://java.sun.com/docs/hotspot/gc5.0/gc_tuning_5.html"> Java
261   * Garbage Collection</a> works.
262   * </p>
263   * 
264   * @author aclarke
265   * 
266   */
267  public enum MemoryModel
268  {
269    /**
270     * <p>
271     * Large memory blocks are allocated in Java byte[] arrays, and passed back
272     * into native code. Releasing of underlying native resources happens behind
273     * the scenes with no management required on the programmer's part.
274     * </p>
275     * <h2>Speed</h2>
276     * <p>
277     * This is the slowest model available.
278     * </p>
279     * <p>
280     * The main decrease in speed occurs for medium-life-span objects. Short
281     * life-span objects (objects that die during the life-span of an
282     * incremental collection) are relatively efficient. Once an object makes it
283     * into the Tenured generation in Java, then unnecessary copying stops until
284     * the next full collection.
285     * </p>
286     * <p>
287     * However while in the Eden generation but surviving between incremental
288     * collections, large native buffers may get copied many times
289     * unnecessarily. This copying can have a significant performance impact.
290     * </p>
291     * <h2>Robustness</h2>
292     * <ol>
293     * 
294     * <li><strong>Allocation</strong>: Works as expected.</li>
295     * 
296     * <li><strong>Collection</strong>: Released either when
297     * <code>delete()</code> is called, the item is marked for collection, or
298     * we're in Low Memory conditions and the item is unused.</li>
299     * 
300     * <li><strong>Low Memory</strong>: Very strong. In this model Java always
301     * knows exactly how much native heap space is being used, and can trigger
302     * collections at the right time.</li>
303     * 
304     * </ol>
305     * 
306     * <h2>Tuning Tips</h2>
307     * <p>
308     * When using this model, these tips may increase performance, although in
309     * some situations, may instead decrease your performance. Always measure.
310     * </p>
311     * <ul>
312     * <li>Try different garbage collectors in Java. To try the parallel
313     * incremental collector, start your Java process with these options:
314     * 
315     * <pre>
316     * -XX:+UseParallelGC
317     * </pre>
318     * 
319     * The concurrent garbage collector works well too. To use that pass these
320     * options to java on startup:
321     * 
322     * <pre>
323     * -XX:+UseConcMarkSweepGC -XX:+UseParNewGC
324     * </pre>
325     * 
326     * </li>
327     * <li>If you are not re-using objects across Ferry calls, ensure your
328     * objects are short-lived; null out references when done.</li>
329     * <li>Potentially try caching objects and reusing large objects across
330     * multiple calls -- this may give those objects time to move into the
331     * Tenured generation and reduce the copying overhead.</li>
332     * <li>Explicitly manage Ferry memory yourself by calling
333     * <code>delete()</code> on every {@link RefCounted} object when done with
334     * your objects to let Java know it doesn't need to copy the item across a
335     * collection. You can also use <code>copyReference()</code> to get a new
336     * Java version of the same Ferry object that you can pass to another thread
337     * if you don't know when <code>delete()</code> can be safely called.</li>
338     * 
339     * <li>Try a different {@link MemoryModel}.</li>
340     * </ul>
341     */
342    JAVA_STANDARD_HEAP(0),
343
344    /**
345     * Large memory blocks are allocated as Direct {@link ByteBuffer} objects
346     * (as returned from {@link ByteBuffer#allocateDirect(int)}).
347     * <p>
348     * This model is not recommended. It is faster than
349     * {@link #JAVA_STANDARD_HEAP}, but because of how Sun implements direct
350     * buffers, it works poorly in low memory conditions. This model has all the
351     * caveats of the {@link #NATIVE_BUFFERS} model, but allocation is slightly
352     * slower.
353     * </p>
354     * <h2>Speed</h2>
355     * <p>
356     * This is the 2nd fastest model available. In tests it is generally 20-30%
357     * faster than the {@link #JAVA_STANDARD_HEAP} model.
358     * </p>
359     * 
360     *
361     * <p>
362     * It is using Java to allocate direct memory, which is slightly slower than
363     * using {@link #NATIVE_BUFFERS}, but much faster than using the
364     * {@link #JAVA_STANDARD_HEAP} model.
365     * </p>
366     * <p>
367     * The downside is that for high-performance applications, you may need to
368     * explicitly manage {@link RefCounted} object life-cycles with
369     * {@link RefCounted#delete()} to ensure direct memory is released in a
370     * timely manner.
371     * </p>
372     * <h2>Robustness</h2>
373     * <ol>
374     * 
375     * <li><strong>Allocation</strong>: Weak. Java controls allocations of
376     * direct memory from a separate heap (yet another one), and has an
377     * additional tuning option to set that. By default on most JVMs, this heap
378     * size is set to 64mb which is very low for video processing (queue up 100
379     * images and see what we mean).</li>
380     * 
381     * <li><strong>Collection</strong>: Released either when
382     * <code>delete()</code> is called, or when the item is marked for
383     * collection</li>
384     * 
385     * <li><strong>Low Memory</strong>: Weak. In this model Java knows how much
386     * <strong>direct</strong> memory it has allocated, but it does not use the
387     * size of the Direct Heap to influence when it collects the normal
388     * non-direct Java Heap -- and our allocation scheme depends on normal Java
389     * Heap collection. Therefore it can fail to run collections in a timely
390     * manner because it thinks the standard heap has plenty of space to grow.
391     * This may cause failures.</li>
392     * 
393     * </ol>
394     * 
395     * <h2>Tuning Tips</h2>
396     * <p>
397     * When using this model, these tips may increase performance, although in
398     * some situations, may instead decrease performance. Always measure.
399     * </p>
400     * <ul>
401     * <li>Increase the size of Sun's Java's direct buffer heap. Sun's Java
402     * implementation has an artificially low default separate heap for direct
403     * buffers (64mb). To make it higher pass this option to Java at startup:
404     * 
405     * <pre>
406     * -XX:MaxDirectMemorySize=&lt;size&gt;
407     * </pre>
408     * 
409     * </li>
410     * <li>Paradoxically, try decreasing the size of your Java Heap if you get
411     * {@link OutOfMemoryError} exceptions. Objects that are allocated in native
412     * memory have a small proxy object representing them in the Java Heap. By
413     * decreasing your heap size, those proxy objects will exert more collection
414     * pressure, and hopefully cause Java to do incremental collections more
415     * often (and notice your unused objects). To set the maximum size of your
416     * java heap, pass this option to java on startup:
417     * 
418     * <pre>
419     * -Xmx&lt;size&gt;
420     * </pre>
421     * 
422     * To change the minimum size of your java heap, pass this option to java on
423     * startup:
424     * 
425     * <pre>
426     * -Xms&lt;size&gt;
427     * </pre>
428     * 
429     * </li>
431     * <li>Try different garbage collectors in Java. To try the parallel
432     * incremental collector, start your Java process with these options:
433     * 
434     * <pre>
435     * -XX:+UseParallelGC
436     * </pre>
437     * 
438     * The concurrent garbage collector works well too. To use that pass these
439     * options to java on startup:
440     * 
441     * <pre>
442     * -XX:+UseConcMarkSweepGC -XX:+UseParNewGC
443     * </pre>
444     * 
445     * </li>
446     * <li>If you are not re-using objects across Ferry calls, ensure your
447     * objects are short-lived; null out references when done.</li>
448     * <li>Potentially try caching objects and reusing large objects across
449     * multiple calls -- this may give those objects time to move into the
450     * Tenured generation and reduce the copying overhead.</li>
451     * <li>Explicitly manage Ferry memory yourself by calling
452     * <code>delete()</code> on every {@link RefCounted} object when done with
453     * your objects to let Java know it doesn't need to copy the item across a
454     * collection. You can also use <code>copyReference()</code> to get a new
455     * Java version of the same Ferry object that you can pass to another thread
456     * if you don't know when <code>delete()</code> can be safely called.</li>
457     * 
458     * <li>Try the
459     * {@link MemoryModel#JAVA_DIRECT_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION}
460     * model.</li>
461     * 
462     * </ul>
463     */
464    JAVA_DIRECT_BUFFERS(1),
465
466    /**
467     * Large memory blocks are allocated as Direct {@link ByteBuffer} objects
468     * (as returned from {@link ByteBuffer#allocateDirect(int)}), but the Java
469     * standard-heap is <i>informed</i> of the allocation by also attempting to
470     * quickly allocate (and release) a buffer of the same size on the standard
471     * heap.
472     * <p>
473     * This model can work well if your application is mostly single-threaded,
474     * and your Ferry application is doing most of the memory allocation in your
475     * program. The trick of <i>informing</i> Java will put pressure on the JVM
476     * to collect appropriately, but by not keeping the references we avoid
477     * unnecessary copying for objects that survive collections.
478     * </p>
479     * <p>
480     * This heuristic is not failsafe though, and can still lead to collections
481     * not occurring at the right time for some applications.
482     * </p>
483     * <p>
484     * It is similar to the
485     * {@link #NATIVE_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION} model and in
486     * general we recommend that model over this one.
487     * </p>
488     * <h2>Speed</h2>
489     * <p>
490     * This model trades off some robustness for some speed. In tests it is
491     * generally 10-20% faster than the {@link #JAVA_STANDARD_HEAP} model.
492     * </p>
493     * <p>
494     * It is worth testing as a way of avoiding the explicit memory management
495     * needed to effectively use the {@link #JAVA_DIRECT_BUFFERS} model.
496     * However, the heuristic used is not fool-proof, and therefore may
497     * sometimes lead to unnecessary collection or {@link OutOfMemoryError}
498     * because Java didn't collect unused references in the standard heap in
499     * time (and hence did not release underlying native references).
500     * </p>
501     * <h2>Robustness</h2>
502     * <ol>
503     * 
504     * <li><strong>Allocation</strong>: Good. Java controls allocations of
505     * direct memory from a separate heap (yet another one), and has an
506     * additional tuning option to set that. By default on most JVMs, this heap
507     * size is set to 64mb which is very low for video processing (queue up 100
508     * images and see what we mean). With this option though we <i>inform</i>
509     * Java of the allocation in the Direct heap, and this will often encourage
510     * Java to collect memory on a more timely basis.</li>
511     * 
512     * <li><strong>Collection</strong>: Good. Released either when
513     * <code>delete()</code> is called, or when the item is marked for
514     * collection. Collections happen more frequently than under the
515     * {@link #JAVA_DIRECT_BUFFERS} model due to <i>informing</i> the standard
516     * heap at allocation time.</li>
517     * 
518     * <li><strong>Low Memory</strong>: Good. Especially for mostly
519     * single-threaded applications, the collection pressure introduced on
520     * allocation will lead to more timely collections to avoid
521     * {@link OutOfMemoryError} errors on the Direct heap.</li>
522     * </ol>
523     * 
524     * <h2>Tuning Tips</h2>
525     * <p>
526     * When using this model, these tips may increase performance, although in
527     * some situations, may instead decrease performance. Always measure.
528     * </p>
529     * <ul>
530     * <li>Increase the size of Sun's Java's direct buffer heap. Sun's Java
531     * implementation has an artificially low default separate heap for direct
532     * buffers (64mb). To make it higher pass this option to Java at startup:
533     * 
534     * <pre>
535     * -XX:MaxDirectMemorySize=&lt;size&gt;
536     * </pre>
537     * 
538     * </li>
539     * <li>Paradoxically, try decreasing the size of your Java Heap if you get
540     * {@link OutOfMemoryError} exceptions. Objects that are allocated in native
541     * memory have a small proxy object representing them in the Java Heap. By
542     * decreasing your heap size, those proxy objects will exert more collection
543     * pressure, and hopefully cause Java to do incremental collections more
544     * often (and notice your unused objects). To set the maximum size of your
545     * java heap, pass this option to java on startup:
546     * 
547     * <pre>
548     * -Xmx&lt;size&gt;
549     * </pre>
550     * 
551     * To change the minimum size of your java heap, pass this option to java on
552     * startup:
553     * 
554     * <pre>
555     * -Xms&lt;size&gt;
556     * </pre>
557     * 
558     * </li>
560     * <li>Try different garbage collectors in Java. To try the parallel
561     * incremental collector, start your Java process with these options:
562     * 
563     * <pre>
564     * -XX:+UseParallelGC
565     * </pre>
566     * 
567     * The concurrent garbage collector works well too. To use that pass these
568     * options to java on startup:
569     * 
570     * <pre>
571     * -XX:+UseConcMarkSweepGC -XX:+UseParNewGC
572     * </pre>
573     * 
574     * </li>
575     * <li>If you are not re-using objects across Ferry calls, ensure your
576     * objects are short-lived; null out references when done.</li>
577     * <li>Potentially try caching objects and reusing large objects across
578     * multiple calls -- this may give those objects time to move into the
579     * Tenured generation and reduce the copying overhead.</li>
580     * <li>Explicitly manage Ferry memory yourself by calling
581     * <code>delete()</code> on every {@link RefCounted} object when done with
582     * your objects to let Java know it doesn't need to copy the item across a
583     * collection. You can also use <code>copyReference()</code> to get a new
584     * Java version of the same Ferry object that you can pass to another thread
585     * if you don't know when <code>delete()</code> can be safely called.</li>
586     * 
587     * <li>Try the {@link #JAVA_STANDARD_HEAP} model.</li>
588     * 
589     * </ul>
590     */
591    JAVA_DIRECT_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION(2),
592
593    /**
594     * Large memory blocks are allocated in native memory, completely bypassing
595     * the Java heap.
596     * <p>
597     * It is <strong>much</strong> faster than the {@link #JAVA_STANDARD_HEAP},
598     * but much less robust.
599     * </p>
600     * <h2>Speed</h2>
601     * <p>
602     * This is the fastest model available. In tests it is generally 30-40%
603     * faster than the {@link #JAVA_STANDARD_HEAP} model.
604     * </p>
605     * 
606     *
607     * <p>
608     * It is using the native operating system to allocate direct memory, which
609     * is slightly faster than using {@link #JAVA_DIRECT_BUFFERS}, and much
610     * faster than using the {@link #JAVA_STANDARD_HEAP} model.
611     * </p>
612     * <p>
613     * The downside is that for high-performance applications, you may need to
614     * explicitly manage {@link RefCounted} object life-cycles with
615     * {@link RefCounted#delete()} to ensure native memory is released in a
616     * timely manner.
617     * </p>
618     * <h2>Robustness</h2>
619     * <ol>
620     * 
621     * <li><strong>Allocation</strong>: Weak. Allocations using
622     * <code>make</code> and releasing objects with {@link RefCounted#delete()}
623     * works like normal, but because Java has no idea of how much space is
624     * actually allocated in native memory, it may not collect
625     * {@link RefCounted} objects as quickly as you need it to (it will
626     * eventually collect and free all references though).</li>
627     * 
628     * <li><strong>Collection</strong>: Released either when
629     * <code>delete()</code> is called, or when the item is marked for
630     * collection</li>
631     * 
632     * <li><strong>Low Memory</strong>: Weak. In this model Java has no idea how
633     * much native memory is allocated, and therefore does not use that
634     * knowledge in its determination of when to collect. This can lead to
635     * {@link RefCounted} objects you created surviving longer than you want to,
636     * and therefore not releasing native memory in a timely fashion.</li>
637     * </ol>
638     * 
639     * <h2>Tuning Tips</h2>
640     * <p>
641     * When using this model, these tips may increase performance, although in
642     * some situations, may instead decrease performance. Always measure.
643     * </p>
644     * <ul>
645     * <li>Paradoxically, try decreasing the size of your Java Heap if you get
646     * {@link OutOfMemoryError} exceptions. Objects that are allocated in native
647     * memory have a small proxy object representing them in the Java Heap. By
648     * decreasing your heap size, those proxy objects will exert more collection
649     * pressure, and hopefully cause Java to do incremental collections more
650     * often (and notice your unused objects). To set the maximum size of your
651     * java heap, pass this option to java on startup:
652     * 
653     * <pre>
654     * -Xmx&lt;size&gt;
655     * </pre>
656     * 
657     * To change the minimum size of your java heap, pass this option to java on
658     * startup:
659     * 
660     * <pre>
661     * -Xms&lt;size&gt;
662     * </pre>
663     * 
664     * </li>
666     * <li>Try different garbage collectors in Java. To try the parallel
667     * incremental collector, start your Java process with these options:
668     * 
669     * <pre>
670     * -XX:+UseParallelGC
671     * </pre>
672     * 
673     * The concurrent garbage collector works well too. To use that pass these
674     * options to java on startup:
675     * 
676     * <pre>
677     * -XX:+UseConcMarkSweepGC -XX:+UseParNewGC
678     * </pre>
679     * 
680     * </li>
681     * <li>Use the {@link JNIMemoryManager#startCollectionThread()} method to
682     * start up a thread dedicated to releasing objects as soon as they are
683     * enqueued in a {@link ReferenceQueue}, rather than (the default) waiting for
684     * the next Ferry allocation or {@link JNIMemoryManager#collect()} explicit
685     * call. Or periodically call {@link JNIMemoryManager#collect()} yourself.</li>
686     * <li>Cache long lived objects and reuse them across calls to avoid
687     * allocations.</li>
688     * <li>Explicitly manage Ferry memory yourself by calling
689     * <code>delete()</code> on every {@link RefCounted} object when done with
690     * your objects to let Java know it doesn't need to copy the item across a
691     * collection. You can also use <code>copyReference()</code> to get a new
692     * Java version of the same Ferry object that you can pass to another thread
693     * if you don't know when <code>delete()</code> can be safely called.</li>
694     * 
695     * <li>Try the
696     * {@link MemoryModel#NATIVE_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION} model.
697     * </li>
698     * 
699     * </ul>
700     */
701    NATIVE_BUFFERS(3),
702    /**
703     * Large memory blocks are allocated in native memory, completely bypassing
704     * the Java heap, but Java is <i>informed</i> of the allocation by briefly
705     * creating (and immediately releasing) a Java standard heap byte[] array of
706     * the same size.
707     * <p>
708     * It is faster than the {@link #JAVA_STANDARD_HEAP}, but less robust.
709     * </p>
710     * <p>
711     * This model can work well if your application is mostly single-threaded,
712     * and your Ferry application is doing most of the memory allocation in your
713     * program. The trick of informing Java will put pressure on the JVM to
714     * collect appropriately, but by not keeping the references to the byte[]
715     * array we temporarily allocate, we avoid unnecessary copying for objects
716     * that survive collections.
717     * </p>
718     * <p>
719     * This heuristic is not failsafe though, and can still lead to collections
720     * not occurring at the right time for some applications.
721     * </p>
722     * <p>
723     * It is similar to the
724     * {@link #JAVA_DIRECT_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION} model.
725     * </p>
726     * <h2>Speed</h2>
727     * <p>
728     * In tests this model is generally 25-30% faster than the
729     * {@link #JAVA_STANDARD_HEAP} model.
     * </p>
732     * <p>
733     * It is using the native operating system to allocate direct memory, which
734     * is slightly faster than using
735     * {@link #JAVA_DIRECT_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION}, and much
736     * faster than using the {@link #JAVA_STANDARD_HEAP} model.
737     * </p>
738     * <p>
739     * It is worth testing as a way of avoiding the explicit memory management
740     * needed to effectively use the {@link #NATIVE_BUFFERS} model. However, the
741     * heuristic used is not fool-proof, and therefore may sometimes lead to
742     * unnecessary collection or {@link OutOfMemoryError} because Java didn't
743     * collect unused references in the standard heap in time (and hence did not
744     * release underlying native references).
745     * </p>
746     * <h2>Robustness</h2>
747     * <ol>
748     * 
749     * <li><strong>Allocation</strong>: Good. With this option we allocate
750     * large, long-lived memory from the native heap, but we <i>inform</i> Java
751     * of the allocation in the Direct heap, and this will often encourage Java
752     * to collect memory on a more timely basis.</li>
753     * 
754     * <li><strong>Collection</strong>: Good. Released either when
755     * <code>delete()</code> is called, or when the item is marked for
756     * collection. Collections happen more frequently than under the
757     * {@link #NATIVE_BUFFERS} model due to <i>informing</i> the standard heap
758     * at allocation time.</li>
759     * 
760     * <li><strong>Low Memory</strong>: Good. Especially for mostly
761     * single-threaded applications, the collection pressure introduced on
762     * allocation will lead to more timely collections to avoid
763     * {@link OutOfMemoryError} errors on the native heap.</li>
764     * </ol>
765     * 
766     * <h2>Tuning Tips</h2>
767     * <p>
768     * When using this model, these tips may increase performance, although in
769     * some situations, may instead decrease performance. Always measure.
770     * </p>
771     * <ul>
772     * <li>Paradoxically, try decreasing the size of your Java Heap if you get
773     * {@link OutOfMemoryError} exceptions. Objects that are allocated in native
774     * memory have a small proxy object representing them in the Java Heap. By
775     * decreasing your heap size, those proxy objects will exert more collection
776     * pressure, and hopefully cause Java to do incremental collections more
777     * often (and notice your unused objects). To set the maximum size of your
778     * java heap, pass this option to java on startup:
779     * 
780     * <pre>
781     * -Xmx&lt;size&gt;
782     * </pre>
783     * 
784     * To change the minimum size of your java heap, pass this option to java on
785     * startup:
786     * 
787     * <pre>
788     * -Xms&lt;size&gt;
789     * </pre>
790     * 
791     * </li>
     * <li>Try different garbage collectors in Java. To try the parallel
     * incremental collector, start your Java process with these options:
795     * 
796     * <pre>
797     * -XX:+UseParallelGC
798     * </pre>
799     * 
800     * The concurrent garbage collector works well too. To use that pass these
801     * options to java on startup:
802     * 
803     * <pre>
804     * -XX:+UseConcMarkSweepGC -XX:+UseParNewGC
805     * </pre>
806     * 
807     * </li>
808     * <li>Use the {@link JNIMemoryManager#startCollectionThread()} method to
809     * start up a thread dedicated to releasing objects as soon as they are
     * enqueued in a {@link ReferenceQueue}, rather than (the default) waiting for
811     * the next Ferry allocation or {@link JNIMemoryManager#collect()} explicit
812     * call. Or periodically call {@link JNIMemoryManager#collect()} yourself.</li>
813     * <li>Cache long lived objects and reuse them across calls to avoid
814     * allocations.</li>
815     * <li>Explicitly manage Ferry memory yourself by calling
816     * <code>delete()</code> on every {@link RefCounted} object when done with
817     * your objects to let Java know it doesn't need to copy the item across a
818     * collection. You can also use <code>copyReference()</code> to get a new
819     * Java version of the same Ferry object that you can pass to another thread
820     * if you don't know when <code>delete()</code> can be safely called.</li>
821     * 
822     * <li>Try the
823     * {@link MemoryModel#NATIVE_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION} model.
824     * </li>
825     * 
826     * </ul>
827     */
828    NATIVE_BUFFERS_WITH_STANDARD_HEAP_NOTIFICATION(4);
829
830    /**
831     * The integer native mode that the JNIMemoryManager.cpp file expects
832     */
833    private final int mNativeValue;
834
835    /**
836     * Create a {@link MemoryModel}.
837     * 
838     * @param nativeValue What we actually use in native code.
839     */
840    private MemoryModel(int nativeValue)
841    {
842      mNativeValue = nativeValue;
843    }
844
845    /**
846     * Get the native value to pass to native code
847     * 
848     * @return a value.
849     */
850    public int getNativeValue()
851    {
852      return mNativeValue;
853    }
854
855  }
856
857  /**
   * Our singleton (classloader-wide) manager.
859   */
860  private static final JNIMemoryManager mMgr = new JNIMemoryManager();
861  final private Logger log = LoggerFactory.getLogger(this.getClass());
862
863  private static MemoryModel mMemoryModel;
864  /*
865   * This is executed in hot code, so instead we cache the value
866   * and assume it's only set from Java code.
867   */
868  static {
869    int model = 0;
870    mMemoryModel = MemoryModel.JAVA_STANDARD_HEAP;
871    model = FerryJNI.getMemoryModel();
872    for (MemoryModel candidate : MemoryModel.values())
873      if (candidate.getNativeValue() == model)
874        mMemoryModel = candidate;
875  }
876
877  /**
878   * Get the global {@link JNIMemoryManager} being used.
879   * 
880   * @return the manager
881   */
882  static public JNIMemoryManager getMgr()
883  {
884    return mMgr;
885  }
886
887  /**
888   * A convenience way to call {@link #getMgr()}.{@link #gc()}.
889   * <p>
890   * Really somewhat mis-named, as this will cause us to free any
891   * native memory allocated by ferry, but won't cause us to walk
892   * our own internal heap -- that's only done on allocation.
893   * </p>
894   */
895  static public void collect()
896  {
897    getMgr().gc();
898  }
899
900  /**
901   * The reference queue that {@link RefCounted} objects will eventually find
902   * their way to.
903   */
904  private final ReferenceQueue<Object> mRefQueue;
905
906  /**
907   * Used for managing our collect-and-sweep JNIReference heap.
908   */
909  private final AtomicBoolean mSpinLock;
910  private final Lock mLock;
911  private JNIReference mValidReferences[];
912  private volatile int mNextAvailableReferenceSlot;
913  private volatile int mMaxValidReference;
914  private int mMinimumReferencesToCache;
915  private double mExpandIncrement;
916  private double mShrinkScaleFactor;
917  private double mMaxFreeRatio;
918  private double mMinFreeRatio;
919  
920  /**
921   * The constructor is package level so others can't create it.
922   */
923  JNIMemoryManager()
924  {
925    mRefQueue = new ReferenceQueue<Object>();
926    mCollectionThread = null;
927    mLock = new ReentrantLock();
928    mSpinLock = new AtomicBoolean(false);
929    final int minReferences=1024*4;
930    mMinimumReferencesToCache = minReferences;
931    mExpandIncrement = 0.20; // expand by 20% at a time
932    mShrinkScaleFactor = 0.25; // shrink by 25% of mExpandIncrement
933    mValidReferences = new JNIReference[minReferences]; 
934    mMaxValidReference = minReferences;
935    mNextAvailableReferenceSlot = 0;
936    mMaxFreeRatio = 0.70;
937    mMinFreeRatio = 0.30;
938  }
939  
940  private void blockingLock()
941  {
942    mLock.lock();
943    while(!mSpinLock.compareAndSet(false, true))
944      ; // grab the spin lock
945  }
946  private void blockingUnlock()
947  {
948    final boolean result = mSpinLock.compareAndSet(true, false);
949    assert result : "Should never ever be unlocked here";
950    mLock.unlock();
951  }
952
953  /**
954   * Sets the minimum number of references to cache.
955   * <p>
956   * The {@link JNIMemoryManager} needs to cache weak references
957   * to allocated Ferry object.  This setting controls the minimum
958   * size of that cache.
959   * </p>
960   * 
961   * @param minimumReferencesToCache Minimum number of references to cache.
962   * @throws IllegalArgumentException if <= 0
963   */
964  public void setMinimumReferencesToCache(int minimumReferencesToCache)
965  {
966    if (minimumReferencesToCache <= 0)
967      throw new IllegalArgumentException("Must pass in a positive integer");
968    mMinimumReferencesToCache = minimumReferencesToCache;
969  }
970
971  /**
972   * Get the minimum number of references to cache.
973   * @return The minimum number of references to cache.
974   * @see #setMinimumReferencesToCache(int)
975   */
976  public int getMinimumReferencesToCache()
977  {
978    return mMinimumReferencesToCache;
979  }
980
981  /**
982   * Get the percentage value we will increment the reference cache by
983   * if we need to expand it.
984   * @param expandIncrement A percentage we will increment the reference cache
985   *  by if we need to expand it.
986   * @throws IllegalArgumentException if <= 0
987   */
988  public void setExpandIncrement(double expandIncrement)
989  {
990    if (expandIncrement <= 0)
991      throw new IllegalArgumentException("Must pass in positive percentage");
992    mExpandIncrement = expandIncrement/100;
993  }
994
995  /**
996   * Get the percentage value we will increment the reference cache by
997   * if we need to expand it.
998   * @return the percentage value.
999   * @see #setExpandIncrement(double)
1000   */
1001  public double getExpandIncrement()
1002  {
1003    return mExpandIncrement*100;
1004  }
1005
1006  /**
1007   * Set the percentage value we will shrink the reference cache by when
1008   * we determine shrinking is possible.
1009   * <p>
1010   * If we decide to shrink, the amount we shrink the cache by is
1011   * {@link #getExpandIncrement()}*{@link #getShrinkFactor()}.
1012   * </p>
1013   * 
1014   * @param shrinkFactor The shrink percentage.
1015   * @see #setExpandIncrement(double)
1016   * @throws IllegalArgumentException if shrinkFactor <=0 or >= 100.
1017   */
1018  public void setShrinkFactor(double shrinkFactor)
1019  {
1020    if (shrinkFactor <= 0 || shrinkFactor >= 100)
1021      throw new IllegalArgumentException("only 0 < shrinkFactor < 100 allowed");
1022    mShrinkScaleFactor = shrinkFactor/100;
1023  }
1024
1025  /**
1026   * Get the shrink factor.
1027   * @return the shrink factor.
1028   * @see #setShrinkFactor(double)
1029   */
1030  public double getShrinkFactor()
1031  {
1032    return mShrinkScaleFactor*100;
1033  }
1034
1035
1036  /**
1037   * Sets the maximum ratio of free space we'll allow without
1038   * trying to shrink the memory manager heap.
1039   * @param maxFreeRatio The maximum amount (0 < maxFreeRatio < 100) of
1040   *   free space.
1041   */
1042  public void setMaxFreeRatio(double maxFreeRatio)
1043  {
1044    mMaxFreeRatio = maxFreeRatio/100;
1045  }
1046
1047  /**
1048   * Get the maximum ratio of free space we'll allow in a memory manager heap
1049   * before trying to shrink on the next collection. 
1050   * @return the ratio of free space
1051   * @see #setMaxFreeRatio(double)
1052   */
1053  public double getMaxFreeRatio()
1054  {
1055    return mMaxFreeRatio*100;
1056  }
1057
1058  /**
1059   * Sets the minimum ratio of free space to total memory manager heap
1060   * size we'll allow before expanding the heap.  
1061   * @param minFreeRatio The minimum free ratio.
1062   */
1063  public void setMinFreeRatio(double minFreeRatio)
1064  {
1065    mMinFreeRatio = minFreeRatio/100;
1066  }
1067
1068  /**
1069   * Gets the minimum ratio of free space to total memory manager heap
1070   * size we'll allow before expanding the heap.
1071   * @return The minimum free ratio.
1072   * @see #setMinFreeRatio(double)
1073   */
1074  public double getMinFreeRatio()
1075  {
1076    return mMinFreeRatio*100;
1077  }
1078
  /**
   * Compacts the reference cache by dropping deleted references, then
   * resizes the backing array based on the resulting free-space ratio.
   * <p>
   * Callers MUST hold both the big lock and the spin lock (i.e. be inside
   * {@link #blockingLock()}/{@link #blockingUnlock()}), since this method
   * replaces mValidReferences, mMaxValidReference and
   * mNextAvailableReferenceSlot as a unit.
   * </p>
   * @return the number of surviving (not yet deleted) references.
   */
  private int sweepAndCollect()
  {
    // time to sweep, collect, and possibly grow.
    JNIReference[] survivors = new JNIReference[mMaxValidReference];
    int numSurvivors=0;
    final int numValid = mMaxValidReference;
    // compact: copy every live reference to the front of the new array
    for(int i = 0; i < numValid; i++)
    {
      JNIReference victim = mValidReferences[i];
      if (victim != null && !victim.isDeleted())
      {
        survivors[numSurvivors] = victim;
        ++numSurvivors;
      }
    }
    final int survivorLength = survivors.length;
    int freeSpace = survivorLength - numSurvivors;
    if (freeSpace > survivorLength * mMaxFreeRatio)
    {
      // time to shrink
      int newSize = (int) (survivorLength*(1.0 - mExpandIncrement*mShrinkScaleFactor));
      // never shrink smaller than the minimum
      if (newSize >= mMinimumReferencesToCache) {
        JNIReference[] shrunk = new JNIReference[newSize];
        System.arraycopy(survivors, 0, shrunk, 0, newSize);
        survivors = shrunk;
      }
    } else if (freeSpace <= survivorLength*mMinFreeRatio)
    {
      // time to expand
      int newSize = (int) (survivorLength*(1.0 + mExpandIncrement));
      JNIReference[] expanded = new JNIReference[newSize];
      System.arraycopy(survivors, 0, expanded, 0, survivorLength);
      survivors = expanded;
    }
    // and swap in our new array
    // ORDER REALLY MATTERS HERE.  See #addReference
    mValidReferences = survivors;
    mMaxValidReference = survivors.length;
    mNextAvailableReferenceSlot = numSurvivors;
    return numSurvivors;
  }
1121
1122  /**
1123   * The collection thread if running.
1124   */
1125  private volatile Thread mCollectionThread;
1126
1127  /**
1128   * Get the underlying queue we track references with.
1129   * 
1130   * @return The queue.
1131   */
1132  ReferenceQueue<Object> getQueue()
1133  {
1134    return mRefQueue;
1135  }
1136
1137  /**
1138   * Get the number of Ferry objects we believe are still in use.
1139   * <p>
1140   * This may be different than what you think because the Java garbage
1141   * collector may not have collected all objects yet.
1142   * </p>
1143   * <p>
1144   * Also, this method needs to walk the entire ferry reference heap, so it
1145   * can be expensive and not accurate (as the value may change even before
1146   * this method returns).  Use only for debugging.
1147   * </p>
1148   * @return number of ferry objects in use.
1149   */
1150  public long getNumPinnedObjects()
1151  {
1152    long numPinnedObjects = 0;
1153    blockingLock();
1154    try {
1155      int numItems = mNextAvailableReferenceSlot;
1156      for(int i = 0; i < numItems; i++)
1157      {
1158        JNIReference ref = mValidReferences[i];
1159        if (ref != null && !ref.isDeleted())
1160          ++numPinnedObjects;
1161      }
1162    } finally {
1163      blockingUnlock();
1164    }
1165    return numPinnedObjects;
1166  }
1167
1168  /**
1169   * Dump the contents of our memory cache to the log.
1170   * <p>
1171   * This method requires a global lock in order to run so only
1172   * use for debugging.  
1173   * </p>
1174   */
1175  public void dumpMemoryLog()
1176  {
1177    blockingLock();
1178    try {
1179      int numItems = mNextAvailableReferenceSlot;
1180      log.debug("Memory slots in use: {}", numItems);
1181      for(int i = 0; i < numItems; i++)
1182      {
1183        JNIReference ref = mValidReferences[i];
1184        if (ref != null)
1185          log.debug("Slot: {}; Ref: {}", i, ref);
1186      }
1187    } finally {
1188      blockingUnlock();
1189    }
1190    return;
1191  }
1192  
1193  /**
1194   * Will object allocations contain debug information when allocated?
1195   * @see #setMemoryDebugging(boolean)
1196   */
1197  public boolean isMemoryDebugging()
1198  {
1199    return JNIReference.isMemoryDebugging();
1200  }
1201
1202  /**
1203   * Set whether the {@link JNIMemoryManager} should cause objects
1204   * to be allocated with debugging information.  This is false
1205   * by default as it causes a slight performance hit per-allocation.
1206   * <p>
1207   * If true, then each allocation after setting to true will remember
1208   * the class of each object allocated, and the unique java hash
1209   * code ({@link Object#hashCode()}) of each object allocated.  Then
1210   * in calls to {@link #dumpMemoryLog()}, those classes and hash
1211   * values will also be printed.
1212   * </p>
1213   * @param value true to turn on memory debugging; false to turn it off.
1214   */
1215  public void setMemoryDebugging(boolean value)
1216  {
1217    JNIReference.setMemoryDebugging(value);
1218  }
1219  
1220  /**
1221   * A finalizer for the memory manager itself. It just calls internal garbage
1222   * collections and then exits.
1223   */
1224  public void finalize()
1225  {
1226    /**
1227     * 
1228     * This may end up "leaking" some memory if all Ferry objects have not
1229     * otherwise been collected, but this is not a huge problem for most
1230     * applications, as it's only called when the class loader is exiting.
1231     */
1232    gc();
1233  }
1234
1235  /**
1236   * Add a reference to the set of references we'll collect.
1237   * 
1238   * @param ref The reference to collect.
1239   * @return true if already in list; false otherwise.
1240   */
1241  final boolean addReference(final JNIReference ref)
1242  {
1243    /* Implementation note: This method is extremely
1244     * hot, and so I've unrolled the lock and unlock
1245     * methods from above.  Take care if you change
1246     * them to change the unrolled versions here.
1247     * 
1248     */
1249    // First try to grab the non blocking lock
1250    boolean gotNonblockingLock = false;
1251    gotNonblockingLock = mSpinLock.compareAndSet(false, true);
1252    if (gotNonblockingLock)
1253    {
1254      final int slot = mNextAvailableReferenceSlot++;
1255      if (slot < mMaxValidReference)
1256      {
1257        mValidReferences[slot] = ref;
1258        // unlock the non-blocking lock, and progress to a full lock.
1259        final boolean result = mSpinLock.compareAndSet(true, false);
1260        assert result : "Should never be unlocked here";
1261        return true;
1262      }
1263      // try the big lock without blocking
1264      if (!mLock.tryLock()) {
1265        // we couldn't get the big lock, so release the spin lock
1266        // and try getting the bit lock while blocking
1267        gotNonblockingLock = false;
1268        mSpinLock.compareAndSet(true, false);
1269      }
1270    }
1271    // The above code needs to make sure that we never
1272    // have gotNonblockingLock set, unless we have both
1273    // the spin lock and the big lock.
1274    if (!gotNonblockingLock){
1275      mLock.lock();
1276      while(!mSpinLock.compareAndSet(false, true))
1277        ; // grab the spin lock
1278    }
1279    try {
1280      int slot = mNextAvailableReferenceSlot++;
1281      if (slot >= mMaxValidReference)
1282      {
1283        sweepAndCollect();
1284        slot = mNextAvailableReferenceSlot++;
1285      }
1286      mValidReferences[slot] = ref;
1287    } finally {
1288      final boolean result = mSpinLock.compareAndSet(true, false);
1289      assert result : "Should never ever be unlocked here";
1290      mLock.unlock();
1291    }
1292    return true;
1293  }
1294
1295  /**
1296   * Do a Ferry Garbage Collection.
1297   * <p>
1298   * This takes all Ferry objects that are no longer reachable and deletes the
1299   * underlying native memory. It is called every time you allocate a new Ferry
1300   * object to ensure Ferry is freeing up native objects as soon as possible
1301   * (rather than waiting for the potentially slow finalizer thread). It is also
1302   * called via a finalizer on an object that is referenced by the Ferry'ed
1303   * object (that way, the earlier of the next Ferry allocation, or the
1304   * finalizer thread, frees up unused native memory). Lastly, you can use
1305   * {@link #startCollectionThread()} to start up a thread to call this
1306   * automagically for you (and that thread will exit when your JVM exits).
1307   * </p>
1308   */
1309  public void gc()
1310  {
1311    gc(false);
1312  }
1313
1314  /**
1315   * Does a Ferry Garbage Collection, and also sweeps our internal
1316   * {@link JNIReference} heap to remove any lightweight references we may
1317   * have left around.
1318   * @param doSweep if true, we sweep the heap.  This involves a global lock
1319   *   and so should be used sparingly.
1320   */
1321  public void gc(boolean doSweep)
1322  {
1323    gcInternal();
1324    if (doSweep) {
1325      blockingLock();
1326      try {
1327        sweepAndCollect();
1328      } finally {
1329        blockingUnlock();
1330      }
1331    }
1332  }
1333  /**
1334   * The actual GC; 
1335   */
1336  void gcInternal()
1337  {
1338    JNIReference ref = null;
1339    while ((ref = (JNIReference) mRefQueue.poll()) != null)
1340    {
1341      ref.delete();
1342    }
1343  }
1344
1345  /**
1346   * Starts a new Ferry collection thread that will wake up whenever a memory
1347   * reference needs clean-up from native code.
1348   * <p>
1349   * This thread is not started by default as Ferry calls {@link #gc()}
1350   * internally whenever a new Ferry object is allocated. But if you're
1351   * caching Ferry objects and hence avoiding new allocations, you may want to
1352   * call this to ensure all objects are promptly collected.
1353   * </p>
1354   * <p>
1355   * This call is ignored if the collection thread is already running.
1356   * </p>
1357   * <p>
1358   * The thread can be stopped by calling {@link #stopCollectionThread()}, and
1359   * will also exit if interrupted by Java.
1360   * </p>
1361   */
1362  public void startCollectionThread()
1363  {
1364    synchronized (this)
1365    {
1366      if (mCollectionThread != null)
1367        throw new RuntimeException("Thread already running");
1368
1369      mCollectionThread = new Thread(new Runnable()
1370      {
1371
1372        public void run()
1373        {
1374          JNIReference ref = null;
1375          try
1376          {
1377            while (true)
1378            {
1379              ref = (JNIReference) mRefQueue.remove();
1380              if (ref != null)
1381                ref.delete();
1382            }
1383          }
1384          catch (InterruptedException ex)
1385          {
1386            synchronized (JNIMemoryManager.this)
1387            {
1388              mCollectionThread = null;
1389              // reset the interruption
1390              Thread.currentThread().interrupt();
1391            }
1392            return;
1393          }
1394
1395        }
1396      }, "AVPKit Ferry Collection Thread");
1397      mCollectionThread.setDaemon(true);
1398      mCollectionThread.start();
1399    }
1400  }
1401
1402  /**
1403   * Stops the Ferry collection thread if running. This does nothing if no
1404   * collection thread is running.
1405   */
1406  public void stopCollectionThread()
1407  {
1408    synchronized (this)
1409    {
1410      if (mCollectionThread != null)
1411        mCollectionThread.interrupt();
1412    }
1413  }
1414
1415  /**
1416   * Get the {@link MemoryModel} that Ferry is using for allocating large memory
1417   * blocks.
1418   * 
1419   * @return the memory model currently being used.
1420   * 
1421   * @see MemoryModel
1422   */
1423  public static MemoryModel getMemoryModel()
1424  {
1425    return mMemoryModel;
1426  }
1427
1428  /**
1429   * Sets the {@link MemoryModel}.
1430   * <p>
1431   * Only call once per process; Calling more than once has an unspecified
1432   * effect.
1433   * </p>
1434   * 
1435   * @param model The model you want to use.
1436   * 
1437   * @see #getMemoryModel()
1438   * @see MemoryModel
1439   */
1440  public static void setMemoryModel(MemoryModel model)
1441  {
1442    FerryJNI.setMemoryModel(model.getNativeValue());
1443    mMemoryModel = model;
1444  }
1445
1446  /**
1447   * Internal Only.
1448   * 
1449   * Immediately frees all active objects in the system. Do not call unless you
1450   * REALLY know what you're doing.
1451   */
1452  final public void flush()
1453  {
1454    blockingLock();
1455    try {
1456      int numSurvivors = sweepAndCollect();
1457      for(int i = 0; i < numSurvivors; i++)
1458      {
1459        final JNIReference ref = mValidReferences[i];
1460        if (ref != null)
1461          ref.delete();
1462      }
1463      sweepAndCollect();
1464      // finally, reset the valid references to the minimum
1465      mValidReferences = new JNIReference[mMinimumReferencesToCache];
1466      mNextAvailableReferenceSlot = 0;
1467      mMaxValidReference = mMinimumReferencesToCache;
1468    } finally {
1469      blockingUnlock();
1470    }
1471  }
1472
1473}