/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 4.0.2
 *
 * Do not make changes to this file unless you know what you are doing--modify
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */

package com.avpkit.core;
import com.avpkit.ferry.*;
/**
 * A set of raw (decoded) samples, plus a timestamp for when to play those<br>
 * samples relative to other items in a given {IContainer}.<br>
 * <br>
 * The timestamp value in decoded data is always in Microseconds.
 */
public class IAudioSamples extends IMediaData {
  // JNIHelper.swg: Start generated code
  // >>>>>>>>>>>>>>>>>>>>>>>>>>>
  /**
   * This method is only here to use some references and remove
   * a Eclipse compiler warning.
   */
  @SuppressWarnings("unused")
  private void noop()
  {
    IBuffer.make(null, 1);
  }

  // Raw pointer to the underlying native object; 0 once the native side has
  // been deleted. Declared volatile so a deletion performed on one thread is
  // visible to getMyCPtr()/copyReference() callers on another.
  private volatile long swigCPtr;

  /**
   * Internal Only.
   *
   * Wraps an existing native object.
   *
   * @param cPtr       raw native pointer to wrap.
   * @param cMemoryOwn true if this proxy owns (and must eventually free) the
   *                   native memory.
   */
  protected IAudioSamples(long cPtr, boolean cMemoryOwn) {
    super(AVPKitJNI.IAudioSamples_SWIGUpcast(cPtr), cMemoryOwn);
    swigCPtr = cPtr;
  }

  /**
   * Internal Only.
   *
   * Wraps an existing native object, sharing a Java-side reference count with
   * other proxies for the same native object.
   *
   * @param cPtr       raw native pointer to wrap.
   * @param cMemoryOwn true if this proxy owns the native memory.
   * @param ref        shared reference counter for all Java proxies of cPtr.
   */
  protected IAudioSamples(long cPtr, boolean cMemoryOwn,
      java.util.concurrent.atomic.AtomicLong ref)
  {
    super(AVPKitJNI.IAudioSamples_SWIGUpcast(cPtr),
     cMemoryOwn, ref);
    swigCPtr = cPtr;
  }

  /**
   * Internal Only. Not part of public API.
   *
   * Get the raw value of the native object that obj is proxying for.
   *
   * @param obj The java proxy object for a native object.
   * @return The raw pointer obj is proxying for, or 0 if obj is null.
   */
  public static long getCPtr(IAudioSamples obj) {
    if (obj == null) return 0;
    return obj.getMyCPtr();
  }

  /**
   * Internal Only. Not part of public API.
   *
   * Get the raw value of the native object that we're proxying for.
   *
   * @return The raw pointer we're proxying for.
   * @throws IllegalStateException if the underlying native object was already
   *         deleted (i.e. the pointer has been zeroed).
   */
  public long getMyCPtr() {
    if (swigCPtr == 0) throw new IllegalStateException("underlying native object already deleted");
    return swigCPtr;
  }

  /**
   * Create a new IAudioSamples object that is actually referring to the
   * exact same underlying native object.
   *
   * @return the new Java object, or null if the native object was already
   *         deleted.
   */
  @Override
  public IAudioSamples copyReference() {
    if (swigCPtr == 0)
      return null;
    else
      // Share the Java-side ref count so the native object is freed only
      // after every proxy referring to it has been released.
      return new IAudioSamples(swigCPtr, swigCMemOwn, getJavaRefCount());
  }

  /**
   * Compares two values, returning true if the underlying objects in native code are the same object.
   *
   * That means you can have two different Java objects, but when you do a comparison, you'll find out
   * they are the EXACT same object.
   *
   * @return True if the underlying native object is the same. False otherwise.
   */
  public boolean equals(Object obj) {
    boolean equal = false;
    if (obj instanceof IAudioSamples)
      equal = (((IAudioSamples)obj).swigCPtr == this.swigCPtr);
    return equal;
  }

  /**
   * Get a hashable value for this object.
   *
   * Derived from the native pointer, so it is consistent with equals():
   * proxies for the same native object hash alike.
   *
   * @return the hashable value.
   */
  public int hashCode() {
    return (int)swigCPtr;
  }

  // <<<<<<<<<<<<<<<<<<<<<<<<<<<
  // JNIHelper.swg: End generated code


  /**
   * Human-readable description of this set of samples (rate, channels,
   * format, timestamps, completeness, sizes).
   *
   * @return information about this set of samples
   */

  @Override
  public String toString()
  {
    StringBuilder result = new StringBuilder();

    result.append(this.getClass().getName()+"@"+hashCode()+"[");
    result.append("sample rate:"+getSampleRate()+";");
    result.append("channels:"+getChannels()+";");
    result.append("format:"+getFormat()+";");
    result.append("time stamp:"+getTimeStamp()+";");
    result.append("complete:"+isComplete()+";");
    result.append("num samples:"+getNumSamples()+";");
    result.append("size:"+getSize()+";");
    result.append("key:"+isKey()+";");
    IRational timeBase = IRational.make(1,(int)Global.DEFAULT_PTS_PER_SECOND);
    result.append("time base:"+timeBase+";");
    // IRational wraps a native object; release it explicitly rather than
    // waiting for finalization. make() may return null on allocation failure.
    if (timeBase != null) timeBase.delete();
    result.append("]");
    return result.toString();
  }


  /**
   * Returns whether or not we think this buffer has been filled<br>
   * with data.<br>
   * <br>
   * <br>
   * @return Has setComplete() been called and the buffer populated.
   */
  public boolean isComplete() {
    return AVPKitJNI.IAudioSamples_isComplete(swigCPtr, this);
  }

  /**
   * Find the sample rate of the samples in this audio buffer.<br>
   * <br>
   * <br>
   * @return The Sampling Rate of the samples in this buffer (e.g. 22050).
   */
  public int getSampleRate() {
    return AVPKitJNI.IAudioSamples_getSampleRate(swigCPtr, this);
  }

  /**
   * Return the number of channels of the samples in this buffer. For example,<br>
   * 1 is mono, 2 is stereo.<br>
   * <br>
   * <br>
   * @return The number of channels.
   */
  public int getChannels() {
    return AVPKitJNI.IAudioSamples_getChannels(swigCPtr, this);
  }

  /**
   * Find out the bit-depth of the samples in this buffer.<br>
   * <br>
   * <br>
   * @return Number of bits in a raw sample (per channel)
   */
  public int getSampleBitDepth() {
    return AVPKitJNI.IAudioSamples_getSampleBitDepth(swigCPtr, this);
  }

  /**
   * Find the Format of the samples in this buffer. Right now<br>
   * only FMT_S16 is supported.<br>
   * <br>
   * <br>
   * @return The format of the samples.
   */
  public IAudioSamples.Format getFormat() {
    return IAudioSamples.Format.swigToEnum(AVPKitJNI.IAudioSamples_getFormat(swigCPtr, this));
  }

  /**
   * Get the number of samples in this buffer.<br>
   * <br>
   * <br>
   * For example, if you have 100 bytes of stereo (2-channel) 16-bit<br>
   * audio in this buffer, there are 25 samples. If you have<br>
   * 100 bytes of mono (1-channel) 16-bit audio in this buffer, you<br>
   * have 50 samples.<br>
   * <br>
   * <br>
   * @return The number of samples.
   */
  public int getNumSamples() {
    return AVPKitJNI.IAudioSamples_getNumSamples(swigCPtr, this);
  }

  /**
   * @return Maximum number of bytes that can be put in<br>
   * this buffer. To get the number of samples you can<br>
   * put in this IAudioSamples instance, do the following<br>
   * num_samples = getMaxBufferSize() / (getSampleSize())
   */
  public int getMaxBufferSize() {
    return AVPKitJNI.IAudioSamples_getMaxBufferSize(swigCPtr, this);
  }

  /**
   * @return Maximum number of samples this buffer can hold.
   */
  public int getMaxSamples() {
    return AVPKitJNI.IAudioSamples_getMaxSamples(swigCPtr, this);
  }

  /**
   * @return Number of bytes in a single sample of audio (including channels).<br>
   * You can also get this by getSampleBitDepth()*getChannels()/8.
   */
  public int getSampleSize() {
    return AVPKitJNI.IAudioSamples_getSampleSize(swigCPtr, this);
  }

  /**
   * What is the Presentation Time Stamp of this set of audio samples.<br>
   * <br>
   * @return the presentation time stamp (pts)
   */
  public long getPts() {
    return AVPKitJNI.IAudioSamples_getPts(swigCPtr, this);
  }

  /**
   * Set the Presentation Time Stamp for this set of samples.<br>
   * <br>
   * @param aValue the new value
   */
  public void setPts(long aValue) {
    AVPKitJNI.IAudioSamples_setPts(swigCPtr, this, aValue);
  }

  /**
   * What would be the next Presentation Time Stamp after all the<br>
   * samples in this buffer were played?<br>
   * <br>
   * @return the next presentation time stamp (pts)
   */
  public long getNextPts() {
    return AVPKitJNI.IAudioSamples_getNextPts(swigCPtr, this);
  }

  /**
   * Call this if you modify the samples and are now done. This<br>
   * updates the pertinent information in the structure.<br>
   * <br>
   * @param complete Is this set of samples complete?<br>
   * @param numSamples Number of samples in this update (note that<br>
   * 4 shorts of 16-bit audio in stereo is actually 1 sample).<br>
   * @param sampleRate The sample rate (in Hz) of this set of samples.<br>
   * @param channels The number of channels in this set of samples.<br>
   * @param format The sample-format of this set of samples.<br>
   * @param pts The presentation time stamp of the starting sample in this buffer.<br>
   * Caller must ensure pts is in units of 1/1,000,000 of a second
   */
  public void setComplete(boolean complete, int numSamples, int sampleRate, int channels, IAudioSamples.Format format, long pts) {
    AVPKitJNI.IAudioSamples_setComplete__SWIG_0(swigCPtr, this, complete, numSamples, sampleRate, channels, format.swigValue(), pts);
  }

  /**
   * Sets the sample at the given index and channel to the sample. In<br>
   * theory we assume input is the given Format, and will convert<br>
   * if needed, but right now we only support FMT_S16 anyway.<br>
   * <br>
   * <br>
   * @param sampleIndex The zero-based index into the set of samples<br>
   * @param channel The zero-based channel number. If this set of samples doesn't<br>
   * have that given channel, an error is returned.<br>
   * @param format The format of the given sample<br>
   * @param sample The actual sample<br>
   * <br>
   * @return >= 0 on success; -1 on error.
   */
  public int setSample(int sampleIndex, int channel, IAudioSamples.Format format, int sample) {
    return AVPKitJNI.IAudioSamples_setSample(swigCPtr, this, sampleIndex, channel, format.swigValue(), sample);
  }

  /**
   * Get the sample at the given sampleIndex and channel, and return it in<br>
   * the asked for format.<br>
   * <br>
   * @param sampleIndex The zero-based index into this set of samples.<br>
   * @param channel The zero-based channel to get the sample from<br>
   * @param format The format to return in<br>
   * <br>
   * @return The sample if available. If that sample is not available<br>
   * (e.g. because the channel doesn't exist, or the samples have not<br>
   * been #setComplete(bool, int32_t, int32_t, int32_t, Format, int64_t)),<br>
   * then this method returns 0. It is up to the caller to ensure<br>
   * the inputs are valid given that 0 is also a valid sample value.
   */
  public int getSample(int sampleIndex, int channel, IAudioSamples.Format format) {
    return AVPKitJNI.IAudioSamples_getSample(swigCPtr, this, sampleIndex, channel, format.swigValue());
  }

  /**
   * A convenience method that returns the # of bits in a given<br>
   * format. Be aware that right now this library only supports<br>
   * 16-bit audio.<br>
   * <br>
   * @param format The format you want to find the number of bits in.<br>
   * <br>
   * <br>
   * @return The number of bits (not bytes) in the passed in format.
   */
  public static int findSampleBitDepth(IAudioSamples.Format format) {
    return AVPKitJNI.IAudioSamples_findSampleBitDepth(format.swigValue());
  }

  /**
   * Get a new audio samples buffer.<br>
   * <p><br>
   * Note that any buffers this object needs will be<br>
   * lazily allocated (i.e. we won't actually grab all<br>
   * the memory until we need it).<br>
   * </p><br>
   * @param numSamples The minimum number of samples you're<br>
   * going to want to put in this buffer. We may (and probably<br>
   * will) return a larger buffer, but you cannot assume that.<br>
   * @param numChannels The number of channels in the audio you'll<br>
   * want to put in this buffer.<br>
   * @return A new object, or null if we can't allocate one.
   */
  public static IAudioSamples make(int numSamples, int numChannels) {
    long cPtr = AVPKitJNI.IAudioSamples_make__SWIG_0(numSamples, numChannels);
    return (cPtr == 0) ? null : new IAudioSamples(cPtr, false);
  }

  /**
   * Converts a number of samples at a given sampleRate into <br>
   * Microseconds.<br>
   * @param samples Number of samples.<br>
   * @param sampleRate sample rate that those samples are recorded at.<br>
   * @return number of microseconds it would take to play that audio.
   */
  public static long samplesToDefaultPts(long samples, int sampleRate) {
    return AVPKitJNI.IAudioSamples_samplesToDefaultPts(samples, sampleRate);
  }

  /**
   * Converts a duration in microseconds into<br>
   * a number of samples, assuming a given sampleRate.<br>
   * @param duration The duration in microseconds.<br>
   * @param sampleRate sample rate that you want to use.<br>
   * @return The number of samples it would take (at the given sampleRate) to take duration microseconds to play.
   */
  public static long defaultPtsToSamples(long duration, int sampleRate) {
    return AVPKitJNI.IAudioSamples_defaultPtsToSamples(duration, sampleRate);
  }

  /**
   * Get the channel layout of the samples in this buffer.
   *
   * @return the channel layout reported by the native object.
   */
  public IAudioSamples.ChannelLayout getChannelLayout() {
    return IAudioSamples.ChannelLayout.swigToEnum(AVPKitJNI.IAudioSamples_getChannelLayout(swigCPtr, this));
  }

  /**
   * Same as {@link #setComplete(boolean, int, int, int, Format, long)},
   * but also records an explicit channel layout.
   *
   * @param complete Is this set of samples complete?
   * @param numSamples Number of samples in this update.
   * @param sampleRate The sample rate (in Hz) of this set of samples.
   * @param channels The number of channels in this set of samples.
   * @param channelLayout The channel layout of this set of samples.
   * @param format The sample-format of this set of samples.
   * @param pts The presentation time stamp of the starting sample, in
   * units of 1/1,000,000 of a second.
   */
  public void setComplete(boolean complete, int numSamples, int sampleRate, int channels, IAudioSamples.ChannelLayout channelLayout, IAudioSamples.Format format, long pts) {
    AVPKitJNI.IAudioSamples_setComplete__SWIG_1(swigCPtr, this, complete, numSamples, sampleRate, channels, channelLayout.swigValue(), format.swigValue(), pts);
  }

  /**
   * Creates an {IAudioSamples} object by wrapping an<br>
   * {com.avpkit.ferry.IBuffer object}.<br>
   * <p><br>
   * If you are decoding into this buffer, the buffer must be at least<br>
   * 192k*channels large (an FFmpeg requirement) or the decodeAudio<br>
   * call on {IStreamCoder} will fail with an error.<br>
   * If you are encoding from, any size should do.<br>
   * </p><br>
   * @param buffer the buffer to wrap<br>
   * @param channels the number of channels of audio you will put it the buffer<br>
   * @param format the audio sample format<br>
   * <br>
   * @return a new {IAudioSamples} object, or null on error.
   */
  public static IAudioSamples make(IBuffer buffer, int channels, IAudioSamples.Format format) {
    long cPtr = AVPKitJNI.IAudioSamples_make__SWIG_1(IBuffer.getCPtr(buffer), buffer, channels, format.swigValue());
    return (cPtr == 0) ? null : new IAudioSamples(cPtr, false);
  }

  /**
   * Get a new audio samples buffer.<br>
   * <p><br>
   * Note that any buffers this object needs will be<br>
   * lazily allocated (i.e. we won't actually grab all<br>
   * the memory until we need it).<br>
   * </p><br>
   * @param numSamples The minimum number of samples you're<br>
   * going to want to put in this buffer. We may (and probably<br>
   * will) return a larger buffer, but you cannot assume that.<br>
   * @param numChannels The number of channels in the audio you'll<br>
   * want to put in this buffer.<br>
   * @param format The format of this buffer<br>
   * @return A new object, or null if we can't allocate one.
   */
  public static IAudioSamples make(int numSamples, int numChannels, IAudioSamples.Format format) {
    long cPtr = AVPKitJNI.IAudioSamples_make__SWIG_2(numSamples, numChannels, format.swigValue());
    return (cPtr == 0) ? null : new IAudioSamples(cPtr, false);
  }

  /**
   * The format we use to represent audio. Today<br>
   * only FMT_S16 (signed integer 16-bit audio) is supported.
   */
  public enum Format {
    FMT_NONE(AVPKitJNI.IAudioSamples_FMT_NONE_get()),
    /**
     * unsigned 8 bits
     */
    FMT_U8,
    /**
     * signed 16 bits
     */
    FMT_S16,
    /**
     * signed 32 bits
     */
    FMT_S32,
    /**
     * float
     */
    FMT_FLT,
    /**
     * double
     */
    FMT_DBL,
    /**
     * unsigned 8 bits, planar
     */
    FMT_U8P,
    /**
     * signed 16 bits, planar
     */
    FMT_S16P,
    /**
     * signed 32 bits, planar
     */
    FMT_S32P,
    /**
     * float, planar
     */
    FMT_FLTP,
    /**
     * double, planar
     */
    FMT_DBLP;

    // Native enum value this Java constant maps to.
    public final int swigValue() {
      return swigValue;
    }

    /**
     * Map a native enum value back to the Java constant.
     *
     * Tries the common case first (constant at ordinal == value), then
     * falls back to a linear scan for sparse native values.
     *
     * @throws IllegalArgumentException if no constant has that value.
     */
    public static Format swigToEnum(int swigValue) {
      Format[] swigValues = Format.class.getEnumConstants();
      if (swigValue < swigValues.length && swigValue >= 0 && swigValues[swigValue].swigValue == swigValue)
        return swigValues[swigValue];
      for (Format swigEnum : swigValues)
        if (swigEnum.swigValue == swigValue)
          return swigEnum;
      throw new IllegalArgumentException("No enum " + Format.class + " with value " + swigValue);
    }

    @SuppressWarnings("unused")
    private Format() {
      this.swigValue = SwigNext.next++;
    }

    @SuppressWarnings("unused")
    private Format(int swigValue) {
      this.swigValue = swigValue;
      SwigNext.next = swigValue+1;
    }

    @SuppressWarnings("unused")
    private Format(Format swigEnum) {
      this.swigValue = swigEnum.swigValue;
      SwigNext.next = this.swigValue+1;
    }

    private final int swigValue;

    // Counter used at class-init time so constants declared without an
    // explicit value continue from the previous constant's value.
    private static class SwigNext {
      private static int next = 0;
    }
  }

  /**
   * Channel positions and pre-defined channel layouts, mirroring the
   * native library's channel layout values.
   */
  public enum ChannelLayout {
    CH_NONE(AVPKitJNI.IAudioSamples_ChannelLayout_CH_NONE_get()),
    CH_FRONT_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_FRONT_LEFT_get()),
    CH_FRONT_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_FRONT_RIGHT_get()),
    CH_FRONT_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_FRONT_CENTER_get()),
    CH_LOW_FREQUENCY(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LOW_FREQUENCY_get()),
    CH_BACK_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_BACK_LEFT_get()),
    CH_BACK_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_BACK_RIGHT_get()),
    CH_FRONT_LEFT_OF_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_FRONT_LEFT_OF_CENTER_get()),
    CH_FRONT_RIGHT_OF_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_FRONT_RIGHT_OF_CENTER_get()),
    CH_BACK_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_BACK_CENTER_get()),
    CH_SIDE_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_SIDE_LEFT_get()),
    CH_SIDE_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_SIDE_RIGHT_get()),
    CH_TOP_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_CENTER_get()),
    CH_TOP_FRONT_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_FRONT_LEFT_get()),
    CH_TOP_FRONT_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_FRONT_CENTER_get()),
    CH_TOP_FRONT_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_FRONT_RIGHT_get()),
    CH_TOP_BACK_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_BACK_LEFT_get()),
    CH_TOP_BACK_CENTER(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_BACK_CENTER_get()),
    CH_TOP_BACK_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_TOP_BACK_RIGHT_get()),
    /**
     * Stereo downmix.
     */
    CH_STEREO_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_STEREO_LEFT_get()),
    /**
     * See CH_STEREO_LEFT.
     */
    CH_STEREO_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_STEREO_RIGHT_get()),
    CH_WIDE_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_WIDE_LEFT_get()),
    CH_WIDE_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_WIDE_RIGHT_get()),
    CH_SURROUND_DIRECT_LEFT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_SURROUND_DIRECT_LEFT_get()),
    CH_SURROUND_DIRECT_RIGHT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_SURROUND_DIRECT_RIGHT_get()),
    CH_LOW_FREQUENCY_2(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LOW_FREQUENCY_2_get()),
    /**
     * Channel mask value used for AVCodecContext.request_channel_layout<br>
     * to indicate that the user requests the channel order of the decoder output<br>
     * to be the native codec channel order.
     */
    CH_LAYOUT_NATIVE(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_NATIVE_get()),
    CH_LAYOUT_MONO(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_MONO_get()),
    CH_LAYOUT_STEREO(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_STEREO_get()),
    CH_LAYOUT_2POINT1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_2POINT1_get()),
    CH_LAYOUT_2_1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_2_1_get()),
    CH_LAYOUT_SURROUND(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_SURROUND_get()),
    CH_LAYOUT_3POINT1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_3POINT1_get()),
    CH_LAYOUT_4POINT0(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_4POINT0_get()),
    CH_LAYOUT_4POINT1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_4POINT1_get()),
    CH_LAYOUT_2_2(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_2_2_get()),
    CH_LAYOUT_QUAD(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_QUAD_get()),
    CH_LAYOUT_5POINT0(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_5POINT0_get()),
    CH_LAYOUT_5POINT1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_5POINT1_get()),
    CH_LAYOUT_5POINT0_BACK(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_5POINT0_BACK_get()),
    CH_LAYOUT_5POINT1_BACK(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_5POINT1_BACK_get()),
    CH_LAYOUT_6POINT0(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_6POINT0_get()),
    CH_LAYOUT_6POINT0_FRONT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_6POINT0_FRONT_get()),
    CH_LAYOUT_HEXAGONAL(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_HEXAGONAL_get()),
    CH_LAYOUT_6POINT1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_6POINT1_get()),
    CH_LAYOUT_6POINT1_BACK(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_6POINT1_BACK_get()),
    CH_LAYOUT_6POINT1_FRONT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_6POINT1_FRONT_get()),
    CH_LAYOUT_7POINT0(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_7POINT0_get()),
    CH_LAYOUT_7POINT0_FRONT(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_7POINT0_FRONT_get()),
    CH_LAYOUT_7POINT1(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_7POINT1_get()),
    CH_LAYOUT_7POINT1_WIDE(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_7POINT1_WIDE_get()),
    CH_LAYOUT_7POINT1_WIDE_BACK(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_7POINT1_WIDE_BACK_get()),
    CH_LAYOUT_OCTAGONAL(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_OCTAGONAL_get()),
    CH_LAYOUT_HEXADECAGONAL(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_HEXADECAGONAL_get()),
    CH_LAYOUT_STEREO_DOWNMIX(AVPKitJNI.IAudioSamples_ChannelLayout_CH_LAYOUT_STEREO_DOWNMIX_get());

    // Native enum value this Java constant maps to.
    public final int swigValue() {
      return swigValue;
    }

    /**
     * Map a native enum value back to the Java constant.
     *
     * Tries the common case first (constant at ordinal == value), then
     * falls back to a linear scan for sparse native values.
     *
     * @throws IllegalArgumentException if no constant has that value.
     */
    public static ChannelLayout swigToEnum(int swigValue) {
      ChannelLayout[] swigValues = ChannelLayout.class.getEnumConstants();
      if (swigValue < swigValues.length && swigValue >= 0 && swigValues[swigValue].swigValue == swigValue)
        return swigValues[swigValue];
      for (ChannelLayout swigEnum : swigValues)
        if (swigEnum.swigValue == swigValue)
          return swigEnum;
      throw new IllegalArgumentException("No enum " + ChannelLayout.class + " with value " + swigValue);
    }

    @SuppressWarnings("unused")
    private ChannelLayout() {
      this.swigValue = SwigNext.next++;
    }

    @SuppressWarnings("unused")
    private ChannelLayout(int swigValue) {
      this.swigValue = swigValue;
      SwigNext.next = swigValue+1;
    }

    @SuppressWarnings("unused")
    private ChannelLayout(ChannelLayout swigEnum) {
      this.swigValue = swigEnum.swigValue;
      SwigNext.next = this.swigValue+1;
    }

    private final int swigValue;

    // Counter used at class-init time so constants declared without an
    // explicit value continue from the previous constant's value.
    private static class SwigNext {
      private static int next = 0;
    }
  }

}