FFmpeg  2.6.9
wmaprodec.c
1 /*
2  * Wmapro compatible decoder
3  * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
4  * Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * @brief wmapro decoder implementation
26  * Wmapro is an MDCT-based codec comparable to WMA Standard or AAC.
27  * The decoding therefore consists of the following steps:
28  * - bitstream decoding
29  * - reconstruction of per-channel data
30  * - rescaling and inverse quantization
31  * - IMDCT
32  * - windowing and overlap-add
33  *
34  * The compressed wmapro bitstream is split into individual packets.
35  * Every such packet contains one or more wma frames.
36  * The compressed frames may have a variable length and frames may
37  * cross packet boundaries.
38  * Common to all wmapro frames is the number of samples that are stored in
39  * a frame.
40  * The number of samples and a few other decode flags are stored
41  * as extradata that has to be passed to the decoder.
42  *
43  * The wmapro frames themselves are again split into a variable number of
44  * subframes. Every subframe contains the data for 2^N time domain samples
45  * where N varies between 7 and 12.
46  *
47  * Example wmapro bitstream (in samples):
48  *
49  * || packet 0 || packet 1 || packet 2 packets
50  * ---------------------------------------------------
51  * || frame 0 || frame 1 || frame 2 || frames
52  * ---------------------------------------------------
53  * || | | || | | | || || subframes of channel 0
54  * ---------------------------------------------------
55  * || | | || | | | || || subframes of channel 1
56  * ---------------------------------------------------
57  *
58  * The frame layouts for the individual channels of a wma frame do not need
59  * to be the same.
60  *
61  * However, if the offsets and lengths of several subframes of a frame are the
62  * same, the subframes of the channels can be grouped.
63  * Every group may then use special coding techniques like M/S stereo coding
64  * to improve the compression ratio. These channel transformations do not
65  * need to be applied to a whole subframe. Instead, they can also work on
66  * individual scale factor bands (see below).
67  * The coefficients that carry the audio signal in the frequency domain
68  * are transmitted as Huffman-coded vectors with 4, 2 and 1 elements.
69  * In addition to that, the encoder can switch to a runlevel coding scheme
70  * by transmitting subframe_length / 128 zero coefficients.
71  *
72  * Before the audio signal can be converted to the time domain, the
73  * coefficients have to be rescaled and inverse quantized.
74  * A subframe is therefore split into several scale factor bands that get
75  * scaled individually.
76  * Scale factors are submitted for every frame but they might be shared
77  * between the subframes of a channel. Scale factors are initially DPCM-coded.
78  * Once scale factors are shared, the differences are transmitted as runlevel
79  * codes.
80  * Every subframe length and offset combination in the frame layout shares a
81  * common quantization factor that can be adjusted for every channel by a
82  * modifier.
83  * After the inverse quantization, the coefficients get processed by an IMDCT.
84  * The resulting values are then windowed with a sine window and the first half
85  * of the values are added to the second half of the output from the previous
86  * subframe in order to reconstruct the output samples.
87  */
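/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * rescaling described above amounts to a per-band linear gain; a minimal
 * sketch of that computation, mirroring the pow(10, exp / 20) step
 * performed later in decode_subframe(). All names below are local to
 * this example.
 */
#include <math.h>

static float example_band_gain(int quant_step, int max_scale_factor,
                               int scale_factor, int scale_factor_step)
{
    /* larger scale factors mean less attenuation for the band */
    int exp = quant_step - (max_scale_factor - scale_factor) * scale_factor_step;
    return powf(10.0f, exp / 20.0f);
}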
88 
89 #include <inttypes.h>
90 
91 #include "libavutil/float_dsp.h"
92 #include "libavutil/intfloat.h"
93 #include "libavutil/intreadwrite.h"
94 #include "avcodec.h"
95 #include "internal.h"
96 #include "get_bits.h"
97 #include "put_bits.h"
98 #include "wmaprodata.h"
99 #include "sinewin.h"
100 #include "wma.h"
101 #include "wma_common.h"
102 
103 /** current decoder limitations */
104 #define WMAPRO_MAX_CHANNELS 8 ///< max number of handled channels
105 #define MAX_SUBFRAMES 32 ///< max number of subframes per channel
106 #define MAX_BANDS 29 ///< max number of scale factor bands
107 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
108 
109 #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size
110 #define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size
111 #define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS) ///< minimum block size
112 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size
113 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes
114 
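/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * macros above cover eight block sizes, 2^6 .. 2^13 samples; a tiny
 * helper that lists them.
 */
static void example_list_block_sizes(void)
{
    int bits;
    for (bits = WMAPRO_BLOCK_MIN_BITS; bits <= WMAPRO_BLOCK_MAX_BITS; bits++)
        av_log(NULL, AV_LOG_DEBUG, "block size: %d samples\n", 1 << bits);
    /* prints 64, 128, 256, 512, 1024, 2048, 4096, 8192 */
}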
115 
116 #define VLCBITS 9
117 #define SCALEVLCBITS 8
118 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
119 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
120 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
121 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
122 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
123 
124 static VLC sf_vlc; ///< scale factor DPCM vlc
125 static VLC sf_rl_vlc; ///< scale factor run length vlc
126 static VLC vec4_vlc; ///< 4 coefficients per symbol
127 static VLC vec2_vlc; ///< 2 coefficients per symbol
128 static VLC vec1_vlc; ///< 1 coefficient per symbol
129 static VLC coef_vlc[2]; ///< coefficient run length vlc codes
130 static float sin64[33]; ///< sine table for decorrelation
131 
132 /**
133  * @brief frame specific decoder context for a single channel
134  */
135 typedef struct WMAProChannelCtx {
136  int16_t prev_block_len; ///< length of the previous block
 137  uint8_t transmit_coefs;
 138  uint8_t num_subframes;
139  uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
140  uint16_t subframe_offset[MAX_SUBFRAMES]; ///< subframe positions in the current frame
141  uint8_t cur_subframe; ///< current subframe number
142  uint16_t decoded_samples; ///< number of already processed samples
143  uint8_t grouped; ///< channel is part of a group
144  int quant_step; ///< quantization step for the current subframe
145  int8_t reuse_sf; ///< share scale factors between subframes
146  int8_t scale_factor_step; ///< scaling step for the current subframe
147  int max_scale_factor; ///< maximum scale factor for the current subframe
148  int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
149  int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
150  int* scale_factors; ///< pointer to the scale factor values used for decoding
151  uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
152  float* coeffs; ///< pointer to the subframe decode buffer
153  uint16_t num_vec_coeffs; ///< number of vector coded coefficients
154  DECLARE_ALIGNED(32, float, out)[WMAPRO_BLOCK_MAX_SIZE + WMAPRO_BLOCK_MAX_SIZE / 2]; ///< output buffer
 155 } WMAProChannelCtx;
156 
157 /**
158  * @brief channel group for channel transformations
159  */
160 typedef struct WMAProChannelGrp {
161  uint8_t num_channels; ///< number of channels in the group
162  int8_t transform; ///< transform on / off
163  int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
164  float decorrelation_matrix[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
 165  float* channel_data[WMAPRO_MAX_CHANNELS]; ///< transformation coefficients
 166 } WMAProChannelGrp;
167 
168 /**
169  * @brief main decoder context
170  */
171 typedef struct WMAProDecodeCtx {
172  /* generic decoder variables */
173  AVCodecContext* avctx; ///< codec context for av_log
174  AVFloatDSPContext *fdsp;
 175  uint8_t frame_data[MAX_FRAMESIZE +
 176  FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
177  PutBitContext pb; ///< context for filling the frame_data buffer
178  FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]; ///< MDCT context per block size
179  DECLARE_ALIGNED(32, float, tmp)[WMAPRO_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
180  const float* windows[WMAPRO_BLOCK_SIZES]; ///< windows for the different block sizes
181 
182  /* frame size dependent frame information (set during initialization) */
183  uint32_t decode_flags; ///< used compression features
184  uint8_t len_prefix; ///< frame is prefixed with its length
185  uint8_t dynamic_range_compression; ///< frame contains DRC data
186  uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
187  uint16_t samples_per_frame; ///< number of samples to output
188  uint16_t log2_frame_size;
189  int8_t lfe_channel; ///< lfe channel index
190  uint8_t max_num_subframes;
 191  uint8_t subframe_len_bits; ///< number of bits used for the subframe length
192  uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
193  uint16_t min_samples_per_subframe;
 194  int8_t num_sfb[WMAPRO_BLOCK_SIZES]; ///< scale factor bands per block size
195  int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
196  int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
197  int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]; ///< subwoofer cutoff values
198 
199  /* packet decode state */
200  GetBitContext pgb; ///< bitstream reader context for the packet
201  int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
202  uint8_t packet_offset; ///< frame offset in the packet
203  uint8_t packet_sequence_number; ///< current packet number
204  int num_saved_bits; ///< saved number of bits
205  int frame_offset; ///< frame offset in the bit reservoir
206  int subframe_offset; ///< subframe offset in the bit reservoir
207  uint8_t packet_loss; ///< set in case of bitstream error
208  uint8_t packet_done; ///< set when a packet is fully decoded
209 
210  /* frame decode state */
211  uint32_t frame_num; ///< current frame number (not used for decoding)
212  GetBitContext gb; ///< bitstream reader context
213  int buf_bit_size; ///< buffer size in bits
214  uint8_t drc_gain; ///< gain for the DRC tool
215  int8_t skip_frame; ///< skip output step
216  int8_t parsed_all_subframes; ///< all subframes decoded?
217 
218  /* subframe/block decode state */
219  int16_t subframe_len; ///< current subframe length
220  int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
221  int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS];
 222  int8_t num_bands; ///< number of scale factor bands
223  int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
224  int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
225  uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
226  int8_t esc_len; ///< length of escaped coefficients
227 
228  uint8_t num_chgroups; ///< number of channel groups
229  WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]; ///< channel group information
230 
 231  WMAProChannelCtx channel[WMAPRO_MAX_CHANNELS]; ///< per channel data
 232 } WMAProDecodeCtx;
 233 
234 
235 /**
236  *@brief helper function to print the most important members of the context
237  *@param s context
238  */
239 static av_cold void dump_context(WMAProDecodeCtx *s)
 240 {
241 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
242 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %"PRIx32"\n", a, b);
243 
244  PRINT("ed sample bit depth", s->bits_per_sample);
245  PRINT_HEX("ed decode flags", s->decode_flags);
246  PRINT("samples per frame", s->samples_per_frame);
247  PRINT("log2 frame size", s->log2_frame_size);
248  PRINT("max num subframes", s->max_num_subframes);
249  PRINT("len prefix", s->len_prefix);
250  PRINT("num channels", s->avctx->channels);
251 }
252 
253 /**
254  *@brief Uninitialize the decoder and free all resources.
255  *@param avctx codec context
256  *@return 0 on success, < 0 otherwise
257  */
258 static av_cold int decode_end(AVCodecContext *avctx)
 259 {
260  WMAProDecodeCtx *s = avctx->priv_data;
261  int i;
262 
263  av_freep(&s->fdsp);
264 
265  for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
266  ff_mdct_end(&s->mdct_ctx[i]);
267 
268  return 0;
269 }
270 
271 /**
272  *@brief Initialize the decoder.
273  *@param avctx codec context
274  *@return 0 on success, -1 otherwise
275  */
276 static av_cold int decode_init(AVCodecContext *avctx)
 277 {
278  WMAProDecodeCtx *s = avctx->priv_data;
279  uint8_t *edata_ptr = avctx->extradata;
280  unsigned int channel_mask;
281  int i, bits;
282  int log2_max_num_subframes;
283  int num_possible_block_sizes;
284 
285  if (!avctx->block_align) {
286  av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
287  return AVERROR(EINVAL);
288  }
289 
290  s->avctx = avctx;
291  s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
 292  if (!s->fdsp)
293  return AVERROR(ENOMEM);
294 
296 
298 
299  if (avctx->extradata_size >= 18) {
300  s->decode_flags = AV_RL16(edata_ptr+14);
301  channel_mask = AV_RL32(edata_ptr+2);
302  s->bits_per_sample = AV_RL16(edata_ptr);
303 
304  if (s->bits_per_sample > 32 || s->bits_per_sample < 1) {
305  avpriv_request_sample(avctx, "bits per sample is %d", s->bits_per_sample);
306  return AVERROR_PATCHWELCOME;
307  }
308 
309  /** dump the extradata */
310  for (i = 0; i < avctx->extradata_size; i++)
311  av_dlog(avctx, "[%x] ", avctx->extradata[i]);
312  av_dlog(avctx, "\n");
313 
314  } else {
315  avpriv_request_sample(avctx, "Unknown extradata size");
316  return AVERROR_PATCHWELCOME;
317  }
318 
319  /** generic init */
320  s->log2_frame_size = av_log2(avctx->block_align) + 4;
321  if (s->log2_frame_size > 25) {
322  avpriv_request_sample(avctx, "Large block align");
323  return AVERROR_PATCHWELCOME;
324  }
325 
326  /** frame info */
327  s->skip_frame = 1; /* skip first frame */
328  s->packet_loss = 1;
329  s->len_prefix = (s->decode_flags & 0x40);
330 
331  /** get frame len */
332  bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
333  if (bits > WMAPRO_BLOCK_MAX_BITS) {
334  avpriv_request_sample(avctx, "14-bit block sizes");
335  return AVERROR_PATCHWELCOME;
336  }
337  s->samples_per_frame = 1 << bits;
338 
339  /** subframe info */
340  log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
341  s->max_num_subframes = 1 << log2_max_num_subframes;
342  if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
343  s->max_subframe_len_bit = 1;
344  s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
345 
346  num_possible_block_sizes = log2_max_num_subframes + 1;
 347  s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
348  s->dynamic_range_compression = (s->decode_flags & 0x80);
349 
350  if (s->max_num_subframes > MAX_SUBFRAMES) {
351  av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %"PRId8"\n",
352  s->max_num_subframes);
353  return AVERROR_INVALIDDATA;
354  }
355 
356  if (s->min_samples_per_subframe < WMAPRO_BLOCK_MIN_SIZE) {
 357  av_log(avctx, AV_LOG_ERROR, "min_samples_per_subframe of %d too small\n",
 358  s->min_samples_per_subframe);
 359  return AVERROR_INVALIDDATA;
360  }
361 
362  if (s->avctx->sample_rate <= 0) {
363  av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
364  return AVERROR_INVALIDDATA;
365  }
366 
367  if (avctx->channels < 0) {
368  av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
369  avctx->channels);
370  return AVERROR_INVALIDDATA;
371  } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
372  avpriv_request_sample(avctx,
373  "More than %d channels", WMAPRO_MAX_CHANNELS);
374  return AVERROR_PATCHWELCOME;
375  }
376 
377  /** init previous block len */
378  for (i = 0; i < avctx->channels; i++)
 379  s->channel[i].prev_block_len = s->samples_per_frame;
380 
381  /** extract lfe channel position */
382  s->lfe_channel = -1;
383 
384  if (channel_mask & 8) {
385  unsigned int mask;
386  for (mask = 1; mask < 16; mask <<= 1) {
387  if (channel_mask & mask)
388  ++s->lfe_channel;
389  }
390  }
391 
392  INIT_VLC_STATIC(&sf_vlc, SCALEVLCBITS, HUFF_SCALE_SIZE,
 393  scale_huffbits, 1, 1,
394  scale_huffcodes, 2, 2, 616);
395 
396  INIT_VLC_STATIC(&sf_rl_vlc, VLCBITS, HUFF_SCALE_RL_SIZE,
 397  scale_rl_huffbits, 1, 1,
398  scale_rl_huffcodes, 4, 4, 1406);
399 
400  INIT_VLC_STATIC(&coef_vlc[0], VLCBITS, HUFF_COEF0_SIZE,
401  coef0_huffbits, 1, 1,
402  coef0_huffcodes, 4, 4, 2108);
403 
404  INIT_VLC_STATIC(&coef_vlc[1], VLCBITS, HUFF_COEF1_SIZE,
405  coef1_huffbits, 1, 1,
406  coef1_huffcodes, 4, 4, 3912);
407 
408  INIT_VLC_STATIC(&vec4_vlc, VLCBITS, HUFF_VEC4_SIZE,
 409  vec4_huffbits, 1, 1,
410  vec4_huffcodes, 2, 2, 604);
411 
412  INIT_VLC_STATIC(&vec2_vlc, VLCBITS, HUFF_VEC2_SIZE,
 413  vec2_huffbits, 1, 1,
414  vec2_huffcodes, 2, 2, 562);
415 
416  INIT_VLC_STATIC(&vec1_vlc, VLCBITS, HUFF_VEC1_SIZE,
 417  vec1_huffbits, 1, 1,
418  vec1_huffcodes, 2, 2, 562);
419 
420  /** calculate number of scale factor bands and their offsets
421  for every possible block size */
422  for (i = 0; i < num_possible_block_sizes; i++) {
423  int subframe_len = s->samples_per_frame >> i;
424  int x;
425  int band = 1;
426 
427  s->sfb_offsets[i][0] = 0;
428 
429  for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {
430  int offset = (subframe_len * 2 * critical_freq[x])
431  / s->avctx->sample_rate + 2;
432  offset &= ~3;
433  if (offset > s->sfb_offsets[i][band - 1])
434  s->sfb_offsets[i][band++] = offset;
435 
436  if (offset >= subframe_len)
437  break;
438  }
439  s->sfb_offsets[i][band - 1] = subframe_len;
440  s->num_sfb[i] = band - 1;
441  if (s->num_sfb[i] <= 0) {
442  av_log(avctx, AV_LOG_ERROR, "num_sfb invalid\n");
443  return AVERROR_INVALIDDATA;
444  }
445  }
446 
447 
448  /** Scale factors can be shared between blocks of different size
449  as every block has a different scale factor band layout.
450  The matrix sf_offsets is needed to find the correct scale factor.
451  */
452 
453  for (i = 0; i < num_possible_block_sizes; i++) {
454  int b;
455  for (b = 0; b < s->num_sfb[i]; b++) {
456  int x;
457  int offset = ((s->sfb_offsets[i][b]
458  + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
459  for (x = 0; x < num_possible_block_sizes; x++) {
460  int v = 0;
461  while (s->sfb_offsets[x][v + 1] << x < offset) {
462  v++;
463  av_assert0(v < MAX_BANDS);
464  }
465  s->sf_offsets[i][x][b] = v;
466  }
467  }
468  }
469 
470  /** init MDCT, FIXME: only init needed sizes */
471  for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
 472  ff_mdct_init(&s->mdct_ctx[i], WMAPRO_BLOCK_MIN_BITS + 1 + i, 1,
473  1.0 / (1 << (WMAPRO_BLOCK_MIN_BITS + i - 1))
474  / (1 << (s->bits_per_sample - 1)));
475 
476  /** init MDCT windows: simple sine window */
477  for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) {
478  const int win_idx = WMAPRO_BLOCK_MAX_BITS - i;
479  ff_init_ff_sine_windows(win_idx);
480  s->windows[WMAPRO_BLOCK_SIZES - i - 1] = ff_sine_windows[win_idx];
481  }
482 
483  /** calculate subwoofer cutoff values */
484  for (i = 0; i < num_possible_block_sizes; i++) {
485  int block_size = s->samples_per_frame >> i;
486  int cutoff = (440*block_size + 3LL * (s->avctx->sample_rate >> 1) - 1)
487  / s->avctx->sample_rate;
488  s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
489  }
490 
491  /** calculate sine values for the decorrelation matrix */
492  for (i = 0; i < 33; i++)
493  sin64[i] = sin(i*M_PI / 64.0);
494 
495  if (avctx->debug & FF_DEBUG_BITSTREAM)
496  dump_context(s);
497 
498  avctx->channel_layout = channel_mask;
499 
500  return 0;
501 }
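/*
 * [Editor's note, not part of the original file.] For reference, the
 * extradata fields read by decode_init() above, all little-endian:
 *   bytes  0..1   bits_per_sample
 *   bytes  2..5   channel_mask
 *   bytes 14..15  decode_flags
 */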
502 
503 /**
504  *@brief Decode the subframe length.
505  *@param s context
506  *@param offset sample offset in the frame
507  *@return decoded subframe length on success, < 0 in case of an error
508  */
509 static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
 510 {
511  int frame_len_shift = 0;
512  int subframe_len;
513 
514  /** no need to read from the bitstream when only one length is possible */
515  if (offset == s->samples_per_frame - s->min_samples_per_subframe)
516  return s->min_samples_per_subframe;
517 
518  if (get_bits_left(&s->gb) < 1)
519  return AVERROR_INVALIDDATA;
520 
521  /** 1 bit indicates if the subframe is of maximum length */
522  if (s->max_subframe_len_bit) {
523  if (get_bits1(&s->gb))
524  frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);
525  } else
526  frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);
527 
528  subframe_len = s->samples_per_frame >> frame_len_shift;
529 
530  /** sanity check the length */
531  if (subframe_len < s->min_samples_per_subframe ||
532  subframe_len > s->samples_per_frame) {
533  av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
534  subframe_len);
535  return AVERROR_INVALIDDATA;
536  }
537  return subframe_len;
538 }
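/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * lengths decode_subframe_length() can return are samples_per_frame
 * divided by a power of two, e.g. for samples_per_frame = 2048 and
 * max_num_subframes = 16: 2048, 1024, 512, 256 and 128 samples.
 */
static void example_possible_subframe_lengths(const WMAProDecodeCtx *s)
{
    int len;
    for (len = s->samples_per_frame; len >= s->min_samples_per_subframe; len >>= 1)
        av_log(s->avctx, AV_LOG_DEBUG, "possible subframe length: %d\n", len);
}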
539 
540 /**
541  *@brief Decode how the data in the frame is split into subframes.
542  * Every WMA frame contains the encoded data for a fixed number of
543  * samples per channel. The data for every channel might be split
544  * into several subframes. This function will reconstruct the list of
545  * subframes for every channel.
546  *
547  * If the subframes are not evenly split, the algorithm estimates the
548  * channels with the lowest number of total samples.
549  * Afterwards, for each of these channels a bit is read from the
550  * bitstream that indicates if the channel contains a subframe with the
551  * next subframe size that is going to be read from the bitstream or not.
552  * If a channel contains such a subframe, the subframe size gets added to
553  * the channel's subframe list.
554  * The algorithm repeats these steps until the frame is properly divided
555  * between the individual channels.
556  *
557  *@param s context
558  *@return 0 on success, < 0 in case of an error
559  */
560 static int decode_tilehdr(WMAProDecodeCtx *s)
 561 {
562  uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
563  uint8_t contains_subframe[WMAPRO_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
564  int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
565  int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
566  int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
567  int c;
568 
569  /* Should never consume more than 3073 bits (256 iterations for the
570  * while loop when always the minimum amount of 128 samples is subtracted
571  * from missing samples in the 8 channel case).
572  * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
573  */
574 
575  /** reset tiling information */
576  for (c = 0; c < s->avctx->channels; c++)
577  s->channel[c].num_subframes = 0;
578 
579  if (s->max_num_subframes == 1 || get_bits1(&s->gb))
580  fixed_channel_layout = 1;
581 
582  /** loop until the frame data is split between the subframes */
583  do {
584  int subframe_len;
585 
586  /** check which channels contain the subframe */
587  for (c = 0; c < s->avctx->channels; c++) {
588  if (num_samples[c] == min_channel_len) {
589  if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
590  (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
591  contains_subframe[c] = 1;
592  else
593  contains_subframe[c] = get_bits1(&s->gb);
594  } else
595  contains_subframe[c] = 0;
596  }
597 
598  /** get subframe length, subframe_len == 0 is not allowed */
599  if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0)
600  return AVERROR_INVALIDDATA;
601 
602  /** add subframes to the individual channels and find new min_channel_len */
603  min_channel_len += subframe_len;
604  for (c = 0; c < s->avctx->channels; c++) {
605  WMAProChannelCtx* chan = &s->channel[c];
606 
607  if (contains_subframe[c]) {
608  if (chan->num_subframes >= MAX_SUBFRAMES) {
610  "broken frame: num subframes > 31\n");
611  return AVERROR_INVALIDDATA;
612  }
613  chan->subframe_len[chan->num_subframes] = subframe_len;
614  num_samples[c] += subframe_len;
615  ++chan->num_subframes;
616  if (num_samples[c] > s->samples_per_frame) {
617  av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
618  "channel len > samples_per_frame\n");
619  return AVERROR_INVALIDDATA;
620  }
621  } else if (num_samples[c] <= min_channel_len) {
622  if (num_samples[c] < min_channel_len) {
623  channels_for_cur_subframe = 0;
624  min_channel_len = num_samples[c];
625  }
626  ++channels_for_cur_subframe;
627  }
628  }
629  } while (min_channel_len < s->samples_per_frame);
630 
631  for (c = 0; c < s->avctx->channels; c++) {
632  int i;
633  int offset = 0;
634  for (i = 0; i < s->channel[c].num_subframes; i++) {
635  av_dlog(s->avctx, "frame[%i] channel[%i] subframe[%i]"
636  " len %i\n", s->frame_num, c, i,
637  s->channel[c].subframe_len[i]);
638  s->channel[c].subframe_offset[i] = offset;
639  offset += s->channel[c].subframe_len[i];
640  }
641  }
642 
643  return 0;
644 }
645 
646 /**
647  *@brief Calculate a decorrelation matrix from the bitstream parameters.
648  *@param s codec context
649  *@param chgroup channel group for which the matrix needs to be calculated
650  */
651 static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
 652  WMAProChannelGrp *chgroup)
653 {
654  int i;
655  int offset = 0;
656  int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
657  memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
658  s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
659 
660  for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
661  rotation_offset[i] = get_bits(&s->gb, 6);
662 
663  for (i = 0; i < chgroup->num_channels; i++)
664  chgroup->decorrelation_matrix[chgroup->num_channels * i + i] =
665  get_bits1(&s->gb) ? 1.0 : -1.0;
666 
667  for (i = 1; i < chgroup->num_channels; i++) {
668  int x;
669  for (x = 0; x < i; x++) {
670  int y;
671  for (y = 0; y < i + 1; y++) {
672  float v1 = chgroup->decorrelation_matrix[x * chgroup->num_channels + y];
673  float v2 = chgroup->decorrelation_matrix[i * chgroup->num_channels + y];
674  int n = rotation_offset[offset + x];
675  float sinv;
676  float cosv;
677 
678  if (n < 32) {
679  sinv = sin64[n];
680  cosv = sin64[32 - n];
681  } else {
682  sinv = sin64[64 - n];
683  cosv = -sin64[n - 32];
684  }
685 
686  chgroup->decorrelation_matrix[y + x * chgroup->num_channels] =
687  (v1 * sinv) - (v2 * cosv);
688  chgroup->decorrelation_matrix[y + i * chgroup->num_channels] =
689  (v1 * cosv) + (v2 * sinv);
690  }
691  }
692  offset += i;
693  }
694 }
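/*
 * [Editor's illustrative sketch, not part of the original file.] One
 * elementary rotation as applied in the inner loop above: the 6-bit
 * index n selects sin(n*pi/64) from sin64[], and the matching cosine is
 * taken from the same table via cos(x) == sin(pi/2 - x).
 */
static void example_rotate_pair(float *a, float *b, int n)
{
    float sinv, cosv, v1 = *a, v2 = *b;
    if (n < 32) {
        sinv = sin64[n];
        cosv = sin64[32 - n];
    } else {
        sinv = sin64[64 - n];
        cosv = -sin64[n - 32];
    }
    *a = v1 * sinv - v2 * cosv;
    *b = v1 * cosv + v2 * sinv;
}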
695 
696 /**
697  *@brief Decode channel transformation parameters
698  *@param s codec context
699  *@return >= 0 in case of success, < 0 in case of bitstream errors
700  */
701 static int decode_channel_transform(WMAProDecodeCtx* s)
 702 {
703  int i;
704  /* should never consume more than 1921 bits for the 8 channel case
705  * 1 + MAX_CHANNELS * (MAX_CHANNELS + 2 + 3 * MAX_CHANNELS * MAX_CHANNELS
706  * + MAX_CHANNELS + MAX_BANDS + 1)
707  */
708 
709  /** in the one channel case channel transforms are pointless */
710  s->num_chgroups = 0;
711  if (s->avctx->channels > 1) {
712  int remaining_channels = s->channels_for_cur_subframe;
713 
714  if (get_bits1(&s->gb)) {
716  "Channel transform bit");
717  return AVERROR_PATCHWELCOME;
718  }
719 
720  for (s->num_chgroups = 0; remaining_channels &&
722  WMAProChannelGrp* chgroup = &s->chgroup[s->num_chgroups];
723  float** channel_data = chgroup->channel_data;
724  chgroup->num_channels = 0;
725  chgroup->transform = 0;
726 
727  /** decode channel mask */
728  if (remaining_channels > 2) {
729  for (i = 0; i < s->channels_for_cur_subframe; i++) {
730  int channel_idx = s->channel_indexes_for_cur_subframe[i];
731  if (!s->channel[channel_idx].grouped
732  && get_bits1(&s->gb)) {
733  ++chgroup->num_channels;
734  s->channel[channel_idx].grouped = 1;
735  *channel_data++ = s->channel[channel_idx].coeffs;
736  }
737  }
738  } else {
739  chgroup->num_channels = remaining_channels;
740  for (i = 0; i < s->channels_for_cur_subframe; i++) {
741  int channel_idx = s->channel_indexes_for_cur_subframe[i];
742  if (!s->channel[channel_idx].grouped)
743  *channel_data++ = s->channel[channel_idx].coeffs;
744  s->channel[channel_idx].grouped = 1;
745  }
746  }
747 
748  /** decode transform type */
749  if (chgroup->num_channels == 2) {
750  if (get_bits1(&s->gb)) {
751  if (get_bits1(&s->gb)) {
753  "Unknown channel transform type");
754  return AVERROR_PATCHWELCOME;
755  }
756  } else {
757  chgroup->transform = 1;
758  if (s->avctx->channels == 2) {
759  chgroup->decorrelation_matrix[0] = 1.0;
760  chgroup->decorrelation_matrix[1] = -1.0;
761  chgroup->decorrelation_matrix[2] = 1.0;
762  chgroup->decorrelation_matrix[3] = 1.0;
763  } else {
764  /** cos(pi/4) */
765  chgroup->decorrelation_matrix[0] = 0.70703125;
766  chgroup->decorrelation_matrix[1] = -0.70703125;
767  chgroup->decorrelation_matrix[2] = 0.70703125;
768  chgroup->decorrelation_matrix[3] = 0.70703125;
769  }
770  }
771  } else if (chgroup->num_channels > 2) {
772  if (get_bits1(&s->gb)) {
773  chgroup->transform = 1;
774  if (get_bits1(&s->gb)) {
775  decode_decorrelation_matrix(s, chgroup);
776  } else {
777  /** FIXME: more than 6 coupled channels not supported */
778  if (chgroup->num_channels > 6) {
780  "Coupled channels > 6");
781  } else {
782  memcpy(chgroup->decorrelation_matrix,
784  chgroup->num_channels * chgroup->num_channels *
785  sizeof(*chgroup->decorrelation_matrix));
786  }
787  }
788  }
789  }
790 
791  /** decode transform on / off */
792  if (chgroup->transform) {
793  if (!get_bits1(&s->gb)) {
794  int i;
795  /** transform can be enabled for individual bands */
796  for (i = 0; i < s->num_bands; i++) {
797  chgroup->transform_band[i] = get_bits1(&s->gb);
798  }
799  } else {
800  memset(chgroup->transform_band, 1, s->num_bands);
801  }
802  }
803  remaining_channels -= chgroup->num_channels;
804  }
805  }
806  return 0;
807 }
808 
809 /**
810  *@brief Extract the coefficients from the bitstream.
811  *@param s codec context
812  *@param c current channel number
813  *@return 0 on success, < 0 in case of bitstream errors
814  */
815 static int decode_coeffs(WMAProDecodeCtx *s, int c)
816 {
817  /* Integers 0..15 as single-precision floats. The table saves a
818  costly int to float conversion, and storing the values as
819  integers allows fast sign-flipping. */
820  static const uint32_t fval_tab[16] = {
821  0x00000000, 0x3f800000, 0x40000000, 0x40400000,
822  0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
823  0x41000000, 0x41100000, 0x41200000, 0x41300000,
824  0x41400000, 0x41500000, 0x41600000, 0x41700000,
825  };
826  int vlctable;
827  VLC* vlc;
828  WMAProChannelCtx* ci = &s->channel[c];
829  int rl_mode = 0;
830  int cur_coeff = 0;
831  int num_zeros = 0;
832  const uint16_t* run;
833  const float* level;
834 
835  av_dlog(s->avctx, "decode coefficients for channel %i\n", c);
836 
837  vlctable = get_bits1(&s->gb);
838  vlc = &coef_vlc[vlctable];
839 
840  if (vlctable) {
841  run = coef1_run;
842  level = coef1_level;
843  } else {
844  run = coef0_run;
845  level = coef0_level;
846  }
847 
848  /** decode vector coefficients (consumes up to 167 bits per iteration for
849  4 vector coded large values) */
850  while ((s->transmit_num_vec_coeffs || !rl_mode) &&
851  (cur_coeff + 3 < ci->num_vec_coeffs)) {
852  uint32_t vals[4];
853  int i;
854  unsigned int idx;
855 
856  idx = get_vlc2(&s->gb, vec4_vlc.table, VLCBITS, VEC4MAXDEPTH);
857 
858  if (idx == HUFF_VEC4_SIZE - 1) {
859  for (i = 0; i < 4; i += 2) {
860  idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
861  if (idx == HUFF_VEC2_SIZE - 1) {
862  uint32_t v0, v1;
863  v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
864  if (v0 == HUFF_VEC1_SIZE - 1)
865  v0 += ff_wma_get_large_val(&s->gb);
866  v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
867  if (v1 == HUFF_VEC1_SIZE - 1)
868  v1 += ff_wma_get_large_val(&s->gb);
869  vals[i ] = av_float2int(v0);
870  vals[i+1] = av_float2int(v1);
871  } else {
872  vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
873  vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
874  }
875  }
876  } else {
877  vals[0] = fval_tab[ symbol_to_vec4[idx] >> 12 ];
878  vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF];
879  vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF];
880  vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF];
881  }
882 
883  /** decode sign */
884  for (i = 0; i < 4; i++) {
885  if (vals[i]) {
886  uint32_t sign = get_bits1(&s->gb) - 1;
887  AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
888  num_zeros = 0;
889  } else {
890  ci->coeffs[cur_coeff] = 0;
891  /** switch to run level mode when subframe_len / 128 zeros
892  were found in a row */
893  rl_mode |= (++num_zeros > s->subframe_len >> 8);
894  }
895  ++cur_coeff;
896  }
897  }
898 
899  /** decode run level coded coefficients */
900  if (cur_coeff < s->subframe_len) {
901  memset(&ci->coeffs[cur_coeff], 0,
902  sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));
903  if (ff_wma_run_level_decode(s->avctx, &s->gb, vlc,
904  level, run, 1, ci->coeffs,
905  cur_coeff, s->subframe_len,
906  s->subframe_len, s->esc_len, 0))
907  return AVERROR_INVALIDDATA;
908  }
909 
910  return 0;
911 }
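/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * fval_tab[] entries above are the IEEE-754 bit patterns of 0.0 .. 15.0,
 * so the sign read from the bitstream can be applied by XOR-ing bit 31
 * of the stored pattern, avoiding an int-to-float conversion.
 */
static float example_apply_sign(uint32_t magnitude_bits, int negative)
{
    uint32_t bits = magnitude_bits ^ (negative ? 0x80000000u : 0u);
    return av_int2float(bits); /* reinterpret the bit pattern as a float */
}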
912 
913 /**
914  *@brief Extract scale factors from the bitstream.
915  *@param s codec context
916  *@return 0 on success, < 0 in case of bitstream errors
917  */
918 static int decode_scale_factors(WMAProDecodeCtx* s)
 919 {
920  int i;
921 
922  /** should never consume more than 5344 bits
923  * MAX_CHANNELS * (1 + MAX_BANDS * 23)
924  */
925 
926  for (i = 0; i < s->channels_for_cur_subframe; i++) {
 927  int c = s->channel_indexes_for_cur_subframe[i];
 928  int* sf;
 929  int* sf_end;
 930  s->channel[c].scale_factors = s->channel[c].saved_scale_factors[!s->channel[c].scale_factor_idx];
 931  sf_end = s->channel[c].scale_factors + s->num_bands;
932 
933  /** resample scale factors for the new block size
934  * as the scale factors might need to be resampled several times
935  * before some new values are transmitted, a backup of the last
936  * transmitted scale factors is kept in saved_scale_factors
937  */
938  if (s->channel[c].reuse_sf) {
939  const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];
940  int b;
941  for (b = 0; b < s->num_bands; b++)
942  s->channel[c].scale_factors[b] =
943  s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
944  }
945 
946  if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {
947 
948  if (!s->channel[c].reuse_sf) {
949  int val;
950  /** decode DPCM coded scale factors */
951  s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
952  val = 45 / s->channel[c].scale_factor_step;
953  for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
954  val += get_vlc2(&s->gb, sf_vlc.table, SCALEVLCBITS, SCALEMAXDEPTH) - 60;
955  *sf = val;
956  }
957  } else {
958  int i;
959  /** run level decode differences to the resampled factors */
960  for (i = 0; i < s->num_bands; i++) {
961  int idx;
962  int skip;
963  int val;
964  int sign;
965 
966  idx = get_vlc2(&s->gb, sf_rl_vlc.table, VLCBITS, SCALERLMAXDEPTH);
967 
968  if (!idx) {
969  uint32_t code = get_bits(&s->gb, 14);
970  val = code >> 6;
971  sign = (code & 1) - 1;
972  skip = (code & 0x3f) >> 1;
973  } else if (idx == 1) {
974  break;
975  } else {
976  skip = scale_rl_run[idx];
977  val = scale_rl_level[idx];
978  sign = get_bits1(&s->gb)-1;
979  }
980 
981  i += skip;
982  if (i >= s->num_bands) {
984  "invalid scale factor coding\n");
985  return AVERROR_INVALIDDATA;
986  }
987  s->channel[c].scale_factors[i] += (val ^ sign) - sign;
988  }
989  }
990  /** swap buffers */
991  s->channel[c].scale_factor_idx = !s->channel[c].scale_factor_idx;
 992  s->channel[c].table_idx = s->table_idx;
993  s->channel[c].reuse_sf = 1;
994  }
995 
996  /** calculate new scale factor maximum */
997  s->channel[c].max_scale_factor = *s->channel[c].scale_factors;
 998  for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
 999  s->channel[c].max_scale_factor =
 1000  FFMAX(s->channel[c].max_scale_factor, *sf);
1001  }
1002 
1003  }
1004  return 0;
1005 }
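/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * DPCM path above, written with plain symbol values in place of the
 * sf_vlc bitstream reads: each symbol is a delta biased by 60, and the
 * running value starts at 45 / scale_factor_step.
 */
static void example_dpcm_scale_factors(const int *symbols, int num_bands,
                                       int scale_factor_step, int *scale_factors)
{
    int b, val = 45 / scale_factor_step;
    for (b = 0; b < num_bands; b++) {
        val += symbols[b] - 60;
        scale_factors[b] = val;
    }
}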
1006 
1007 /**
1008  *@brief Reconstruct the individual channel data.
1009  *@param s codec context
1010  */
1011 static void inverse_channel_transform(WMAProDecodeCtx *s)
 1012 {
1013  int i;
1014 
1015  for (i = 0; i < s->num_chgroups; i++) {
1016  if (s->chgroup[i].transform) {
1017  float data[WMAPRO_MAX_CHANNELS];
1018  const int num_channels = s->chgroup[i].num_channels;
1019  float** ch_data = s->chgroup[i].channel_data;
1020  float** ch_end = ch_data + num_channels;
1021  const int8_t* tb = s->chgroup[i].transform_band;
1022  int16_t* sfb;
1023 
1024  /** multichannel decorrelation */
1025  for (sfb = s->cur_sfb_offsets;
1026  sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1027  int y;
1028  if (*tb++ == 1) {
1029  /** multiply values with the decorrelation_matrix */
1030  for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
1031  const float* mat = s->chgroup[i].decorrelation_matrix;
1032  const float* data_end = data + num_channels;
1033  float* data_ptr = data;
1034  float** ch;
1035 
1036  for (ch = ch_data; ch < ch_end; ch++)
1037  *data_ptr++ = (*ch)[y];
1038 
1039  for (ch = ch_data; ch < ch_end; ch++) {
1040  float sum = 0;
1041  data_ptr = data;
1042  while (data_ptr < data_end)
1043  sum += *data_ptr++ * *mat++;
1044 
1045  (*ch)[y] = sum;
1046  }
1047  }
1048  } else if (s->avctx->channels == 2) {
1049  int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
1050  s->fdsp->vector_fmul_scalar(ch_data[0] + sfb[0],
1051  ch_data[0] + sfb[0],
1052  181.0 / 128, len);
1053  s->fdsp->vector_fmul_scalar(ch_data[1] + sfb[0],
1054  ch_data[1] + sfb[0],
1055  181.0 / 128, len);
1056  }
1057  }
1058  }
1059  }
1060 }
1061 
1062 /**
1063  *@brief Apply sine window and reconstruct the output buffer.
1064  *@param s codec context
1065  */
1066 static void wmapro_window(WMAProDecodeCtx *s)
 1067 {
1068  int i;
1069  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1070  int c = s->channel_indexes_for_cur_subframe[i];
1071  const float* window;
1072  int winlen = s->channel[c].prev_block_len;
1073  float* start = s->channel[c].coeffs - (winlen >> 1);
1074 
1075  if (s->subframe_len < winlen) {
1076  start += (winlen - s->subframe_len) >> 1;
1077  winlen = s->subframe_len;
1078  }
1079 
1080  window = s->windows[av_log2(winlen) - WMAPRO_BLOCK_MIN_BITS];
1081 
1082  winlen >>= 1;
1083 
1084  s->fdsp->vector_fmul_window(start, start, start + winlen,
1085  window, winlen);
1086 
1087  s->channel[c].prev_block_len = s->subframe_len;
 1088  }
1089 }
1090 
1091 /**
1092  *@brief Decode a single subframe (block).
1093  *@param s codec context
1094  *@return 0 on success, < 0 when decoding failed
1095  */
1096 static int decode_subframe(WMAProDecodeCtx *s)
 1097 {
1098  int offset = s->samples_per_frame;
1099  int subframe_len = s->samples_per_frame;
1100  int i;
1101  int total_samples = s->samples_per_frame * s->avctx->channels;
1102  int transmit_coeffs = 0;
1103  int cur_subwoofer_cutoff;
1104 
1105  s->subframe_offset = get_bits_count(&s->gb);
1106 
1107  /** reset channel context and find the next block offset and size
1108  == the next block of the channel with the smallest number of
1109  decoded samples
1110  */
1111  for (i = 0; i < s->avctx->channels; i++) {
1112  s->channel[i].grouped = 0;
1113  if (offset > s->channel[i].decoded_samples) {
1114  offset = s->channel[i].decoded_samples;
1115  subframe_len =
 1116  s->channel[i].subframe_len[s->channel[i].cur_subframe];
 1117  }
1118  }
1119 
1120  av_dlog(s->avctx,
1121  "processing subframe with offset %i len %i\n", offset, subframe_len);
1122 
1123  /** get a list of all channels that contain the estimated block */
1125  for (i = 0; i < s->avctx->channels; i++) {
1126  const int cur_subframe = s->channel[i].cur_subframe;
1127  /** subtract already processed samples */
1128  total_samples -= s->channel[i].decoded_samples;
1129 
1130  /** and count if there are multiple subframes that match our profile */
1131  if (offset == s->channel[i].decoded_samples &&
1132  subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1133  total_samples -= s->channel[i].subframe_len[cur_subframe];
1134  s->channel[i].decoded_samples +=
1135  s->channel[i].subframe_len[cur_subframe];
1138  }
1139  }
1140 
1141  /** check if the frame will be complete after processing the
1142  estimated block */
1143  if (!total_samples)
1144  s->parsed_all_subframes = 1;
1145 
1146 
1147  av_dlog(s->avctx, "subframe is part of %i channels\n",
 1148  s->channels_for_cur_subframe);
1149 
1150  /** calculate number of scale factor bands and their offsets */
1151  s->table_idx = av_log2(s->samples_per_frame/subframe_len);
1152  s->num_bands = s->num_sfb[s->table_idx];
1154  cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1155 
1156  /** configure the decoder for the current subframe */
1157  offset += s->samples_per_frame >> 1;
1158 
1159  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1160  int c = s->channel_indexes_for_cur_subframe[i];
1161 
1162  s->channel[c].coeffs = &s->channel[c].out[offset];
1163  }
1164 
1165  s->subframe_len = subframe_len;
1166  s->esc_len = av_log2(s->subframe_len - 1) + 1;
1167 
1168  /** skip extended header if any */
1169  if (get_bits1(&s->gb)) {
1170  int num_fill_bits;
1171  if (!(num_fill_bits = get_bits(&s->gb, 2))) {
1172  int len = get_bits(&s->gb, 4);
1173  num_fill_bits = (len ? get_bits(&s->gb, len) : 0) + 1;
1174  }
1175 
1176  if (num_fill_bits >= 0) {
1177  if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
1178  av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n");
1179  return AVERROR_INVALIDDATA;
1180  }
1181 
1182  skip_bits_long(&s->gb, num_fill_bits);
1183  }
1184  }
1185 
1186  /** no idea for what the following bit is used */
1187  if (get_bits1(&s->gb)) {
1188  avpriv_request_sample(s->avctx, "Reserved bit");
1189  return AVERROR_PATCHWELCOME;
1190  }
1191 
1192 
1193  if (decode_channel_transform(s) < 0)
1194  return AVERROR_INVALIDDATA;
1195 
1196 
1197  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1198  int c = s->channel_indexes_for_cur_subframe[i];
1199  if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
1200  transmit_coeffs = 1;
1201  }
1202 
1204  if (transmit_coeffs) {
1205  int step;
1206  int quant_step = 90 * s->bits_per_sample >> 4;
1207 
1208  /** decode number of vector coded coefficients */
1209  if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
1210  int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
1211  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1212  int c = s->channel_indexes_for_cur_subframe[i];
1213  int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
1214  if (num_vec_coeffs > s->subframe_len) {
1215  av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
1216  return AVERROR_INVALIDDATA;
1217  }
1218  av_assert0(num_vec_coeffs + offset <= FF_ARRAY_ELEMS(s->channel[c].out));
1219  s->channel[c].num_vec_coeffs = num_vec_coeffs;
1220  }
1221  } else {
1222  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1223  int c = s->channel_indexes_for_cur_subframe[i];
1224  s->channel[c].num_vec_coeffs = s->subframe_len;
 1225  }
1226  }
1227  /** decode quantization step */
1228  step = get_sbits(&s->gb, 6);
1229  quant_step += step;
1230  if (step == -32 || step == 31) {
1231  const int sign = (step == 31) - 1;
1232  int quant = 0;
1233  while (get_bits_count(&s->gb) + 5 < s->num_saved_bits &&
1234  (step = get_bits(&s->gb, 5)) == 31) {
1235  quant += 31;
1236  }
1237  quant_step += ((quant + step) ^ sign) - sign;
1238  }
1239  if (quant_step < 0) {
1240  av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n");
1241  }
1242 
1243  /** decode quantization step modifiers for every channel */
1244 
1245  if (s->channels_for_cur_subframe == 1) {
1246  s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1247  } else {
1248  int modifier_len = get_bits(&s->gb, 3);
1249  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1250  int c = s->channel_indexes_for_cur_subframe[i];
1251  s->channel[c].quant_step = quant_step;
1252  if (get_bits1(&s->gb)) {
1253  if (modifier_len) {
1254  s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;
1255  } else
1256  ++s->channel[c].quant_step;
1257  }
1258  }
1259  }
1260 
1261  /** decode scale factors */
1262  if (decode_scale_factors(s) < 0)
1263  return AVERROR_INVALIDDATA;
1264  }
1265 
1266  av_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
1267  get_bits_count(&s->gb) - s->subframe_offset);
1268 
1269  /** parse coefficients */
1270  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1271  int c = s->channel_indexes_for_cur_subframe[i];
1272  if (s->channel[c].transmit_coefs &&
1273  get_bits_count(&s->gb) < s->num_saved_bits) {
1274  decode_coeffs(s, c);
1275  } else
1276  memset(s->channel[c].coeffs, 0,
1277  sizeof(*s->channel[c].coeffs) * subframe_len);
1278  }
1279 
1280  av_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
1281  get_bits_count(&s->gb) - s->subframe_offset);
1282 
1283  if (transmit_coeffs) {
1284  FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
1285  /** reconstruct the per channel data */
1286  inverse_channel_transform(s);
 1287  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1288  int c = s->channel_indexes_for_cur_subframe[i];
1289  const int* sf = s->channel[c].scale_factors;
1290  int b;
1291 
1292  if (c == s->lfe_channel)
1293  memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
1294  (subframe_len - cur_subwoofer_cutoff));
1295 
1296  /** inverse quantization and rescaling */
1297  for (b = 0; b < s->num_bands; b++) {
1298  const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
1299  const int exp = s->channel[c].quant_step -
1300  (s->channel[c].max_scale_factor - *sf++) *
1301  s->channel[c].scale_factor_step;
1302  const float quant = pow(10.0, exp / 20.0);
1303  int start = s->cur_sfb_offsets[b];
1304  s->fdsp->vector_fmul_scalar(s->tmp + start,
1305  s->channel[c].coeffs + start,
1306  quant, end - start);
1307  }
1308 
1309  /** apply imdct (imdct_half == DCTIV with reverse) */
1310  mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
1311  }
1312  }
1313 
1314  /** window and overlapp-add */
1315  wmapro_window(s);
1316 
1317  /** handled one subframe */
1318  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1319  int c = s->channel_indexes_for_cur_subframe[i];
1320  if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1321  av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
1322  return AVERROR_INVALIDDATA;
1323  }
1324  ++s->channel[c].cur_subframe;
1325  }
1326 
1327  return 0;
1328 }
1329 
1330 /**
1331  *@brief Decode one WMA frame.
1332  *@param s codec context
1333  *@return 0 if the trailer bit indicates that this is the last frame,
1334  * 1 if there are additional frames
1335  */
1336 static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
1337 {
1338  AVCodecContext *avctx = s->avctx;
1339  GetBitContext* gb = &s->gb;
1340  int more_frames = 0;
1341  int len = 0;
1342  int i, ret;
1343 
1344  /** get frame length */
1345  if (s->len_prefix)
1346  len = get_bits(gb, s->log2_frame_size);
1347 
1348  av_dlog(s->avctx, "decoding frame with length %x\n", len);
1349 
1350  /** decode tile information */
1351  if (decode_tilehdr(s)) {
1352  s->packet_loss = 1;
1353  return 0;
1354  }
1355 
1356  /** read postproc transform */
1357  if (s->avctx->channels > 1 && get_bits1(gb)) {
1358  if (get_bits1(gb)) {
1359  for (i = 0; i < avctx->channels * avctx->channels; i++)
1360  skip_bits(gb, 4);
1361  }
1362  }
1363 
1364  /** read drc info */
1365  if (s->dynamic_range_compression) {
1366  s->drc_gain = get_bits(gb, 8);
1367  av_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);
1368  }
1369 
1370  /** no idea what these are for, might be the number of samples
1371  that need to be skipped at the beginning or end of a stream */
1372  if (get_bits1(gb)) {
1373  int av_unused skip;
1374 
1375  /** usually true for the first frame */
1376  if (get_bits1(gb)) {
1377  skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1378  av_dlog(s->avctx, "start skip: %i\n", skip);
1379  }
1380 
1381  /** sometimes true for the last frame */
1382  if (get_bits1(gb)) {
1383  skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1384  av_dlog(s->avctx, "end skip: %i\n", skip);
1385  }
1386 
1387  }
1388 
1389  av_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",
1390  get_bits_count(gb) - s->frame_offset);
1391 
1392  /** reset subframe states */
1393  s->parsed_all_subframes = 0;
1394  for (i = 0; i < avctx->channels; i++) {
1395  s->channel[i].decoded_samples = 0;
1396  s->channel[i].cur_subframe = 0;
1397  s->channel[i].reuse_sf = 0;
1398  }
1399 
1400  /** decode all subframes */
1401  while (!s->parsed_all_subframes) {
1402  if (decode_subframe(s) < 0) {
1403  s->packet_loss = 1;
1404  return 0;
1405  }
1406  }
1407 
1408  /* get output buffer */
1409  frame->nb_samples = s->samples_per_frame;
1410  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1411  s->packet_loss = 1;
1412  return 0;
1413  }
1414 
1415  /** copy samples to the output buffer */
1416  for (i = 0; i < avctx->channels; i++)
1417  memcpy(frame->extended_data[i], s->channel[i].out,
1418  s->samples_per_frame * sizeof(*s->channel[i].out));
1419 
1420  for (i = 0; i < avctx->channels; i++) {
1421  /** reuse second half of the IMDCT output for the next frame */
1422  memcpy(&s->channel[i].out[0],
1423  &s->channel[i].out[s->samples_per_frame],
1424  s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
1425  }
1426 
1427  if (s->skip_frame) {
1428  s->skip_frame = 0;
1429  *got_frame_ptr = 0;
1430  av_frame_unref(frame);
1431  } else {
1432  *got_frame_ptr = 1;
1433  }
1434 
1435  if (s->len_prefix) {
1436  if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
1437  /** FIXME: not sure if this is always an error */
1439  "frame[%"PRIu32"] would have to skip %i bits\n",
1440  s->frame_num,
1441  len - (get_bits_count(gb) - s->frame_offset) - 1);
1442  s->packet_loss = 1;
1443  return 0;
1444  }
1445 
1446  /** skip the rest of the frame data */
1447  skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
1448  } else {
1449  while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
1450  }
1451  }
1452 
1453  /** decode trailer bit */
1454  more_frames = get_bits1(gb);
1455 
1456  ++s->frame_num;
1457  return more_frames;
1458 }
1459 
1460 /**
1461  *@brief Calculate remaining input buffer length.
1462  *@param s codec context
1463  *@param gb bitstream reader context
1464  *@return remaining size in bits
1465  */
1466 static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
 1467 {
1468  return s->buf_bit_size - get_bits_count(gb);
1469 }
1470 
1471 /**
1472  *@brief Fill the bit reservoir with a (partial) frame.
1473  *@param s codec context
1474  *@param gb bitstream reader context
1475  *@param len length of the partial frame
1476  *@param append decides whether to reset the buffer or not
1477  */
1478 static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
 1479  int append)
1480 {
1481  int buflen;
1482 
1483  /** when the frame data does not need to be concatenated, the input buffer
1484  is reset and additional bits from the previous frame are copied
1485  and skipped later so that a fast byte copy is possible */
1486 
1487  if (!append) {
1488  s->frame_offset = get_bits_count(gb) & 7;
1489  s->num_saved_bits = s->frame_offset;
1491  }
1492 
1493  buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
1494 
1495  if (len <= 0 || buflen > MAX_FRAMESIZE) {
1496  avpriv_request_sample(s->avctx, "Too small input buffer");
1497  s->packet_loss = 1;
1498  return;
1499  }
1500 
1501  av_assert0(len <= put_bits_left(&s->pb));
1502 
1503  s->num_saved_bits += len;
1504  if (!append) {
1505  avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
1506  s->num_saved_bits);
1507  } else {
1508  int align = 8 - (get_bits_count(gb) & 7);
1509  align = FFMIN(align, len);
1510  put_bits(&s->pb, align, get_bits(gb, align));
1511  len -= align;
1512  avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len);
1513  }
1514  skip_bits_long(gb, len);
1515 
1516  {
1517  PutBitContext tmp = s->pb;
1518  flush_put_bits(&tmp);
1519  }
1520 
1521  init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
 1522  skip_bits(&s->gb, s->frame_offset);
1523 }
1524 
1525 /**
1526  *@brief Decode a single WMA packet.
1527  *@param avctx codec context
1528  *@param data the output buffer
1529  *@param avpkt input packet
1530  *@return number of bytes that were read from the input buffer
1531  */
1532 static int decode_packet(AVCodecContext *avctx, void *data,
1533  int *got_frame_ptr, AVPacket* avpkt)
1534 {
1535  WMAProDecodeCtx *s = avctx->priv_data;
1536  GetBitContext* gb = &s->pgb;
1537  const uint8_t* buf = avpkt->data;
1538  int buf_size = avpkt->size;
1539  int num_bits_prev_frame;
1540  int packet_sequence_number;
1541 
1542  *got_frame_ptr = 0;
1543 
1544  if (s->packet_done || s->packet_loss) {
1545  s->packet_done = 0;
1546 
1547  /** sanity check for the buffer length */
1548  if (buf_size < avctx->block_align) {
1549  av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
1550  buf_size, avctx->block_align);
1551  return AVERROR_INVALIDDATA;
1552  }
1553 
1554  s->next_packet_start = buf_size - avctx->block_align;
1555  buf_size = avctx->block_align;
1556  s->buf_bit_size = buf_size << 3;
1557 
1558  /** parse packet header */
1559  init_get_bits(gb, buf, s->buf_bit_size);
1560  packet_sequence_number = get_bits(gb, 4);
1561  skip_bits(gb, 2);
1562 
1563  /** get number of bits that need to be added to the previous frame */
1564  num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
1565  av_dlog(avctx, "packet[%d]: nbpf %x\n", avctx->frame_number,
1566  num_bits_prev_frame);
1567 
1568  /** check for packet loss */
1569  if (!s->packet_loss &&
1570  ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1571  s->packet_loss = 1;
1572  av_log(avctx, AV_LOG_ERROR,
1573  "Packet loss detected! seq %"PRIx8" vs %x\n",
1574  s->packet_sequence_number, packet_sequence_number);
1575  }
1576  s->packet_sequence_number = packet_sequence_number;
1577 
1578  if (num_bits_prev_frame > 0) {
1579  int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb);
1580  if (num_bits_prev_frame >= remaining_packet_bits) {
1581  num_bits_prev_frame = remaining_packet_bits;
1582  s->packet_done = 1;
1583  }
1584 
1585  /** append the previous frame data to the remaining data from the
1586  previous packet to create a full frame */
1587  save_bits(s, gb, num_bits_prev_frame, 1);
1588  av_dlog(avctx, "accumulated %x bits of frame data\n",
1589  s->num_saved_bits - s->frame_offset);
1590 
1591  /** decode the cross packet frame if it is valid */
1592  if (!s->packet_loss)
1593  decode_frame(s, data, got_frame_ptr);
1594  } else if (s->num_saved_bits - s->frame_offset) {
1595  av_dlog(avctx, "ignoring %x previously saved bits\n",
1596  s->num_saved_bits - s->frame_offset);
1597  }
1598 
1599  if (s->packet_loss) {
1600  /** reset number of saved bits so that the decoder
1601  does not start to decode incomplete frames in the
1602  s->len_prefix == 0 case */
1603  s->num_saved_bits = 0;
1604  s->packet_loss = 0;
1605  }
1606 
1607  } else {
1608  int frame_size;
1609  s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
1610  init_get_bits(gb, avpkt->data, s->buf_bit_size);
1611  skip_bits(gb, s->packet_offset);
1612  if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size &&
1613  (frame_size = show_bits(gb, s->log2_frame_size)) &&
1614  frame_size <= remaining_bits(s, gb)) {
1615  save_bits(s, gb, frame_size, 0);
1616  if (!s->packet_loss)
1617  s->packet_done = !decode_frame(s, data, got_frame_ptr);
1618  } else if (!s->len_prefix
1619  && s->num_saved_bits > get_bits_count(&s->gb)) {
1620  /** when the frames do not have a length prefix, we don't know
1621  the compressed length of the individual frames
1622  however, we know what part of a new packet belongs to the
1623  previous frame
1624  therefore we save the incoming packet first, then we append
1625  the "previous frame" data from the next packet so that
1626  we get a buffer that only contains full frames */
1627  s->packet_done = !decode_frame(s, data, got_frame_ptr);
1628  } else
1629  s->packet_done = 1;
1630  }
1631 
1632  if (remaining_bits(s, gb) < 0) {
1633  av_log(avctx, AV_LOG_ERROR, "Overread %d\n", -remaining_bits(s, gb));
1634  s->packet_loss = 1;
1635  }
1636 
1637  if (s->packet_done && !s->packet_loss &&
1638  remaining_bits(s, gb) > 0) {
1639  /** save the rest of the data so that it can be decoded
1640  with the next packet */
1641  save_bits(s, gb, remaining_bits(s, gb), 0);
1642  }
1643 
1644  s->packet_offset = get_bits_count(gb) & 7;
1645  if (s->packet_loss)
1646  return AVERROR_INVALIDDATA;
1647 
1648  return get_bits_count(gb) >> 3;
1649 }
1650 
1651 /**
1652  *@brief Clear decoder buffers (for seeking).
1653  *@param avctx codec context
1654  */
1655 static void flush(AVCodecContext *avctx)
1656 {
1657  WMAProDecodeCtx *s = avctx->priv_data;
1658  int i;
1659  /** reset output buffer as a part of it is used during the windowing of a
1660  new frame */
1661  for (i = 0; i < avctx->channels; i++)
1662  memset(s->channel[i].out, 0, s->samples_per_frame *
1663  sizeof(*s->channel[i].out));
1664  s->packet_loss = 1;
1665 }
1666 
1667 
1668 /**
1669  *@brief wmapro decoder
1670  */
1671 AVCodec ff_wmapro_decoder = {
 1672  .name = "wmapro",
1673  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
1674  .type = AVMEDIA_TYPE_AUDIO,
1675  .id = AV_CODEC_ID_WMAPRO,
1676  .priv_data_size = sizeof(WMAProDecodeCtx),
1677  .init = decode_init,
1678  .close = decode_end,
1679  .decode = decode_packet,
1680  .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
1681  .flush = flush,
1682  .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1683  AV_SAMPLE_FMT_NONE },
 1684 };