[Scummvm-cvs-logs] scummvm master -> 547fd1bdcabcba0e741eb31100ba99ff73399d24
clone2727
clone2727 at gmail.com
Fri Jun 3 07:04:36 CEST 2011
This automated email contains information about 2 new commits which have been
pushed to the 'scummvm' repo located at https://github.com/scummvm/scummvm .
Summary:
2e06681698 COMMON: Begin objectifying QuickTimeParser::SampleDesc further
547fd1bdca COMMON: Cleanup QuickTime variable and struct naming
Commit: 2e066816983935b8e365fc555f953bdce9f64e46
https://github.com/scummvm/scummvm/commit/2e066816983935b8e365fc555f953bdce9f64e46
Author: Matthew Hoops (clone2727 at gmail.com)
Date: 2011-06-02T20:44:40-07:00
Commit Message:
COMMON: Begin objectifying QuickTimeParser::SampleDesc further
This is preparation for multiple video and audio tracks
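For orientation (not part of the commit itself), a minimal standalone sketch of the pattern this change moves toward: per-codec logic migrates off the decoder and onto a SampleDesc subclass that keeps a back pointer to the stream that declared it. The StreamContext struct and MKTAG macro below are simplified stand-ins for the real ScummVM types in common/quicktime.h and common/endian.h.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for ScummVM's MKTAG macro; simplified for this sketch.
    #define MKTAG(a, b, c, d) \
        ((std::uint32_t)((d) | ((c) << 8) | ((b) << 16) | ((a) << 24)))

    // Stand-in for QuickTimeParser::MOVStreamContext, reduced to the one
    // field this sketch needs.
    struct StreamContext {
        std::uint8_t objectTypeMP4 = 0;
    };

    // Base class: each sample description now carries its codec tag and a
    // back pointer to the stream (track) that declared it.
    class SampleDesc {
    public:
        SampleDesc(StreamContext *parentStream, std::uint32_t codecTag)
            : _parentStream(parentStream), _codecTag(codecTag) {}
        virtual ~SampleDesc() {}

        std::uint32_t getCodecTag() const { return _codecTag; }

    protected:
        StreamContext *_parentStream;
        std::uint32_t _codecTag;
    };

    // Audio-specific subclass: checks like codec support become methods of
    // the description itself, so a file with several audio tracks can ask
    // each description about its own codec.
    class AudioSampleDesc : public SampleDesc {
    public:
        AudioSampleDesc(StreamContext *parentStream, std::uint32_t codecTag)
            : SampleDesc(parentStream, codecTag) {}

        // Roughly what checkAudioCodecSupport(tag, objectTypeMP4) turns
        // into: the parent-stream pointer replaces the extra parameter.
        bool isAudioCodecSupported() const {
            return _codecTag == MKTAG('t', 'w', 'o', 's') ||
                   _codecTag == MKTAG('r', 'a', 'w', ' ') ||
                   _codecTag == MKTAG('i', 'm', 'a', '4') ||
                   (_codecTag == MKTAG('m', 'p', '4', 'a') &&
                    _parentStream->objectTypeMP4 == 0x40);
        }
    };

    int main() {
        StreamContext stream;
        AudioSampleDesc desc(&stream, MKTAG('i', 'm', 'a', '4'));
        std::printf("supported: %d\n", desc.isAudioCodecSupported());
        return 0;
    }

The actual diff below does the same for createAudioStream() and getAudioChunkSampleCount() on the audio side and initCodec() on the video side.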
Changed paths:
audio/decoders/quicktime.cpp
audio/decoders/quicktime_intern.h
common/quicktime.cpp
common/quicktime.h
video/qt_decoder.cpp
video/qt_decoder.h
diff --git a/audio/decoders/quicktime.cpp b/audio/decoders/quicktime.cpp
index bdde9db..a22f039 100644
--- a/audio/decoders/quicktime.cpp
+++ b/audio/decoders/quicktime.cpp
@@ -79,13 +79,13 @@ void QuickTimeAudioDecoder::init() {
if (_audioStreamIndex >= 0) {
AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
- if (checkAudioCodecSupport(entry->codecTag, _streams[_audioStreamIndex]->objectTypeMP4)) {
- _audStream = makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
+ if (entry->isAudioCodecSupported()) {
+ _audStream = makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
_curAudioChunk = 0;
// Make sure the bits per sample transfers to the sample size
- if (entry->codecTag == MKTAG('r', 'a', 'w', ' ') || entry->codecTag == MKTAG('t', 'w', 'o', 's'))
- _streams[_audioStreamIndex]->sample_size = (entry->bitsPerSample / 8) * entry->channels;
+ if (entry->getCodecTag() == MKTAG('r', 'a', 'w', ' ') || entry->getCodecTag() == MKTAG('t', 'w', 'o', 's'))
+ _streams[_audioStreamIndex]->sample_size = (entry->_bitsPerSample / 8) * entry->_channels;
}
}
}
@@ -94,32 +94,31 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
if (st->codec_type == CODEC_TYPE_AUDIO) {
debug(0, "Audio Codec FourCC: \'%s\'", tag2str(format));
- AudioSampleDesc *entry = new AudioSampleDesc();
- entry->codecTag = format;
+ AudioSampleDesc *entry = new AudioSampleDesc(st, format);
uint16 stsdVersion = _fd->readUint16BE();
_fd->readUint16BE(); // revision level
_fd->readUint32BE(); // vendor
- entry->channels = _fd->readUint16BE(); // channel count
- entry->bitsPerSample = _fd->readUint16BE(); // sample size
+ entry->_channels = _fd->readUint16BE(); // channel count
+ entry->_bitsPerSample = _fd->readUint16BE(); // sample size
_fd->readUint16BE(); // compression id = 0
_fd->readUint16BE(); // packet size = 0
- entry->sampleRate = (_fd->readUint32BE() >> 16);
+ entry->_sampleRate = (_fd->readUint32BE() >> 16);
debug(0, "stsd version =%d", stsdVersion);
if (stsdVersion == 0) {
// Not used, except in special cases. See below.
- entry->samplesPerFrame = entry->bytesPerFrame = 0;
+ entry->_samplesPerFrame = entry->_bytesPerFrame = 0;
} else if (stsdVersion == 1) {
// Read QT version 1 fields. In version 0 these dont exist.
- entry->samplesPerFrame = _fd->readUint32BE();
- debug(0, "stsd samples_per_frame =%d",entry->samplesPerFrame);
+ entry->_samplesPerFrame = _fd->readUint32BE();
+ debug(0, "stsd samples_per_frame =%d",entry->_samplesPerFrame);
_fd->readUint32BE(); // bytes per packet
- entry->bytesPerFrame = _fd->readUint32BE();
- debug(0, "stsd bytes_per_frame =%d", entry->bytesPerFrame);
+ entry->_bytesPerFrame = _fd->readUint32BE();
+ debug(0, "stsd bytes_per_frame =%d", entry->_bytesPerFrame);
_fd->readUint32BE(); // bytes per sample
} else {
warning("Unsupported QuickTime STSD audio version %d", stsdVersion);
@@ -130,12 +129,12 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
// Version 0 videos (such as the Riven ones) don't have this set,
// but we need it later on. Add it in here.
if (format == MKTAG('i', 'm', 'a', '4')) {
- entry->samplesPerFrame = 64;
- entry->bytesPerFrame = 34 * entry->channels;
+ entry->_samplesPerFrame = 64;
+ entry->_bytesPerFrame = 34 * entry->_channels;
}
- if (entry->sampleRate == 0 && st->time_scale > 1)
- entry->sampleRate = st->time_scale;
+ if (entry->_sampleRate == 0 && st->time_scale > 1)
+ entry->_sampleRate = st->time_scale;
return entry;
}
@@ -143,91 +142,6 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
return 0;
}
-bool QuickTimeAudioDecoder::checkAudioCodecSupport(uint32 tag, byte objectTypeMP4) {
- // Check if the codec is a supported codec
- if (tag == MKTAG('t', 'w', 'o', 's') || tag == MKTAG('r', 'a', 'w', ' ') || tag == MKTAG('i', 'm', 'a', '4'))
- return true;
-
-#ifdef AUDIO_QDM2_H
- if (tag == MKTAG('Q', 'D', 'M', '2'))
- return true;
-#endif
-
- if (tag == MKTAG('m', 'p', '4', 'a')) {
- Common::String audioType;
- switch (objectTypeMP4) {
- case 0x40: // AAC
-#ifdef USE_FAAD
- return true;
-#else
- audioType = "AAC";
- break;
-#endif
- default:
- audioType = "Unknown";
- break;
- }
- warning("No MPEG-4 audio (%s) support", audioType.c_str());
- } else
- warning("Audio Codec Not Supported: \'%s\'", tag2str(tag));
-
- return false;
-}
-
-AudioStream *QuickTimeAudioDecoder::createAudioStream(Common::SeekableReadStream *stream) {
- if (!stream || _audioStreamIndex < 0)
- return NULL;
-
- AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
-
- if (entry->codecTag == MKTAG('t', 'w', 'o', 's') || entry->codecTag == MKTAG('r', 'a', 'w', ' ')) {
- // Fortunately, most of the audio used in Myst videos is raw...
- uint16 flags = 0;
- if (entry->codecTag == MKTAG('r', 'a', 'w', ' '))
- flags |= FLAG_UNSIGNED;
- if (entry->channels == 2)
- flags |= FLAG_STEREO;
- if (entry->bitsPerSample == 16)
- flags |= FLAG_16BITS;
- uint32 dataSize = stream->size();
- byte *data = (byte *)malloc(dataSize);
- stream->read(data, dataSize);
- delete stream;
- return makeRawStream(data, dataSize, entry->sampleRate, flags);
- } else if (entry->codecTag == MKTAG('i', 'm', 'a', '4')) {
- // Riven uses this codec (as do some Myst ME videos)
- return makeADPCMStream(stream, DisposeAfterUse::YES, stream->size(), kADPCMApple, entry->sampleRate, entry->channels, 34);
- } else if (entry->codecTag == MKTAG('m', 'p', '4', 'a')) {
- // The 7th Guest iOS uses an MPEG-4 codec
-#ifdef USE_FAAD
- if (_streams[_audioStreamIndex]->objectTypeMP4 == 0x40)
- return makeAACStream(stream, DisposeAfterUse::YES, _streams[_audioStreamIndex]->extradata);
-#endif
-#ifdef AUDIO_QDM2_H
- } else if (entry->codecTag == MKTAG('Q', 'D', 'M', '2')) {
- // Myst ME uses this codec for many videos
- return makeQDM2Stream(stream, _streams[_audioStreamIndex]->extradata);
-#endif
- }
-
- error("Unsupported audio codec");
-
- return NULL;
-}
-
-uint32 QuickTimeAudioDecoder::getAudioChunkSampleCount(uint chunk) {
- if (_audioStreamIndex < 0)
- return 0;
-
- uint32 sampleCount = 0;
-
- for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
- if (chunk >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
- sampleCount = _streams[_audioStreamIndex]->sample_to_chunk[j].count;
-
- return sampleCount;
-}
-
bool QuickTimeAudioDecoder::isOldDemuxing() const {
assert(_audioStreamIndex >= 0);
return _streams[_audioStreamIndex]->stts_count == 1 && _streams[_audioStreamIndex]->stts_data[0].duration == 1;
@@ -240,7 +154,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
_fd->seek(_streams[_audioStreamIndex]->chunk_offsets[_curAudioChunk]);
// First, we have to get the sample count
- uint32 sampleCount = getAudioChunkSampleCount(_curAudioChunk);
+ uint32 sampleCount = entry->getAudioChunkSampleCount(_curAudioChunk);
assert(sampleCount);
if (isOldDemuxing()) {
@@ -250,12 +164,12 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
while (sampleCount > 0) {
uint32 samples = 0, size = 0;
- if (entry->samplesPerFrame >= 160) {
- samples = entry->samplesPerFrame;
- size = entry->bytesPerFrame;
- } else if (entry->samplesPerFrame > 1) {
- samples = MIN<uint32>((1024 / entry->samplesPerFrame) * entry->samplesPerFrame, sampleCount);
- size = (samples / entry->samplesPerFrame) * entry->bytesPerFrame;
+ if (entry->_samplesPerFrame >= 160) {
+ samples = entry->_samplesPerFrame;
+ size = entry->_bytesPerFrame;
+ } else if (entry->_samplesPerFrame > 1) {
+ samples = MIN<uint32>((1024 / entry->_samplesPerFrame) * entry->_samplesPerFrame, sampleCount);
+ size = (samples / entry->_samplesPerFrame) * entry->_bytesPerFrame;
} else {
samples = MIN<uint32>(1024, sampleCount);
size = samples * _streams[_audioStreamIndex]->sample_size;
@@ -274,7 +188,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
// Find our starting sample
uint32 startSample = 0;
for (uint32 i = 0; i < _curAudioChunk; i++)
- startSample += getAudioChunkSampleCount(i);
+ startSample += entry->getAudioChunkSampleCount(i);
for (uint32 i = 0; i < sampleCount; i++) {
uint32 size = (_streams[_audioStreamIndex]->sample_size != 0) ? _streams[_audioStreamIndex]->sample_size : _streams[_audioStreamIndex]->sample_sizes[i + startSample];
@@ -288,7 +202,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
}
// Now queue the buffer
- _audStream->queueAudioStream(createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES)));
+ _audStream->queueAudioStream(entry->createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES)));
delete wStream;
_curAudioChunk++;
@@ -301,7 +215,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
// Re-create the audio stream
delete _audStream;
Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
- _audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
+ _audStream = Audio::makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
// First, we need to track down what audio sample we need
Audio::Timestamp curAudioTime = where.convertToFramerate(_streams[_audioStreamIndex]->time_scale);
@@ -325,7 +239,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
uint32 totalSamples = 0;
_curAudioChunk = 0;
for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
- uint32 chunkSampleCount = getAudioChunkSampleCount(i);
+ uint32 chunkSampleCount = entry->getAudioChunkSampleCount(i);
if (seekSample < totalSamples + chunkSampleCount)
break;
@@ -338,7 +252,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
if (sample != totalSamples) {
// HACK: Skip a certain amount of samples from the stream
// (There's got to be a better way to do this!)
- int skipSamples = (sample - totalSamples) * entry->channels;
+ int skipSamples = (sample - totalSamples) * entry->_channels;
int16 *tempBuffer = new int16[skipSamples];
_audStream->readBuffer(tempBuffer, skipSamples);
@@ -346,11 +260,92 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
}
}
-QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc() : Common::QuickTimeParser::SampleDesc() {
- channels = 0;
- sampleRate = 0;
- samplesPerFrame = 0;
- bytesPerFrame = 0;
+QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
+ _channels = 0;
+ _sampleRate = 0;
+ _samplesPerFrame = 0;
+ _bytesPerFrame = 0;
+ _bitsPerSample = 0;
+}
+
+bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {
+ // Check if the codec is a supported codec
+ if (_codecTag == MKTAG('t', 'w', 'o', 's') || _codecTag == MKTAG('r', 'a', 'w', ' ') || _codecTag == MKTAG('i', 'm', 'a', '4'))
+ return true;
+
+#ifdef AUDIO_QDM2_H
+ if (_codecTag == MKTAG('Q', 'D', 'M', '2'))
+ return true;
+#endif
+
+ if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
+ Common::String audioType;
+ switch (_parentStream->objectTypeMP4) {
+ case 0x40: // AAC
+#ifdef USE_FAAD
+ return true;
+#else
+ audioType = "AAC";
+ break;
+#endif
+ default:
+ audioType = "Unknown";
+ break;
+ }
+ warning("No MPEG-4 audio (%s) support", audioType.c_str());
+ } else
+ warning("Audio Codec Not Supported: \'%s\'", tag2str(_codecTag));
+
+ return false;
+}
+
+uint32 QuickTimeAudioDecoder::AudioSampleDesc::getAudioChunkSampleCount(uint chunk) const {
+ uint32 sampleCount = 0;
+
+ for (uint32 j = 0; j < _parentStream->sample_to_chunk_sz; j++)
+ if (chunk >= _parentStream->sample_to_chunk[j].first)
+ sampleCount = _parentStream->sample_to_chunk[j].count;
+
+ return sampleCount;
+}
+
+AudioStream *QuickTimeAudioDecoder::AudioSampleDesc::createAudioStream(Common::SeekableReadStream *stream) const {
+ if (!stream)
+ return 0;
+
+ if (_codecTag == MKTAG('t', 'w', 'o', 's') || _codecTag == MKTAG('r', 'a', 'w', ' ')) {
+ // Fortunately, most of the audio used in Myst videos is raw...
+ uint16 flags = 0;
+ if (_codecTag == MKTAG('r', 'a', 'w', ' '))
+ flags |= FLAG_UNSIGNED;
+ if (_channels == 2)
+ flags |= FLAG_STEREO;
+ if (_bitsPerSample == 16)
+ flags |= FLAG_16BITS;
+ uint32 dataSize = stream->size();
+ byte *data = (byte *)malloc(dataSize);
+ stream->read(data, dataSize);
+ delete stream;
+ return makeRawStream(data, dataSize, _sampleRate, flags);
+ } else if (_codecTag == MKTAG('i', 'm', 'a', '4')) {
+ // Riven uses this codec (as do some Myst ME videos)
+ return makeADPCMStream(stream, DisposeAfterUse::YES, stream->size(), kADPCMApple, _sampleRate, _channels, 34);
+ } else if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
+ // The 7th Guest iOS uses an MPEG-4 codec
+#ifdef USE_FAAD
+ if (_parentStream->objectTypeMP4 == 0x40)
+ return makeAACStream(stream, DisposeAfterUse::YES, _parentStream->extradata);
+#endif
+#ifdef AUDIO_QDM2_H
+ } else if (_codecTag == MKTAG('Q', 'D', 'M', '2')) {
+ // Myst ME uses this codec for many videos
+ return makeQDM2Stream(stream, _parentStream->extradata);
+#endif
+ }
+
+ error("Unsupported audio codec");
+
+ return NULL;
}
/**
diff --git a/audio/decoders/quicktime_intern.h b/audio/decoders/quicktime_intern.h
index 691ef7b..3279ad8 100644
--- a/audio/decoders/quicktime_intern.h
+++ b/audio/decoders/quicktime_intern.h
@@ -65,30 +65,33 @@ public:
bool loadAudioStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle);
protected:
- struct AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
- AudioSampleDesc();
-
- uint16 channels;
- uint32 sampleRate;
- uint32 samplesPerFrame;
- uint32 bytesPerFrame;
+ class AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
+ public:
+ AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
+
+ bool isAudioCodecSupported() const;
+ uint32 getAudioChunkSampleCount(uint chunk) const;
+ AudioStream *createAudioStream(Common::SeekableReadStream *stream) const;
+
+ // TODO: Make private in the long run
+ uint16 _bitsPerSample;
+ uint16 _channels;
+ uint32 _sampleRate;
+ uint32 _samplesPerFrame;
+ uint32 _bytesPerFrame;
};
// Common::QuickTimeParser API
virtual Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
- AudioStream *createAudioStream(Common::SeekableReadStream *stream);
- bool checkAudioCodecSupport(uint32 tag, byte objectTypeMP4);
void init();
-
+ void setAudioStreamPos(const Timestamp &where);
+ bool isOldDemuxing() const;
void queueNextAudioChunk();
- uint32 getAudioChunkSampleCount(uint chunk);
- int8 _audioStreamIndex;
+
+ int _audioStreamIndex;
uint _curAudioChunk;
QueuingAudioStream *_audStream;
-
- void setAudioStreamPos(const Timestamp &where);
- bool isOldDemuxing() const;
};
} // End of namespace Audio
diff --git a/common/quicktime.cpp b/common/quicktime.cpp
index cf50584..606e1bb 100644
--- a/common/quicktime.cpp
+++ b/common/quicktime.cpp
@@ -686,7 +686,7 @@ int QuickTimeParser::readWAVE(MOVatom atom) {
if (atom.size > (1 << 30))
return -1;
- if (st->sampleDescs[0]->codecTag == MKTAG('Q', 'D', 'M', '2')) // Read extradata for QDM2
+ if (st->sampleDescs[0]->getCodecTag() == MKTAG('Q', 'D', 'M', '2')) // Read extradata for QDM2
st->extradata = _fd->readStream(atom.size - 8);
else if (atom.size > 8)
return readDefault(atom);
@@ -773,9 +773,9 @@ void QuickTimeParser::close() {
_fd = 0;
}
-QuickTimeParser::SampleDesc::SampleDesc() {
- codecTag = 0;
- bitsPerSample = 0;
+QuickTimeParser::SampleDesc::SampleDesc(MOVStreamContext *parentStream, uint32 codecTag) {
+ _parentStream = parentStream;
+ _codecTag = codecTag;
}
QuickTimeParser::MOVStreamContext::MOVStreamContext() {
diff --git a/common/quicktime.h b/common/quicktime.h
index a5903bc..2bd461e 100644
--- a/common/quicktime.h
+++ b/common/quicktime.h
@@ -116,12 +116,18 @@ protected:
Common::Rational mediaRate;
};
- struct SampleDesc {
- SampleDesc();
+ struct MOVStreamContext;
+
+ class SampleDesc {
+ public:
+ SampleDesc(MOVStreamContext *parentStream, uint32 codecTag);
virtual ~SampleDesc() {}
- uint32 codecTag;
- uint16 bitsPerSample;
+ uint32 getCodecTag() const { return _codecTag; }
+
+ protected:
+ MOVStreamContext *_parentStream;
+ uint32 _codecTag;
};
enum CodecType {
diff --git a/video/qt_decoder.cpp b/video/qt_decoder.cpp
index 328c3fb..dbf8603 100644
--- a/video/qt_decoder.cpp
+++ b/video/qt_decoder.cpp
@@ -203,38 +203,6 @@ void QuickTimeDecoder::seekToTime(Audio::Timestamp time) {
seekToFrame(frame);
}
-Codec *QuickTimeDecoder::createCodec(uint32 codecTag, byte bitsPerPixel) {
- if (codecTag == MKTAG('c','v','i','d')) {
- // Cinepak: As used by most Myst and all Riven videos as well as some Myst ME videos. "The Chief" videos also use this.
- return new CinepakDecoder(bitsPerPixel);
- } else if (codecTag == MKTAG('r','p','z','a')) {
- // Apple Video ("Road Pizza"): Used by some Myst videos.
- return new RPZADecoder(getWidth(), getHeight());
- } else if (codecTag == MKTAG('r','l','e',' ')) {
- // QuickTime RLE: Used by some Myst ME videos.
- return new QTRLEDecoder(getWidth(), getHeight(), bitsPerPixel);
- } else if (codecTag == MKTAG('s','m','c',' ')) {
- // Apple SMC: Used by some Myst videos.
- return new SMCDecoder(getWidth(), getHeight());
- } else if (codecTag == MKTAG('S','V','Q','1')) {
- // Sorenson Video 1: Used by some Myst ME videos.
- warning("Sorenson Video 1 not yet supported");
- } else if (codecTag == MKTAG('S','V','Q','3')) {
- // Sorenson Video 3: Used by some Myst ME videos.
- warning("Sorenson Video 3 not yet supported");
- } else if (codecTag == MKTAG('j','p','e','g')) {
- // Motion JPEG: Used by some Myst ME 10th Anniversary videos.
- return new JPEGDecoder();
- } else if (codecTag == MKTAG('Q','k','B','k')) {
- // CDToons: Used by most of the Broderbund games.
- return new CDToonsDecoder(getWidth(), getHeight());
- } else {
- warning("Unsupported codec \'%s\'", tag2str(codecTag));
- }
-
- return NULL;
-}
-
void QuickTimeDecoder::startAudio() {
if (_audStream) {
updateAudioBuffer();
@@ -256,7 +224,7 @@ Codec *QuickTimeDecoder::findDefaultVideoCodec() const {
if (_videoStreamIndex < 0 || _streams[_videoStreamIndex]->sampleDescs.empty())
return 0;
- return ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[0])->videoCodec;
+ return ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[0])->_videoCodec;
}
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
@@ -282,22 +250,22 @@ const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
// Find which video description entry we want
VideoSampleDesc *entry = (VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[descId - 1];
- if (!entry->videoCodec)
+ if (!entry->_videoCodec)
return 0;
- const Graphics::Surface *frame = entry->videoCodec->decodeImage(frameData);
+ const Graphics::Surface *frame = entry->_videoCodec->decodeImage(frameData);
delete frameData;
// Update the palette
- if (entry->videoCodec->containsPalette()) {
+ if (entry->_videoCodec->containsPalette()) {
// The codec itself contains a palette
- if (entry->videoCodec->hasDirtyPalette()) {
- _palette = entry->videoCodec->getPalette();
+ if (entry->_videoCodec->hasDirtyPalette()) {
+ _palette = entry->_videoCodec->getPalette();
_dirtyPalette = true;
}
} else {
// Check if the video description has been updated
- byte *palette = entry->palette;
+ byte *palette = entry->_palette;
if (palette != _palette) {
_palette = palette;
@@ -381,10 +349,8 @@ void QuickTimeDecoder::init() {
// Initialize video, if present
if (_videoStreamIndex >= 0) {
- for (uint32 i = 0; i < _streams[_videoStreamIndex]->sampleDescs.size(); i++) {
- VideoSampleDesc *entry = (VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[i];
- entry->videoCodec = createCodec(entry->codecTag, entry->bitsPerSample & 0x1F);
- }
+ for (uint32 i = 0; i < _streams[_videoStreamIndex]->sampleDescs.size(); i++)
+ ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[i])->initCodec();
if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
// We have to initialize the scaled surface
@@ -398,8 +364,7 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
if (st->codec_type == CODEC_TYPE_VIDEO) {
debug(0, "Video Codec FourCC: \'%s\'", tag2str(format));
- VideoSampleDesc *entry = new VideoSampleDesc();
- entry->codecTag = format;
+ VideoSampleDesc *entry = new VideoSampleDesc(st, format);
_fd->readUint16BE(); // version
_fd->readUint16BE(); // revision level
@@ -426,24 +391,24 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
byte codec_name[32];
_fd->read(codec_name, 32); // codec name, pascal string (FIXME: true for mp4?)
if (codec_name[0] <= 31) {
- memcpy(entry->codecName, &codec_name[1], codec_name[0]);
- entry->codecName[codec_name[0]] = 0;
+ memcpy(entry->_codecName, &codec_name[1], codec_name[0]);
+ entry->_codecName[codec_name[0]] = 0;
}
- entry->bitsPerSample = _fd->readUint16BE(); // depth
- entry->colorTableId = _fd->readUint16BE(); // colortable id
+ entry->_bitsPerSample = _fd->readUint16BE(); // depth
+ entry->_colorTableId = _fd->readUint16BE(); // colortable id
// figure out the palette situation
- byte colorDepth = entry->bitsPerSample & 0x1F;
- bool colorGreyscale = (entry->bitsPerSample & 0x20) != 0;
+ byte colorDepth = entry->_bitsPerSample & 0x1F;
+ bool colorGreyscale = (entry->_bitsPerSample & 0x20) != 0;
debug(0, "color depth: %d", colorDepth);
// if the depth is 2, 4, or 8 bpp, file is palettized
if (colorDepth == 2 || colorDepth == 4 || colorDepth == 8) {
// Initialize the palette
- entry->palette = new byte[256 * 3];
- memset(entry->palette, 0, 256 * 3);
+ entry->_palette = new byte[256 * 3];
+ memset(entry->_palette, 0, 256 * 3);
if (colorGreyscale) {
debug(0, "Greyscale palette");
@@ -453,12 +418,12 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
int16 colorIndex = 255;
byte colorDec = 256 / (colorCount - 1);
for (byte j = 0; j < colorCount; j++) {
- entry->palette[j * 3] = entry->palette[j * 3 + 1] = entry->palette[j * 3 + 2] = colorIndex;
+ entry->_palette[j * 3] = entry->_palette[j * 3 + 1] = entry->_palette[j * 3 + 2] = colorIndex;
colorIndex -= colorDec;
if (colorIndex < 0)
colorIndex = 0;
}
- } else if (entry->colorTableId & 0x08) {
+ } else if (entry->_colorTableId & 0x08) {
// if flag bit 3 is set, use the default palette
//uint16 colorCount = 1 << colorDepth;
@@ -476,11 +441,11 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
// up front
_fd->readByte();
_fd->readByte();
- entry->palette[j * 3] = _fd->readByte();
+ entry->_palette[j * 3] = _fd->readByte();
_fd->readByte();
- entry->palette[j * 3 + 1] = _fd->readByte();
+ entry->_palette[j * 3 + 1] = _fd->readByte();
_fd->readByte();
- entry->palette[j * 3 + 2] = _fd->readByte();
+ entry->_palette[j * 3 + 2] = _fd->readByte();
_fd->readByte();
}
}
@@ -581,10 +546,10 @@ void QuickTimeDecoder::updateAudioBuffer() {
uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams();
for (; timeFilled < timeToNextFrame && curAudioChunk < _streams[_audioStreamIndex]->chunk_count; numberOfChunksNeeded++, curAudioChunk++) {
- uint32 sampleCount = getAudioChunkSampleCount(curAudioChunk);
+ uint32 sampleCount = entry->getAudioChunkSampleCount(curAudioChunk);
assert(sampleCount);
- timeFilled += sampleCount * 1000 / entry->sampleRate;
+ timeFilled += sampleCount * 1000 / entry->_sampleRate;
}
// Add a couple extra to ensure we don't underrun
@@ -596,16 +561,56 @@ void QuickTimeDecoder::updateAudioBuffer() {
queueNextAudioChunk();
}
-QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc() : Common::QuickTimeParser::SampleDesc() {
- memset(codecName, 0, 32);
- colorTableId = 0;
- palette = 0;
- videoCodec = 0;
+QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
+ memset(_codecName, 0, 32);
+ _colorTableId = 0;
+ _palette = 0;
+ _videoCodec = 0;
+ _bitsPerSample = 0;
}
QuickTimeDecoder::VideoSampleDesc::~VideoSampleDesc() {
- delete[] palette;
- delete videoCodec;
+ delete[] _palette;
+ delete _videoCodec;
+}
+
+void QuickTimeDecoder::VideoSampleDesc::initCodec() {
+ switch (_codecTag) {
+ case MKTAG('c','v','i','d'):
+ // Cinepak: As used by most Myst and all Riven videos as well as some Myst ME videos. "The Chief" videos also use this.
+ _videoCodec = new CinepakDecoder(_bitsPerSample & 0x1f);
+ break;
+ case MKTAG('r','p','z','a'):
+ // Apple Video ("Road Pizza"): Used by some Myst videos.
+ _videoCodec = new RPZADecoder(_parentStream->width, _parentStream->height);
+ break;
+ case MKTAG('r','l','e',' '):
+ // QuickTime RLE: Used by some Myst ME videos.
+ _videoCodec = new QTRLEDecoder(_parentStream->width, _parentStream->height, _bitsPerSample & 0x1f);
+ break;
+ case MKTAG('s','m','c',' '):
+ // Apple SMC: Used by some Myst videos.
+ _videoCodec = new SMCDecoder(_parentStream->width, _parentStream->height);
+ break;
+ case MKTAG('S','V','Q','1'):
+ // Sorenson Video 1: Used by some Myst ME videos.
+ warning("Sorenson Video 1 not yet supported");
+ break;
+ case MKTAG('S','V','Q','3'):
+ // Sorenson Video 3: Used by some Myst ME videos.
+ warning("Sorenson Video 3 not yet supported");
+ break;
+ case MKTAG('j','p','e','g'):
+ // Motion JPEG: Used by some Myst ME 10th Anniversary videos.
+ _videoCodec = new JPEGDecoder();
+ break;
+ case MKTAG('Q','k','B','k'):
+ // CDToons: Used by most of the Broderbund games.
+ _videoCodec = new CDToonsDecoder(_parentStream->width, _parentStream->height);
+ break;
+ default:
+ warning("Unsupported codec \'%s\'", tag2str(_codecTag));
+ }
}
} // End of namespace Video
diff --git a/video/qt_decoder.h b/video/qt_decoder.h
index d8beda0..e3955d7 100644
--- a/video/qt_decoder.h
+++ b/video/qt_decoder.h
@@ -114,14 +114,19 @@ public:
uint32 getDuration() const { return _duration * 1000 / _timeScale; }
protected:
- struct VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
- VideoSampleDesc();
+ class VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
+ public:
+ VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
~VideoSampleDesc();
- char codecName[32];
- uint16 colorTableId;
- byte *palette;
- Codec *videoCodec;
+ void initCodec();
+
+ // TODO: Make private in the long run
+ uint16 _bitsPerSample;
+ char _codecName[32];
+ uint16 _colorTableId;
+ byte *_palette;
+ Codec *_videoCodec;
};
Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
Commit: 547fd1bdcabcba0e741eb31100ba99ff73399d24
https://github.com/scummvm/scummvm/commit/547fd1bdcabcba0e741eb31100ba99ff73399d24
Author: Matthew Hoops (clone2727 at gmail.com)
Date: 2011-06-02T21:58:29-07:00
Commit Message:
COMMON: Cleanup QuickTime variable and struct naming
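The diff below renames the demuxer's internal types and members: MOVStreamContext becomes Track, MOVatom becomes Atom, MOVstts/MOVstsc become TimeToSampleEntry/SampleToChunkEntry, snake_case fields become camelCase (sample_size -> sampleSize, time_scale -> timeScale, extradata -> extraData, and so on), and the fixed _streams[] array plus _numStreams counter is replaced by a growable array of Track pointers, _tracks. A rough standalone sketch of that last change, with std::vector standing in for Common::Array and Track cut down to two fields (none of this is part of the commit itself):

    #include <cstdio>
    #include <vector>

    // Heavily reduced stand-in for QuickTimeParser::Track (ex MOVStreamContext).
    struct Track {
        int codecType = 0; // formerly codec_type; 0 plays the role of CODEC_TYPE_MOV_OTHER
        int timeScale = 0; // formerly time_scale
    };

    int main() {
        // Replaces the fixed _streams[] array and the _numStreams counter.
        std::vector<Track *> tracks;

        // readTRAK(): _tracks.push_back(track) instead of _streams[_numStreams++].
        tracks.push_back(new Track());
        tracks.back()->codecType = 1;     // pretend readHDLR() tagged this track as audio
        tracks.back()->timeScale = 22050; // pretend readMDHD() filled this in

        tracks.push_back(new Track());    // a second track left as "other"

        // init(): unhandled tracks are dropped in place; the manual
        // shift-down loop over _streams/_numStreams disappears.
        for (size_t i = 0; i < tracks.size(); i++) {
            if (tracks[i]->codecType == 0) {
                delete tracks[i];
                tracks.erase(tracks.begin() + i);
                i--;
            }
        }

        std::printf("remaining tracks: %u, timeScale = %d\n",
                    (unsigned)tracks.size(), tracks[0]->timeScale);

        delete tracks[0];
        return 0;
    }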
Changed paths:
audio/decoders/quicktime.cpp
audio/decoders/quicktime_intern.h
common/quicktime.cpp
common/quicktime.h
video/qt_decoder.cpp
video/qt_decoder.h
diff --git a/audio/decoders/quicktime.cpp b/audio/decoders/quicktime.cpp
index a22f039..0f2e766 100644
--- a/audio/decoders/quicktime.cpp
+++ b/audio/decoders/quicktime.cpp
@@ -68,16 +68,16 @@ bool QuickTimeAudioDecoder::loadAudioStream(Common::SeekableReadStream *stream,
void QuickTimeAudioDecoder::init() {
Common::QuickTimeParser::init();
- _audioStreamIndex = -1;
+ _audioTrackIndex = -1;
// Find an audio stream
- for (uint32 i = 0; i < _numStreams; i++)
- if (_streams[i]->codec_type == CODEC_TYPE_AUDIO && _audioStreamIndex < 0)
- _audioStreamIndex = i;
+ for (uint32 i = 0; i < _tracks.size(); i++)
+ if (_tracks[i]->codecType == CODEC_TYPE_AUDIO && _audioTrackIndex < 0)
+ _audioTrackIndex = i;
// Initialize audio, if present
- if (_audioStreamIndex >= 0) {
- AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
+ if (_audioTrackIndex >= 0) {
+ AudioSampleDesc *entry = (AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
if (entry->isAudioCodecSupported()) {
_audStream = makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
@@ -85,16 +85,16 @@ void QuickTimeAudioDecoder::init() {
// Make sure the bits per sample transfers to the sample size
if (entry->getCodecTag() == MKTAG('r', 'a', 'w', ' ') || entry->getCodecTag() == MKTAG('t', 'w', 'o', 's'))
- _streams[_audioStreamIndex]->sample_size = (entry->_bitsPerSample / 8) * entry->_channels;
+ _tracks[_audioTrackIndex]->sampleSize = (entry->_bitsPerSample / 8) * entry->_channels;
}
}
}
-Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVStreamContext *st, uint32 format) {
- if (st->codec_type == CODEC_TYPE_AUDIO) {
+Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(Track *track, uint32 format) {
+ if (track->codecType == CODEC_TYPE_AUDIO) {
debug(0, "Audio Codec FourCC: \'%s\'", tag2str(format));
- AudioSampleDesc *entry = new AudioSampleDesc(st, format);
+ AudioSampleDesc *entry = new AudioSampleDesc(track, format);
uint16 stsdVersion = _fd->readUint16BE();
_fd->readUint16BE(); // revision level
@@ -133,8 +133,8 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
entry->_bytesPerFrame = 34 * entry->_channels;
}
- if (entry->_sampleRate == 0 && st->time_scale > 1)
- entry->_sampleRate = st->time_scale;
+ if (entry->_sampleRate == 0 && track->timeScale > 1)
+ entry->_sampleRate = track->timeScale;
return entry;
}
@@ -143,15 +143,15 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
}
bool QuickTimeAudioDecoder::isOldDemuxing() const {
- assert(_audioStreamIndex >= 0);
- return _streams[_audioStreamIndex]->stts_count == 1 && _streams[_audioStreamIndex]->stts_data[0].duration == 1;
+ assert(_audioTrackIndex >= 0);
+ return _tracks[_audioTrackIndex]->timeToSampleCount == 1 && _tracks[_audioTrackIndex]->timeToSample[0].duration == 1;
}
void QuickTimeAudioDecoder::queueNextAudioChunk() {
- AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
+ AudioSampleDesc *entry = (AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
Common::MemoryWriteStreamDynamic *wStream = new Common::MemoryWriteStreamDynamic();
- _fd->seek(_streams[_audioStreamIndex]->chunk_offsets[_curAudioChunk]);
+ _fd->seek(_tracks[_audioTrackIndex]->chunkOffsets[_curAudioChunk]);
// First, we have to get the sample count
uint32 sampleCount = entry->getAudioChunkSampleCount(_curAudioChunk);
@@ -172,7 +172,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
size = (samples / entry->_samplesPerFrame) * entry->_bytesPerFrame;
} else {
samples = MIN<uint32>(1024, sampleCount);
- size = samples * _streams[_audioStreamIndex]->sample_size;
+ size = samples * _tracks[_audioTrackIndex]->sampleSize;
}
// Now, we read in the data for this data and output it
@@ -191,7 +191,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
startSample += entry->getAudioChunkSampleCount(i);
for (uint32 i = 0; i < sampleCount; i++) {
- uint32 size = (_streams[_audioStreamIndex]->sample_size != 0) ? _streams[_audioStreamIndex]->sample_size : _streams[_audioStreamIndex]->sample_sizes[i + startSample];
+ uint32 size = (_tracks[_audioTrackIndex]->sampleSize != 0) ? _tracks[_audioTrackIndex]->sampleSize : _tracks[_audioTrackIndex]->sampleSizes[i + startSample];
// Now, we read in the data for this data and output it
byte *data = (byte *)malloc(size);
@@ -214,31 +214,31 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
// Re-create the audio stream
delete _audStream;
- Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
+ Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
_audStream = Audio::makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
// First, we need to track down what audio sample we need
- Audio::Timestamp curAudioTime = where.convertToFramerate(_streams[_audioStreamIndex]->time_scale);
+ Audio::Timestamp curAudioTime = where.convertToFramerate(_tracks[_audioTrackIndex]->timeScale);
uint32 sample = curAudioTime.totalNumberOfFrames();
uint32 seekSample = sample;
if (!isOldDemuxing()) {
// We shouldn't have audio samples that are a different duration
// That would be quite bad!
- if (_streams[_audioStreamIndex]->stts_count != 1) {
+ if (_tracks[_audioTrackIndex]->timeToSampleCount != 1) {
warning("Failed seeking");
return;
}
// Note that duration is in terms of *one* channel
// This eases calculation a bit
- seekSample /= _streams[_audioStreamIndex]->stts_data[0].duration;
+ seekSample /= _tracks[_audioTrackIndex]->timeToSample[0].duration;
}
// Now to track down what chunk it's in
uint32 totalSamples = 0;
_curAudioChunk = 0;
- for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
+ for (uint32 i = 0; i < _tracks[_audioTrackIndex]->chunkCount; i++, _curAudioChunk++) {
uint32 chunkSampleCount = entry->getAudioChunkSampleCount(i);
if (seekSample < totalSamples + chunkSampleCount)
@@ -260,7 +260,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
}
}
-QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
+QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
_channels = 0;
_sampleRate = 0;
_samplesPerFrame = 0;
@@ -280,7 +280,7 @@ bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {
if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
Common::String audioType;
- switch (_parentStream->objectTypeMP4) {
+ switch (_parentTrack->objectTypeMP4) {
case 0x40: // AAC
#ifdef USE_FAAD
return true;
@@ -302,9 +302,9 @@ bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {
uint32 QuickTimeAudioDecoder::AudioSampleDesc::getAudioChunkSampleCount(uint chunk) const {
uint32 sampleCount = 0;
- for (uint32 j = 0; j < _parentStream->sample_to_chunk_sz; j++)
- if (chunk >= _parentStream->sample_to_chunk[j].first)
- sampleCount = _parentStream->sample_to_chunk[j].count;
+ for (uint32 j = 0; j < _parentTrack->sampleToChunkCount; j++)
+ if (chunk >= _parentTrack->sampleToChunk[j].first)
+ sampleCount = _parentTrack->sampleToChunk[j].count;
return sampleCount;
}
@@ -333,13 +333,13 @@ AudioStream *QuickTimeAudioDecoder::AudioSampleDesc::createAudioStream(Common::S
} else if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
// The 7th Guest iOS uses an MPEG-4 codec
#ifdef USE_FAAD
- if (_parentStream->objectTypeMP4 == 0x40)
- return makeAACStream(stream, DisposeAfterUse::YES, _parentStream->extradata);
+ if (_parentTrack->objectTypeMP4 == 0x40)
+ return makeAACStream(stream, DisposeAfterUse::YES, _parentTrack->extraData);
#endif
#ifdef AUDIO_QDM2_H
} else if (_codecTag == MKTAG('Q', 'D', 'M', '2')) {
// Myst ME uses this codec for many videos
- return makeQDM2Stream(stream, _parentStream->extradata);
+ return makeQDM2Stream(stream, _parentTrack->extraData);
#endif
}
@@ -357,11 +357,11 @@ public:
~QuickTimeAudioStream() {}
bool openFromFile(const Common::String &filename) {
- return QuickTimeAudioDecoder::loadAudioFile(filename) && _audioStreamIndex >= 0 && _audStream;
+ return QuickTimeAudioDecoder::loadAudioFile(filename) && _audioTrackIndex >= 0 && _audStream;
}
bool openFromStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle) {
- return QuickTimeAudioDecoder::loadAudioStream(stream, disposeFileHandle) && _audioStreamIndex >= 0 && _audStream;
+ return QuickTimeAudioDecoder::loadAudioStream(stream, disposeFileHandle) && _audioTrackIndex >= 0 && _audStream;
}
// AudioStream API
@@ -380,7 +380,7 @@ public:
bool isStereo() const { return _audStream->isStereo(); }
int getRate() const { return _audStream->getRate(); }
- bool endOfData() const { return _curAudioChunk >= _streams[_audioStreamIndex]->chunk_count && _audStream->endOfData(); }
+ bool endOfData() const { return _curAudioChunk >= _tracks[_audioTrackIndex]->chunkCount && _audStream->endOfData(); }
// SeekableAudioStream API
bool seek(const Timestamp &where) {
@@ -392,7 +392,7 @@ public:
}
Timestamp getLength() const {
- return Timestamp(0, _streams[_audioStreamIndex]->duration, _streams[_audioStreamIndex]->time_scale);
+ return Timestamp(0, _tracks[_audioTrackIndex]->duration, _tracks[_audioTrackIndex]->timeScale);
}
};
diff --git a/audio/decoders/quicktime_intern.h b/audio/decoders/quicktime_intern.h
index 3279ad8..f288d56 100644
--- a/audio/decoders/quicktime_intern.h
+++ b/audio/decoders/quicktime_intern.h
@@ -67,7 +67,7 @@ public:
protected:
class AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
- AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
+ AudioSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag);
bool isAudioCodecSupported() const;
uint32 getAudioChunkSampleCount(uint chunk) const;
@@ -82,14 +82,14 @@ protected:
};
// Common::QuickTimeParser API
- virtual Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
+ virtual Common::QuickTimeParser::SampleDesc *readSampleDesc(Track *track, uint32 format);
void init();
void setAudioStreamPos(const Timestamp &where);
bool isOldDemuxing() const;
void queueNextAudioChunk();
- int _audioStreamIndex;
+ int _audioTrackIndex;
uint _curAudioChunk;
QueuingAudioStream *_audStream;
};
diff --git a/common/quicktime.cpp b/common/quicktime.cpp
index 606e1bb..57534b3 100644
--- a/common/quicktime.cpp
+++ b/common/quicktime.cpp
@@ -48,7 +48,6 @@ namespace Common {
QuickTimeParser::QuickTimeParser() {
_beginOffset = 0;
- _numStreams = 0;
_fd = 0;
_scaleFactorX = 1;
_scaleFactorY = 1;
@@ -68,10 +67,9 @@ bool QuickTimeParser::parseFile(const Common::String &filename) {
return false;
_foundMOOV = false;
- _numStreams = 0;
_disposeFileHandle = DisposeAfterUse::YES;
- MOVatom atom = { 0, 0, 0xffffffff };
+ Atom atom = { 0, 0, 0xffffffff };
if (_resFork->hasResFork()) {
// Search for a 'moov' resource
@@ -104,10 +102,9 @@ bool QuickTimeParser::parseFile(const Common::String &filename) {
bool QuickTimeParser::parseStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle) {
_fd = stream;
_foundMOOV = false;
- _numStreams = 0;
_disposeFileHandle = disposeFileHandle;
- MOVatom atom = { 0, 0, 0xffffffff };
+ Atom atom = { 0, 0, 0xffffffff };
if (readDefault(atom) < 0 || !_foundMOOV) {
close();
@@ -119,21 +116,19 @@ bool QuickTimeParser::parseStream(Common::SeekableReadStream *stream, DisposeAft
}
void QuickTimeParser::init() {
- // Remove unknown/unhandled streams
- for (uint32 i = 0; i < _numStreams;) {
- if (_streams[i]->codec_type == CODEC_TYPE_MOV_OTHER) {
- delete _streams[i];
- for (uint32 j = i + 1; j < _numStreams; j++)
- _streams[j - 1] = _streams[j];
- _numStreams--;
- } else
- i++;
+ // Remove unknown/unhandled tracks
+ for (uint32 i = 0; i < _tracks.size(); i++) {
+ if (_tracks[i]->codecType == CODEC_TYPE_MOV_OTHER) {
+ delete _tracks[i];
+ _tracks.remove_at(i);
+ i--;
+ }
}
// Adjust time scale
- for (uint32 i = 0; i < _numStreams; i++)
- if (!_streams[i]->time_scale)
- _streams[i]->time_scale = _timeScale;
+ for (uint32 i = 0; i < _tracks.size(); i++)
+ if (!_tracks[i]->timeScale)
+ _tracks[i]->timeScale = _timeScale;
}
void QuickTimeParser::initParseTable() {
@@ -170,9 +165,9 @@ void QuickTimeParser::initParseTable() {
_parseTable = p;
}
-int QuickTimeParser::readDefault(MOVatom atom) {
+int QuickTimeParser::readDefault(Atom atom) {
uint32 total_size = 0;
- MOVatom a;
+ Atom a;
int err = 0;
a.offset = atom.offset;
@@ -240,14 +235,14 @@ int QuickTimeParser::readDefault(MOVatom atom) {
return err;
}
-int QuickTimeParser::readLeaf(MOVatom atom) {
+int QuickTimeParser::readLeaf(Atom atom) {
if (atom.size > 1)
_fd->seek(atom.size, SEEK_SET);
return 0;
}
-int QuickTimeParser::readMOOV(MOVatom atom) {
+int QuickTimeParser::readMOOV(Atom atom) {
if (readDefault(atom) < 0)
return -1;
@@ -256,7 +251,7 @@ int QuickTimeParser::readMOOV(MOVatom atom) {
return 1;
}
-int QuickTimeParser::readCMOV(MOVatom atom) {
+int QuickTimeParser::readCMOV(Atom atom) {
#ifdef USE_ZLIB
// Read in the dcom atom
_fd->readUint32BE();
@@ -294,7 +289,7 @@ int QuickTimeParser::readCMOV(MOVatom atom) {
_fd = new Common::MemoryReadStream(uncompressedData, uncompressedSize, DisposeAfterUse::YES);
// Read the contents of the uncompressed data
- MOVatom a = { MKTAG('m', 'o', 'o', 'v'), 0, uncompressedSize };
+ Atom a = { MKTAG('m', 'o', 'o', 'v'), 0, uncompressedSize };
int err = readDefault(a);
// Assign the file handle back to the original handle
@@ -309,7 +304,7 @@ int QuickTimeParser::readCMOV(MOVatom atom) {
#endif
}
-int QuickTimeParser::readMVHD(MOVatom atom) {
+int QuickTimeParser::readMVHD(Atom atom) {
byte version = _fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
@@ -358,21 +353,21 @@ int QuickTimeParser::readMVHD(MOVatom atom) {
return 0;
}
-int QuickTimeParser::readTRAK(MOVatom atom) {
- MOVStreamContext *sc = new MOVStreamContext();
+int QuickTimeParser::readTRAK(Atom atom) {
+ Track *track = new Track();
- if (!sc)
+ if (!track)
return -1;
- sc->codec_type = CODEC_TYPE_MOV_OTHER;
- sc->start_time = 0; // XXX: check
- _streams[_numStreams++] = sc;
+ track->codecType = CODEC_TYPE_MOV_OTHER;
+ track->startTime = 0; // XXX: check
+ _tracks.push_back(track);
return readDefault(atom);
}
-int QuickTimeParser::readTKHD(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readTKHD(Atom atom) {
+ Track *track = _tracks.back();
byte version = _fd->readByte();
_fd->readByte(); _fd->readByte();
@@ -392,9 +387,9 @@ int QuickTimeParser::readTKHD(MOVatom atom) {
_fd->readUint32BE(); // modification time
}
- /* st->id = */_fd->readUint32BE(); // track id (NOT 0 !)
+ /* track->id = */_fd->readUint32BE(); // track id (NOT 0 !)
_fd->readUint32BE(); // reserved
- //st->start_time = 0; // check
+ //track->startTime = 0; // check
(version == 1) ? (_fd->readUint32BE(), _fd->readUint32BE()) : _fd->readUint32BE(); // highlevel (considering edits) duration in movie timebase
_fd->readUint32BE(); // reserved
_fd->readUint32BE(); // reserved
@@ -411,11 +406,11 @@ int QuickTimeParser::readTKHD(MOVatom atom) {
uint32 yMod = _fd->readUint32BE();
_fd->skip(16);
- st->scaleFactorX = Common::Rational(0x10000, xMod);
- st->scaleFactorY = Common::Rational(0x10000, yMod);
+ track->scaleFactorX = Common::Rational(0x10000, xMod);
+ track->scaleFactorY = Common::Rational(0x10000, yMod);
- st->scaleFactorX.debugPrint(1, "readTKHD(): scaleFactorX =");
- st->scaleFactorY.debugPrint(1, "readTKHD(): scaleFactorY =");
+ track->scaleFactorX.debugPrint(1, "readTKHD(): scaleFactorX =");
+ track->scaleFactorY.debugPrint(1, "readTKHD(): scaleFactorY =");
// these are fixed-point, 16:16
// uint32 tkWidth = _fd->readUint32BE() >> 16; // track width
@@ -425,33 +420,33 @@ int QuickTimeParser::readTKHD(MOVatom atom) {
}
// edit list atom
-int QuickTimeParser::readELST(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readELST(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
- st->editCount = _fd->readUint32BE();
- st->editList = new EditListEntry[st->editCount];
+ track->editCount = _fd->readUint32BE();
+ track->editList = new EditListEntry[track->editCount];
- debug(2, "Track %d edit list count: %d", _numStreams - 1, st->editCount);
+ debug(2, "Track %d edit list count: %d", _tracks.size() - 1, track->editCount);
- for (uint32 i = 0; i < st->editCount; i++){
- st->editList[i].trackDuration = _fd->readUint32BE();
- st->editList[i].mediaTime = _fd->readSint32BE();
- st->editList[i].mediaRate = Common::Rational(_fd->readUint32BE(), 0x10000);
- debugN(3, "\tDuration = %d, Media Time = %d, ", st->editList[i].trackDuration, st->editList[i].mediaTime);
- st->editList[i].mediaRate.debugPrint(3, "Media Rate =");
+ for (uint32 i = 0; i < track->editCount; i++){
+ track->editList[i].trackDuration = _fd->readUint32BE();
+ track->editList[i].mediaTime = _fd->readSint32BE();
+ track->editList[i].mediaRate = Common::Rational(_fd->readUint32BE(), 0x10000);
+ debugN(3, "\tDuration = %d, Media Time = %d, ", track->editList[i].trackDuration, track->editList[i].mediaTime);
+ track->editList[i].mediaRate.debugPrint(3, "Media Rate =");
}
- if (st->editCount != 1)
+ if (track->editCount != 1)
warning("Multiple edit list entries. Things may go awry");
return 0;
}
-int QuickTimeParser::readHDLR(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readHDLR(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
@@ -469,9 +464,9 @@ int QuickTimeParser::readHDLR(MOVatom atom) {
debug(0, "MPEG-4 detected");
if (type == MKTAG('v', 'i', 'd', 'e'))
- st->codec_type = CODEC_TYPE_VIDEO;
+ track->codecType = CODEC_TYPE_VIDEO;
else if (type == MKTAG('s', 'o', 'u', 'n'))
- st->codec_type = CODEC_TYPE_AUDIO;
+ track->codecType = CODEC_TYPE_AUDIO;
_fd->readUint32BE(); // component manufacture
_fd->readUint32BE(); // component flags
@@ -489,8 +484,8 @@ int QuickTimeParser::readHDLR(MOVatom atom) {
return 0;
}
-int QuickTimeParser::readMDHD(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readMDHD(Atom atom) {
+ Track *track = _tracks.back();
byte version = _fd->readByte();
if (version > 1)
@@ -507,8 +502,8 @@ int QuickTimeParser::readMDHD(MOVatom atom) {
_fd->readUint32BE(); // modification time
}
- st->time_scale = _fd->readUint32BE();
- st->duration = (version == 1) ? (_fd->readUint32BE(), _fd->readUint32BE()) : _fd->readUint32BE(); // duration
+ track->timeScale = _fd->readUint32BE();
+ track->duration = (version == 1) ? (_fd->readUint32BE(), _fd->readUint32BE()) : _fd->readUint32BE(); // duration
_fd->readUint16BE(); // language
_fd->readUint16BE(); // quality
@@ -516,17 +511,17 @@ int QuickTimeParser::readMDHD(MOVatom atom) {
return 0;
}
-int QuickTimeParser::readSTSD(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readSTSD(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
uint32 entryCount = _fd->readUint32BE();
- st->sampleDescs.resize(entryCount);
+ track->sampleDescs.resize(entryCount);
for (uint32 i = 0; i < entryCount; i++) { // Parsing Sample description table
- MOVatom a = { 0, 0, 0 };
+ Atom a = { 0, 0, 0 };
uint32 start_pos = _fd->pos();
int size = _fd->readUint32BE(); // size
uint32 format = _fd->readUint32BE(); // data format
@@ -535,11 +530,11 @@ int QuickTimeParser::readSTSD(MOVatom atom) {
_fd->readUint16BE(); // reserved
_fd->readUint16BE(); // index
- st->sampleDescs[i] = readSampleDesc(st, format);
+ track->sampleDescs[i] = readSampleDesc(track, format);
- debug(0, "size=%d 4CC= %s codec_type=%d", size, tag2str(format), st->codec_type);
+ debug(0, "size=%d 4CC= %s codec_type=%d", size, tag2str(format), track->codecType);
- if (!st->sampleDescs[i]) {
+ if (!track->sampleDescs[i]) {
// other codec type, just skip (rtp, mp4s, tmcd ...)
_fd->seek(size - (_fd->pos() - start_pos), SEEK_CUR);
}
@@ -555,139 +550,139 @@ int QuickTimeParser::readSTSD(MOVatom atom) {
return 0;
}
-int QuickTimeParser::readSTSC(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readSTSC(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
- st->sample_to_chunk_sz = _fd->readUint32BE();
+ track->sampleToChunkCount = _fd->readUint32BE();
- debug(0, "track[%i].stsc.entries = %i", _numStreams - 1, st->sample_to_chunk_sz);
+ debug(0, "track[%i].stsc.entries = %i", _tracks.size() - 1, track->sampleToChunkCount);
- st->sample_to_chunk = new MOVstsc[st->sample_to_chunk_sz];
+ track->sampleToChunk = new SampleToChunkEntry[track->sampleToChunkCount];
- if (!st->sample_to_chunk)
+ if (!track->sampleToChunk)
return -1;
- for (uint32 i = 0; i < st->sample_to_chunk_sz; i++) {
- st->sample_to_chunk[i].first = _fd->readUint32BE() - 1;
- st->sample_to_chunk[i].count = _fd->readUint32BE();
- st->sample_to_chunk[i].id = _fd->readUint32BE();
- //warning("Sample to Chunk[%d]: First = %d, Count = %d", i, st->sample_to_chunk[i].first, st->sample_to_chunk[i].count);
+ for (uint32 i = 0; i < track->sampleToChunkCount; i++) {
+ track->sampleToChunk[i].first = _fd->readUint32BE() - 1;
+ track->sampleToChunk[i].count = _fd->readUint32BE();
+ track->sampleToChunk[i].id = _fd->readUint32BE();
+ //warning("Sample to Chunk[%d]: First = %d, Count = %d", i, track->sampleToChunk[i].first, track->sampleToChunk[i].count);
}
return 0;
}
-int QuickTimeParser::readSTSS(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readSTSS(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
- st->keyframe_count = _fd->readUint32BE();
+ track->keyframeCount = _fd->readUint32BE();
- debug(0, "keyframe_count = %d", st->keyframe_count);
+ debug(0, "keyframeCount = %d", track->keyframeCount);
- st->keyframes = new uint32[st->keyframe_count];
+ track->keyframes = new uint32[track->keyframeCount];
- if (!st->keyframes)
+ if (!track->keyframes)
return -1;
- for (uint32 i = 0; i < st->keyframe_count; i++) {
- st->keyframes[i] = _fd->readUint32BE() - 1; // Adjust here, the frames are based on 1
- debug(6, "keyframes[%d] = %d", i, st->keyframes[i]);
+ for (uint32 i = 0; i < track->keyframeCount; i++) {
+ track->keyframes[i] = _fd->readUint32BE() - 1; // Adjust here, the frames are based on 1
+ debug(6, "keyframes[%d] = %d", i, track->keyframes[i]);
}
return 0;
}
-int QuickTimeParser::readSTSZ(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readSTSZ(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
- st->sample_size = _fd->readUint32BE();
- st->sample_count = _fd->readUint32BE();
+ track->sampleSize = _fd->readUint32BE();
+ track->sampleCount = _fd->readUint32BE();
- debug(5, "sample_size = %d sample_count = %d", st->sample_size, st->sample_count);
+ debug(5, "sampleSize = %d sampleCount = %d", track->sampleSize, track->sampleCount);
- if (st->sample_size)
+ if (track->sampleSize)
return 0; // there isn't any table following
- st->sample_sizes = new uint32[st->sample_count];
+ track->sampleSizes = new uint32[track->sampleCount];
- if (!st->sample_sizes)
+ if (!track->sampleSizes)
return -1;
- for(uint32 i = 0; i < st->sample_count; i++) {
- st->sample_sizes[i] = _fd->readUint32BE();
- debug(6, "sample_sizes[%d] = %d", i, st->sample_sizes[i]);
+ for(uint32 i = 0; i < track->sampleCount; i++) {
+ track->sampleSizes[i] = _fd->readUint32BE();
+ debug(6, "sampleSizes[%d] = %d", i, track->sampleSizes[i]);
}
return 0;
}
-int QuickTimeParser::readSTTS(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readSTTS(Atom atom) {
+ Track *track = _tracks.back();
uint32 totalSampleCount = 0;
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
- st->stts_count = _fd->readUint32BE();
- st->stts_data = new MOVstts[st->stts_count];
+ track->timeToSampleCount = _fd->readUint32BE();
+ track->timeToSample = new TimeToSampleEntry[track->timeToSampleCount];
- debug(0, "track[%d].stts.entries = %d", _numStreams - 1, st->stts_count);
+ debug(0, "track[%d].stts.entries = %d", _tracks.size() - 1, track->timeToSampleCount);
- for (int32 i = 0; i < st->stts_count; i++) {
- st->stts_data[i].count = _fd->readUint32BE();
- st->stts_data[i].duration = _fd->readUint32BE();
+ for (int32 i = 0; i < track->timeToSampleCount; i++) {
+ track->timeToSample[i].count = _fd->readUint32BE();
+ track->timeToSample[i].duration = _fd->readUint32BE();
- debug(1, "\tCount = %d, Duration = %d", st->stts_data[i].count, st->stts_data[i].duration);
+ debug(1, "\tCount = %d, Duration = %d", track->timeToSample[i].count, track->timeToSample[i].duration);
- totalSampleCount += st->stts_data[i].count;
+ totalSampleCount += track->timeToSample[i].count;
}
- st->nb_frames = totalSampleCount;
+ track->frameCount = totalSampleCount;
return 0;
}
-int QuickTimeParser::readSTCO(MOVatom atom) {
- MOVStreamContext *st = _streams[_numStreams - 1];
+int QuickTimeParser::readSTCO(Atom atom) {
+ Track *track = _tracks.back();
_fd->readByte(); // version
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
- st->chunk_count = _fd->readUint32BE();
- st->chunk_offsets = new uint32[st->chunk_count];
+ track->chunkCount = _fd->readUint32BE();
+ track->chunkOffsets = new uint32[track->chunkCount];
- if (!st->chunk_offsets)
+ if (!track->chunkOffsets)
return -1;
- for (uint32 i = 0; i < st->chunk_count; i++) {
+ for (uint32 i = 0; i < track->chunkCount; i++) {
// WORKAROUND/HACK: The offsets in Riven videos (ones inside the Mohawk archives themselves)
// have offsets relative to the archive and not the video. This is quite nasty. We subtract
// the initial offset of the stream to get the correct value inside of the stream.
- st->chunk_offsets[i] = _fd->readUint32BE() - _beginOffset;
+ track->chunkOffsets[i] = _fd->readUint32BE() - _beginOffset;
}
return 0;
}
-int QuickTimeParser::readWAVE(MOVatom atom) {
- if (_numStreams < 1)
+int QuickTimeParser::readWAVE(Atom atom) {
+ if (_tracks.empty())
return 0;
- MOVStreamContext *st = _streams[_numStreams - 1];
+ Track *track = _tracks.back();
if (atom.size > (1 << 30))
return -1;
- if (st->sampleDescs[0]->getCodecTag() == MKTAG('Q', 'D', 'M', '2')) // Read extradata for QDM2
- st->extradata = _fd->readStream(atom.size - 8);
+ if (track->sampleDescs[0]->getCodecTag() == MKTAG('Q', 'D', 'M', '2')) // Read extra data for QDM2
+ track->extraData = _fd->readStream(atom.size - 8);
else if (atom.size > 8)
return readDefault(atom);
else
@@ -723,11 +718,11 @@ static void readMP4Desc(Common::SeekableReadStream *stream, byte &tag, int &leng
length = readMP4DescLength(stream);
}
-int QuickTimeParser::readESDS(MOVatom atom) {
- if (_numStreams < 1)
+int QuickTimeParser::readESDS(Atom atom) {
+ if (_tracks.empty())
return 0;
- MOVStreamContext *st = _streams[_numStreams - 1];
+ Track *track = _tracks.back();
_fd->readUint32BE(); // version + flags
@@ -744,7 +739,7 @@ int QuickTimeParser::readESDS(MOVatom atom) {
if (tag != kMP4DecConfigDescTag)
return 0;
- st->objectTypeMP4 = _fd->readByte();
+ track->objectTypeMP4 = _fd->readByte();
_fd->readByte(); // stream type
_fd->readUint16BE(); _fd->readByte(); // buffer size
_fd->readUint32BE(); // max bitrate
@@ -755,17 +750,17 @@ int QuickTimeParser::readESDS(MOVatom atom) {
if (tag != kMP4DecSpecificDescTag)
return 0;
- st->extradata = _fd->readStream(length);
+ track->extraData = _fd->readStream(length);
- debug(0, "MPEG-4 object type = %02x", st->objectTypeMP4);
+ debug(0, "MPEG-4 object type = %02x", track->objectTypeMP4);
return 0;
}
void QuickTimeParser::close() {
- for (uint32 i = 0; i < _numStreams; i++)
- delete _streams[i];
+ for (uint32 i = 0; i < _tracks.size(); i++)
+ delete _tracks[i];
- _numStreams = 0;
+ _tracks.clear();
if (_disposeFileHandle == DisposeAfterUse::YES)
delete _fd;
@@ -773,44 +768,44 @@ void QuickTimeParser::close() {
_fd = 0;
}
-QuickTimeParser::SampleDesc::SampleDesc(MOVStreamContext *parentStream, uint32 codecTag) {
- _parentStream = parentStream;
+QuickTimeParser::SampleDesc::SampleDesc(Track *parentTrack, uint32 codecTag) {
+ _parentTrack = parentTrack;
_codecTag = codecTag;
}
-QuickTimeParser::MOVStreamContext::MOVStreamContext() {
- chunk_count = 0;
- chunk_offsets = 0;
- stts_count = 0;
- stts_data = 0;
- sample_to_chunk_sz = 0;
- sample_to_chunk = 0;
- sample_size = 0;
- sample_count = 0;
- sample_sizes = 0;
- keyframe_count = 0;
+QuickTimeParser::Track::Track() {
+ chunkCount = 0;
+ chunkOffsets = 0;
+ timeToSampleCount = 0;
+ timeToSample = 0;
+ sampleToChunkCount = 0;
+ sampleToChunk = 0;
+ sampleSize = 0;
+ sampleCount = 0;
+ sampleSizes = 0;
+ keyframeCount = 0;
keyframes = 0;
- time_scale = 0;
+ timeScale = 0;
width = 0;
height = 0;
- codec_type = CODEC_TYPE_MOV_OTHER;
+ codecType = CODEC_TYPE_MOV_OTHER;
editCount = 0;
editList = 0;
- extradata = 0;
- nb_frames = 0;
+ extraData = 0;
+ frameCount = 0;
duration = 0;
- start_time = 0;
+ startTime = 0;
objectTypeMP4 = 0;
}
-QuickTimeParser::MOVStreamContext::~MOVStreamContext() {
- delete[] chunk_offsets;
- delete[] stts_data;
- delete[] sample_to_chunk;
- delete[] sample_sizes;
+QuickTimeParser::Track::~Track() {
+ delete[] chunkOffsets;
+ delete[] timeToSample;
+ delete[] sampleToChunk;
+ delete[] sampleSizes;
delete[] keyframes;
delete[] editList;
- delete extradata;
+ delete extraData;
for (uint32 i = 0; i < sampleDescs.size(); i++)
delete sampleDescs[i];
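The new _tracks container used throughout this file (declared in the common/quicktime.h hunk below) replaces a fixed-size stream array, so a TRAK atom handler only needs to append a fresh Track and let the child atom readers pick it up via _tracks.back(), as readSTCO(), readWAVE() and readESDS() above now do. The readTRAK body itself is not part of this diff, so the following is only a hypothetical sketch of that pattern:

int QuickTimeParser::readTRAK(Atom atom) {
	// Hypothetical sketch -- the real readTRAK body is not included in this commit mail.
	Track *track = new Track();   // members are zero-initialized by the constructor above
	_tracks.push_back(track);     // replaces the old fixed-array bookkeeping
	return readDefault(atom);     // descend into the child atoms (tkhd, mdia, ...)
}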
diff --git a/common/quicktime.h b/common/quicktime.h
index 2bd461e..cb2bed1 100644
--- a/common/quicktime.h
+++ b/common/quicktime.h
@@ -88,23 +88,23 @@ protected:
DisposeAfterUse::Flag _disposeFileHandle;
- struct MOVatom {
+ struct Atom {
uint32 type;
uint32 offset;
uint32 size;
};
struct ParseTable {
- int (QuickTimeParser::*func)(MOVatom atom);
+ int (QuickTimeParser::*func)(Atom atom);
uint32 type;
};
- struct MOVstts {
+ struct TimeToSampleEntry {
int count;
int duration;
};
- struct MOVstsc {
+ struct SampleToChunkEntry {
uint32 first;
uint32 count;
uint32 id;
@@ -116,17 +116,17 @@ protected:
Common::Rational mediaRate;
};
- struct MOVStreamContext;
+ struct Track;
class SampleDesc {
public:
- SampleDesc(MOVStreamContext *parentStream, uint32 codecTag);
+ SampleDesc(Track *parentTrack, uint32 codecTag);
virtual ~SampleDesc() {}
uint32 getCodecTag() const { return _codecTag; }
protected:
- MOVStreamContext *_parentStream;
+ Track *_parentTrack;
uint32 _codecTag;
};
@@ -136,77 +136,76 @@ protected:
CODEC_TYPE_AUDIO
};
- struct MOVStreamContext {
- MOVStreamContext();
- ~MOVStreamContext();
-
- uint32 chunk_count;
- uint32 *chunk_offsets;
- int stts_count;
- MOVstts *stts_data;
- uint32 sample_to_chunk_sz;
- MOVstsc *sample_to_chunk;
- uint32 sample_size;
- uint32 sample_count;
- uint32 *sample_sizes;
- uint32 keyframe_count;
+ struct Track {
+ Track();
+ ~Track();
+
+ uint32 chunkCount;
+ uint32 *chunkOffsets;
+ int timeToSampleCount;
+ TimeToSampleEntry *timeToSample;
+ uint32 sampleToChunkCount;
+ SampleToChunkEntry *sampleToChunk;
+ uint32 sampleSize;
+ uint32 sampleCount;
+ uint32 *sampleSizes;
+ uint32 keyframeCount;
uint32 *keyframes;
- int32 time_scale;
+ int32 timeScale;
uint16 width;
uint16 height;
- CodecType codec_type;
+ CodecType codecType;
Common::Array<SampleDesc *> sampleDescs;
uint32 editCount;
EditListEntry *editList;
- Common::SeekableReadStream *extradata;
+ Common::SeekableReadStream *extraData;
- uint32 nb_frames;
+ uint32 frameCount;
uint32 duration;
- uint32 start_time;
+ uint32 startTime;
Common::Rational scaleFactorX;
Common::Rational scaleFactorY;
byte objectTypeMP4;
};
- virtual SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format) = 0;
+ virtual SampleDesc *readSampleDesc(Track *track, uint32 format) = 0;
const ParseTable *_parseTable;
bool _foundMOOV;
uint32 _timeScale;
uint32 _duration;
- uint32 _numStreams;
Common::Rational _scaleFactorX;
Common::Rational _scaleFactorY;
- MOVStreamContext *_streams[20];
+ Common::Array<Track *> _tracks;
uint32 _beginOffset;
Common::MacResManager *_resFork;
void initParseTable();
void init();
- int readDefault(MOVatom atom);
- int readLeaf(MOVatom atom);
- int readELST(MOVatom atom);
- int readHDLR(MOVatom atom);
- int readMDHD(MOVatom atom);
- int readMOOV(MOVatom atom);
- int readMVHD(MOVatom atom);
- int readTKHD(MOVatom atom);
- int readTRAK(MOVatom atom);
- int readSTCO(MOVatom atom);
- int readSTSC(MOVatom atom);
- int readSTSD(MOVatom atom);
- int readSTSS(MOVatom atom);
- int readSTSZ(MOVatom atom);
- int readSTTS(MOVatom atom);
- int readCMOV(MOVatom atom);
- int readWAVE(MOVatom atom);
- int readESDS(MOVatom atom);
+ int readDefault(Atom atom);
+ int readLeaf(Atom atom);
+ int readELST(Atom atom);
+ int readHDLR(Atom atom);
+ int readMDHD(Atom atom);
+ int readMOOV(Atom atom);
+ int readMVHD(Atom atom);
+ int readTKHD(Atom atom);
+ int readTRAK(Atom atom);
+ int readSTCO(Atom atom);
+ int readSTSC(Atom atom);
+ int readSTSD(Atom atom);
+ int readSTSS(Atom atom);
+ int readSTSZ(Atom atom);
+ int readSTTS(Atom atom);
+ int readCMOV(Atom atom);
+ int readWAVE(Atom atom);
+ int readESDS(Atom atom);
};
} // End of namespace Common
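For context on the renamed tables: TimeToSampleEntry corresponds to the QuickTime 'stts' atom and is a run-length encoding of sample durations; each entry means "the next count samples all last duration time units". The duration lookup the decoder performs (the same walk getFrameDuration() does in the qt_decoder.cpp hunk below) can be sketched as:

// Sketch of the stts lookup; assumes the table was already filled in by readSTTS().
uint32 durationOfFrame(const TimeToSampleEntry *table, int entryCount, uint32 frame) {
	uint32 framesSeen = 0;
	for (int i = 0; i < entryCount; i++) {
		framesSeen += table[i].count;   // this entry covers the next table[i].count frames
		if (frame < framesSeen)
			return table[i].duration;   // every frame in the run shares one duration
	}
	return 0; // frame index lies past the end of the table
}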
diff --git a/video/qt_decoder.cpp b/video/qt_decoder.cpp
index dbf8603..9575845 100644
--- a/video/qt_decoder.cpp
+++ b/video/qt_decoder.cpp
@@ -69,50 +69,50 @@ QuickTimeDecoder::~QuickTimeDecoder() {
}
uint16 QuickTimeDecoder::getWidth() const {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return 0;
- return (Common::Rational(_streams[_videoStreamIndex]->width) / getScaleFactorX()).toInt();
+ return (Common::Rational(_tracks[_videoTrackIndex]->width) / getScaleFactorX()).toInt();
}
uint16 QuickTimeDecoder::getHeight() const {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return 0;
- return (Common::Rational(_streams[_videoStreamIndex]->height) / getScaleFactorY()).toInt();
+ return (Common::Rational(_tracks[_videoTrackIndex]->height) / getScaleFactorY()).toInt();
}
uint32 QuickTimeDecoder::getFrameCount() const {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return 0;
- return _streams[_videoStreamIndex]->nb_frames;
+ return _tracks[_videoTrackIndex]->frameCount;
}
Common::Rational QuickTimeDecoder::getScaleFactorX() const {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return 1;
- return (_scaleFactorX * _streams[_videoStreamIndex]->scaleFactorX);
+ return (_scaleFactorX * _tracks[_videoTrackIndex]->scaleFactorX);
}
Common::Rational QuickTimeDecoder::getScaleFactorY() const {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return 1;
- return (_scaleFactorY * _streams[_videoStreamIndex]->scaleFactorY);
+ return (_scaleFactorY * _tracks[_videoTrackIndex]->scaleFactorY);
}
uint32 QuickTimeDecoder::getFrameDuration() {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return 0;
uint32 curFrameIndex = 0;
- for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count; i++) {
- curFrameIndex += _streams[_videoStreamIndex]->stts_data[i].count;
+ for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount; i++) {
+ curFrameIndex += _tracks[_videoTrackIndex]->timeToSample[i].count;
if ((uint32)_curFrame < curFrameIndex) {
// Ok, now we have what duration this frame has.
- return _streams[_videoStreamIndex]->stts_data[i].duration;
+ return _tracks[_videoTrackIndex]->timeToSample[i].duration;
}
}
@@ -131,17 +131,17 @@ Graphics::PixelFormat QuickTimeDecoder::getPixelFormat() const {
}
uint32 QuickTimeDecoder::findKeyFrame(uint32 frame) const {
- for (int i = _streams[_videoStreamIndex]->keyframe_count - 1; i >= 0; i--)
- if (_streams[_videoStreamIndex]->keyframes[i] <= frame)
- return _streams[_videoStreamIndex]->keyframes[i];
+ for (int i = _tracks[_videoTrackIndex]->keyframeCount - 1; i >= 0; i--)
+ if (_tracks[_videoTrackIndex]->keyframes[i] <= frame)
+ return _tracks[_videoTrackIndex]->keyframes[i];
// If none found, we'll assume the requested frame is a key frame
return frame;
}
void QuickTimeDecoder::seekToFrame(uint32 frame) {
- assert(_videoStreamIndex >= 0);
- assert(frame < _streams[_videoStreamIndex]->nb_frames);
+ assert(_videoTrackIndex >= 0);
+ assert(frame < _tracks[_videoTrackIndex]->frameCount);
// Stop all audio (for now)
stopAudio();
@@ -155,20 +155,20 @@ void QuickTimeDecoder::seekToFrame(uint32 frame) {
_nextFrameStartTime = 0;
uint32 curFrame = 0;
- for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && curFrame < frame; i++) {
- for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count && curFrame < frame; j++) {
+ for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && curFrame < frame; i++) {
+ for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count && curFrame < frame; j++) {
curFrame++;
- _nextFrameStartTime += _streams[_videoStreamIndex]->stts_data[i].duration;
+ _nextFrameStartTime += _tracks[_videoTrackIndex]->timeToSample[i].duration;
}
}
// Adjust the video starting point
- const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _streams[_videoStreamIndex]->time_scale);
+ const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _tracks[_videoTrackIndex]->timeScale);
_startTime = g_system->getMillis() - curVideoTime.msecs();
resetPauseStartTime();
// Adjust the audio starting point
- if (_audioStreamIndex >= 0) {
+ if (_audioTrackIndex >= 0) {
_audioStartOffset = curVideoTime;
// Seek to the new audio location
@@ -181,17 +181,17 @@ void QuickTimeDecoder::seekToFrame(uint32 frame) {
void QuickTimeDecoder::seekToTime(Audio::Timestamp time) {
// Use makeQuickTimeStream() instead
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
error("Audio-only seeking not supported");
// Try to find the last frame that should have been decoded
uint32 frame = 0;
- Audio::Timestamp totalDuration(0, _streams[_videoStreamIndex]->time_scale);
+ Audio::Timestamp totalDuration(0, _tracks[_videoTrackIndex]->timeScale);
bool done = false;
- for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && !done; i++) {
- for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count; j++) {
- totalDuration = totalDuration.addFrames(_streams[_videoStreamIndex]->stts_data[i].duration);
+ for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && !done; i++) {
+ for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count; j++) {
+ totalDuration = totalDuration.addFrames(_tracks[_videoTrackIndex]->timeToSample[i].duration);
if (totalDuration > time) {
done = true;
break;
@@ -221,14 +221,14 @@ void QuickTimeDecoder::pauseVideoIntern(bool pause) {
}
Codec *QuickTimeDecoder::findDefaultVideoCodec() const {
- if (_videoStreamIndex < 0 || _streams[_videoStreamIndex]->sampleDescs.empty())
+ if (_videoTrackIndex < 0 || _tracks[_videoTrackIndex]->sampleDescs.empty())
return 0;
- return ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[0])->_videoCodec;
+ return ((VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[0])->_videoCodec;
}
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
- if (_videoStreamIndex < 0 || _curFrame >= (int32)getFrameCount() - 1)
+ if (_videoTrackIndex < 0 || _curFrame >= (int32)getFrameCount() - 1)
return 0;
if (_startTime == 0)
@@ -244,11 +244,11 @@ const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
uint32 descId;
Common::SeekableReadStream *frameData = getNextFramePacket(descId);
- if (!frameData || !descId || descId > _streams[_videoStreamIndex]->sampleDescs.size())
+ if (!frameData || !descId || descId > _tracks[_videoTrackIndex]->sampleDescs.size())
return 0;
// Find which video description entry we want
- VideoSampleDesc *entry = (VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[descId - 1];
+ VideoSampleDesc *entry = (VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[descId - 1];
if (!entry->_videoCodec)
return 0;
@@ -305,7 +305,7 @@ uint32 QuickTimeDecoder::getTimeToNextFrame() const {
return 0;
// Convert from the QuickTime rate base to 1000
- uint32 nextFrameStartTime = _nextFrameStartTime * 1000 / _streams[_videoStreamIndex]->time_scale;
+ uint32 nextFrameStartTime = _nextFrameStartTime * 1000 / _tracks[_videoTrackIndex]->timeScale;
uint32 elapsedTime = getElapsedTime();
if (nextFrameStartTime <= elapsedTime)
@@ -333,13 +333,13 @@ bool QuickTimeDecoder::loadStream(Common::SeekableReadStream *stream) {
void QuickTimeDecoder::init() {
Audio::QuickTimeAudioDecoder::init();
- _videoStreamIndex = -1;
+ _videoTrackIndex = -1;
_startTime = 0;
// Find video streams
- for (uint32 i = 0; i < _numStreams; i++)
- if (_streams[i]->codec_type == CODEC_TYPE_VIDEO && _videoStreamIndex < 0)
- _videoStreamIndex = i;
+ for (uint32 i = 0; i < _tracks.size(); i++)
+ if (_tracks[i]->codecType == CODEC_TYPE_VIDEO && _videoTrackIndex < 0)
+ _videoTrackIndex = i;
// Start the audio codec if we've got one that we can handle
if (_audStream) {
@@ -348,9 +348,9 @@ void QuickTimeDecoder::init() {
}
// Initialize video, if present
- if (_videoStreamIndex >= 0) {
- for (uint32 i = 0; i < _streams[_videoStreamIndex]->sampleDescs.size(); i++)
- ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[i])->initCodec();
+ if (_videoTrackIndex >= 0) {
+ for (uint32 i = 0; i < _tracks[_videoTrackIndex]->sampleDescs.size(); i++)
+ ((VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[i])->initCodec();
if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
// We have to initialize the scaled surface
@@ -360,11 +360,11 @@ void QuickTimeDecoder::init() {
}
}
-Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamContext *st, uint32 format) {
- if (st->codec_type == CODEC_TYPE_VIDEO) {
+Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Track *track, uint32 format) {
+ if (track->codecType == CODEC_TYPE_VIDEO) {
debug(0, "Video Codec FourCC: \'%s\'", tag2str(format));
- VideoSampleDesc *entry = new VideoSampleDesc(st, format);
+ VideoSampleDesc *entry = new VideoSampleDesc(track, format);
_fd->readUint16BE(); // version
_fd->readUint16BE(); // revision level
@@ -378,21 +378,21 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
// The width is most likely invalid for entries after the first one
// so only set the overall width if it is not zero here.
if (width)
- st->width = width;
+ track->width = width;
if (height)
- st->height = height;
+ track->height = height;
_fd->readUint32BE(); // horiz resolution
_fd->readUint32BE(); // vert resolution
_fd->readUint32BE(); // data size, always 0
_fd->readUint16BE(); // frames per samples
- byte codec_name[32];
- _fd->read(codec_name, 32); // codec name, pascal string (FIXME: true for mp4?)
- if (codec_name[0] <= 31) {
- memcpy(entry->_codecName, &codec_name[1], codec_name[0]);
- entry->_codecName[codec_name[0]] = 0;
+ byte codecName[32];
+ _fd->read(codecName, 32); // codec name, pascal string (FIXME: true for mp4?)
+ if (codecName[0] <= 31) {
+ memcpy(entry->_codecName, &codecName[1], codecName[0]);
+ entry->_codecName[codecName[0]] = 0;
}
entry->_bitsPerSample = _fd->readUint16BE(); // depth
@@ -455,7 +455,7 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
}
// Pass it on up
- return Audio::QuickTimeAudioDecoder::readSampleDesc(st, format);
+ return Audio::QuickTimeAudioDecoder::readSampleDesc(track, format);
}
void QuickTimeDecoder::close() {
@@ -472,7 +472,7 @@ void QuickTimeDecoder::close() {
}
Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId) {
- if (_videoStreamIndex < 0)
+ if (_videoTrackIndex < 0)
return NULL;
// First, we have to track down which chunk holds the sample and which sample in the chunk contains the frame we are looking for.
@@ -480,22 +480,22 @@ Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId)
int32 sampleInChunk = 0;
int32 actualChunk = -1;
- for (uint32 i = 0; i < _streams[_videoStreamIndex]->chunk_count; i++) {
+ for (uint32 i = 0; i < _tracks[_videoTrackIndex]->chunkCount; i++) {
int32 sampleToChunkIndex = -1;
- for (uint32 j = 0; j < _streams[_videoStreamIndex]->sample_to_chunk_sz; j++)
- if (i >= _streams[_videoStreamIndex]->sample_to_chunk[j].first)
+ for (uint32 j = 0; j < _tracks[_videoTrackIndex]->sampleToChunkCount; j++)
+ if (i >= _tracks[_videoTrackIndex]->sampleToChunk[j].first)
sampleToChunkIndex = j;
if (sampleToChunkIndex < 0)
error("This chunk (%d) is imaginary", sampleToChunkIndex);
- totalSampleCount += _streams[_videoStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
+ totalSampleCount += _tracks[_videoTrackIndex]->sampleToChunk[sampleToChunkIndex].count;
if (totalSampleCount > getCurFrame()) {
actualChunk = i;
- descId = _streams[_videoStreamIndex]->sample_to_chunk[sampleToChunkIndex].id;
- sampleInChunk = _streams[_videoStreamIndex]->sample_to_chunk[sampleToChunkIndex].count - totalSampleCount + getCurFrame();
+ descId = _tracks[_videoTrackIndex]->sampleToChunk[sampleToChunkIndex].id;
+ sampleInChunk = _tracks[_videoTrackIndex]->sampleToChunk[sampleToChunkIndex].count - totalSampleCount + getCurFrame();
break;
}
}
@@ -506,23 +506,23 @@ Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId)
}
// Next seek to that frame
- _fd->seek(_streams[_videoStreamIndex]->chunk_offsets[actualChunk]);
+ _fd->seek(_tracks[_videoTrackIndex]->chunkOffsets[actualChunk]);
// Then, if the chunk holds more than one frame, seek to where the frame we want is located
for (int32 i = getCurFrame() - sampleInChunk; i < getCurFrame(); i++) {
- if (_streams[_videoStreamIndex]->sample_size != 0)
- _fd->skip(_streams[_videoStreamIndex]->sample_size);
+ if (_tracks[_videoTrackIndex]->sampleSize != 0)
+ _fd->skip(_tracks[_videoTrackIndex]->sampleSize);
else
- _fd->skip(_streams[_videoStreamIndex]->sample_sizes[i]);
+ _fd->skip(_tracks[_videoTrackIndex]->sampleSizes[i]);
}
// Finally, read in the raw data for the frame
- //printf ("Frame Data[%d]: Offset = %d, Size = %d\n", getCurFrame(), _fd->pos(), _streams[_videoStreamIndex]->sample_sizes[getCurFrame()]);
+ //printf ("Frame Data[%d]: Offset = %d, Size = %d\n", getCurFrame(), _fd->pos(), _tracks[_videoTrackIndex]->sampleSizes[getCurFrame()]);
- if (_streams[_videoStreamIndex]->sample_size != 0)
- return _fd->readStream(_streams[_videoStreamIndex]->sample_size);
+ if (_tracks[_videoTrackIndex]->sampleSize != 0)
+ return _fd->readStream(_tracks[_videoTrackIndex]->sampleSize);
- return _fd->readStream(_streams[_videoStreamIndex]->sample_sizes[getCurFrame()]);
+ return _fd->readStream(_tracks[_videoTrackIndex]->sampleSizes[getCurFrame()]);
}
void QuickTimeDecoder::updateAudioBuffer() {
@@ -531,21 +531,21 @@ void QuickTimeDecoder::updateAudioBuffer() {
uint32 numberOfChunksNeeded = 0;
- if (_videoStreamIndex < 0 || _curFrame == (int32)_streams[_videoStreamIndex]->nb_frames - 1) {
+ if (_videoTrackIndex < 0 || _curFrame == (int32)_tracks[_videoTrackIndex]->frameCount - 1) {
// If we have no video, there's nothing to base our buffer against
// However, one must ask why a QuickTimeDecoder is being used instead of the nice makeQuickTimeStream() function
// If we're on the last frame, make sure all audio remaining is buffered
- numberOfChunksNeeded = _streams[_audioStreamIndex]->chunk_count;
+ numberOfChunksNeeded = _tracks[_audioTrackIndex]->chunkCount;
} else {
- Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
+ Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
// Calculate the amount of chunks we need in memory until the next frame
uint32 timeToNextFrame = getTimeToNextFrame();
uint32 timeFilled = 0;
uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams();
- for (; timeFilled < timeToNextFrame && curAudioChunk < _streams[_audioStreamIndex]->chunk_count; numberOfChunksNeeded++, curAudioChunk++) {
+ for (; timeFilled < timeToNextFrame && curAudioChunk < _tracks[_audioTrackIndex]->chunkCount; numberOfChunksNeeded++, curAudioChunk++) {
uint32 sampleCount = entry->getAudioChunkSampleCount(curAudioChunk);
assert(sampleCount);
@@ -557,11 +557,11 @@ void QuickTimeDecoder::updateAudioBuffer() {
}
// Keep three streams in buffer so that if/when the first two end, it goes right into the next
- while (_audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _streams[_audioStreamIndex]->chunk_count)
+ while (_audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _tracks[_audioTrackIndex]->chunkCount)
queueNextAudioChunk();
}
-QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
+QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
memset(_codecName, 0, 32);
_colorTableId = 0;
_palette = 0;
@@ -582,15 +582,15 @@ void QuickTimeDecoder::VideoSampleDesc::initCodec() {
break;
case MKTAG('r','p','z','a'):
// Apple Video ("Road Pizza"): Used by some Myst videos.
- _videoCodec = new RPZADecoder(_parentStream->width, _parentStream->height);
+ _videoCodec = new RPZADecoder(_parentTrack->width, _parentTrack->height);
break;
case MKTAG('r','l','e',' '):
// QuickTime RLE: Used by some Myst ME videos.
- _videoCodec = new QTRLEDecoder(_parentStream->width, _parentStream->height, _bitsPerSample & 0x1f);
+ _videoCodec = new QTRLEDecoder(_parentTrack->width, _parentTrack->height, _bitsPerSample & 0x1f);
break;
case MKTAG('s','m','c',' '):
// Apple SMC: Used by some Myst videos.
- _videoCodec = new SMCDecoder(_parentStream->width, _parentStream->height);
+ _videoCodec = new SMCDecoder(_parentTrack->width, _parentTrack->height);
break;
case MKTAG('S','V','Q','1'):
// Sorenson Video 1: Used by some Myst ME videos.
@@ -606,7 +606,7 @@ void QuickTimeDecoder::VideoSampleDesc::initCodec() {
break;
case MKTAG('Q','k','B','k'):
// CDToons: Used by most of the Broderbund games.
- _videoCodec = new CDToonsDecoder(_parentStream->width, _parentStream->height);
+ _videoCodec = new CDToonsDecoder(_parentTrack->width, _parentTrack->height);
break;
default:
warning("Unsupported codec \'%s\'", tag2str(_codecTag));
diff --git a/video/qt_decoder.h b/video/qt_decoder.h
index e3955d7..b51fd04 100644
--- a/video/qt_decoder.h
+++ b/video/qt_decoder.h
@@ -116,7 +116,7 @@ public:
protected:
class VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
- VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
+ VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag);
~VideoSampleDesc();
void initCodec();
@@ -129,7 +129,7 @@ protected:
Codec *_videoCodec;
};
- Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
+ Common::QuickTimeParser::SampleDesc *readSampleDesc(Track *track, uint32 format);
private:
Common::SeekableReadStream *getNextFramePacket(uint32 &descId);
@@ -146,7 +146,7 @@ private:
Codec *createCodec(uint32 codecTag, byte bitsPerPixel);
Codec *findDefaultVideoCodec() const;
uint32 _nextFrameStartTime;
- int8 _videoStreamIndex;
+ int _videoTrackIndex;
uint32 findKeyFrame(uint32 frame) const;
bool _dirtyPalette;
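Related to the renamed timeScale field: it is the track's time base in units per second, so the decoder converts track time to milliseconds either by hand (as getTimeToNextFrame() does with nextFrameStartTime * 1000 / timeScale) or through Audio::Timestamp (as seekToFrame() does). A small illustration with made-up numbers:

// Illustration only, with assumed values: a 600 units-per-second track whose
// next frame starts at 900 track units.
uint32 timeScale = 600;
uint32 nextFrameStartTime = 900;
uint32 msecs = nextFrameStartTime * 1000 / timeScale; // 1500 ms

// The same conversion via Audio::Timestamp, as in seekToFrame():
Audio::Timestamp curVideoTime(0, nextFrameStartTime, timeScale);
// curVideoTime.msecs() == 1500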