21#include "lazperf/Extractor.hpp"
22#include "lazperf/filestream.hpp"
23#include "lazperf/header.hpp"
24#include "lazperf/readers.hpp"
25#include "lazperf/vlr.hpp"
26#include "lazperf/writers.hpp"
  std::vector<char> buf( 32 );
  int numEntries = static_cast<int>( size / 32 );
  file.seekg( static_cast<int64_t>( offset ) );
  while ( numEntries-- )
  {
    // each hierarchy entry is a fixed 32-byte record: node key (d, x, y, z),
    // followed by offset, byteSize and pointCount
    file.read( buf.data(), static_cast<long>( buf.size() ) );
    lazperf::LeExtractor s( buf.data(), buf.size() );
    s >> d >> x >> y >> z;
bool QgsCopcUpdate::write( const QString &outputFilename, const QHash<QgsPointCloudNodeId, UpdatedChunk> &updatedChunks )
{
  m_f.open( QgsLazDecoder::toNativePath( outputFilename ), std::ios::out | std::ios::binary );

  // copy the LAS header and VLRs verbatim (everything up to the start of point data)
  std::vector<char> allHeaderData;
  allHeaderData.resize( mHeader.point_offset );
  mFile.read( allHeaderData.data(), static_cast<long>( allHeaderData.size() ) );
  m_f.write( allHeaderData.data(), static_cast<long>( allHeaderData.size() ) );
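  // write an 8-byte placeholder where the chunk table offset belongs; the real value
  // (newChunkTableOffset) is written back here at the very end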
  m_f.write( "XXXXXXXX", 8 );
  uint64_t currentChunkOffset = mHeader.point_offset + 8;
  mFile.seekg( static_cast<long>( currentChunkOffset ) );
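  // walk the original chunks: chunks listed in updatedChunks get their new data written
  // instead, all others are copied unchanged; record each chunk's new absolute offset
  // so the hierarchy entries can be fixed up afterwards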
  QHash<QgsPointCloudNodeId, uint64_t> voxelToNewOffset;

  for ( lazperf::chunk ch : mChunks )
  {
    Q_ASSERT( mOffsetToVoxel.contains( currentChunkOffset ) );

    uint64_t newOffset = m_f.tellp();
    voxelToNewOffset[n] = newOffset;

    if ( updatedChunks.contains( n ) )
    {
      // skip the original chunk in the input file; the replacement data comes from updatedChunks
      mFile.seekg( static_cast<long>( mFile.tellg() ) + static_cast<long>( ch.offset ) );
      mChunks[chIndex].offset = updatedChunk.chunkData.size();
    }
    else
    {
      // copy the original chunk data unchanged (ch.offset holds the chunk's byte size here)
      std::vector<char> originalChunkData;
      originalChunkData.resize( ch.offset );
      mFile.read( originalChunkData.data(), static_cast<long>( originalChunkData.size() ) );
      m_f.write( originalChunkData.data(), static_cast<long>( originalChunkData.size() ) );
    }

    currentChunkOffset += ch.offset;
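  // after the last chunk comes the new chunk table: a 4-byte version, the chunk count,
  // and then the table itself compressed by lazperf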
  const uint64_t newChunkTableOffset = m_f.tellp();

  m_f.write( "\0\0\0\0", 4 );
  m_f.write( reinterpret_cast<const char *>( &mChunkCount ), sizeof( mChunkCount ) );

  lazperf::OutFileStream outStream( m_f );
  lazperf::compress_chunk_table( outStream.cb(), mChunks, true );
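  // the hierarchy EVLR will be written right after the chunk table; its payload starts
  // 60 bytes further (the size of an EVLR header), so this shift maps offsets within
  // the old hierarchy blob to their positions in the output file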
  const long hierPositionShift = static_cast<long>( m_f.tellp() ) + 60 - static_cast<long>( mHierarchyOffset );
  // update each 32-byte hierarchy entry in mHierarchyBlob so that it matches the rewritten file
  const int nEntries = static_cast<int>( mHierarchyBlob.size() / 32 );
  for ( int i = 0; i < nEntries; ++i )
  {
    Q_ASSERT( voxelToNewOffset.contains( e.key ) );

    if ( updatedChunks.contains( e.key ) )
    {
      uint64_t newByteSize = updatedChunks[e.key].chunkData.size();
      e.byteSize = static_cast<int>( newByteSize );
    }

    e.offset += hierPositionShift;
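  // write the updated hierarchy blob as the COPC EPT hierarchy EVLR ("copc" / 1000)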
  const uint64_t newEvlrOffset = m_f.tellp();

  lazperf::evlr_header outCopcHierEvlr;
  outCopcHierEvlr.reserved = 0;
  outCopcHierEvlr.user_id = "copc";
  outCopcHierEvlr.record_id = 1000;
  outCopcHierEvlr.data_length = mHierarchyBlob.size();
  outCopcHierEvlr.description = "EPT Hierarchy";

  outCopcHierEvlr.write( m_f );
  m_f.write( mHierarchyBlob.data(), static_cast<long>( mHierarchyBlob.size() ) );
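  // copy all remaining EVLRs from the input file unchanged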
  for ( size_t i = 0; i < mEvlrHeaders.size(); ++i )
  {
    lazperf::evlr_header evlrHeader = mEvlrHeaders[i];
    std::vector<char> evlrBody = mEvlrData[i];

    evlrHeader.write( m_f );
    m_f.write( evlrBody.data(), static_cast<long>( evlrBody.size() ) );
  }
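  // finally patch the locations that have moved: the EVLR offset in the LAS header,
  // the root hierarchy offset in the COPC info VLR, and the chunk table offset
  // placeholder written earlier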
  m_f.write( reinterpret_cast<const char *>( &newEvlrOffset ), 8 );

  const uint64_t newRootHierOffset = mCopcVlr.root_hier_offset + hierPositionShift;
  m_f.write( reinterpret_cast<const char *>( &newRootHierOffset ), 8 );

  m_f.seekp( mHeader.point_offset );
  m_f.write( reinterpret_cast<const char *>( &newChunkTableOffset ), 8 );
  mInputFilename = inputFilename;

  mFile.open( QgsLazDecoder::toNativePath( inputFilename ), std::ios::binary | std::ios::in );
  // on failure to open:
  mErrorMessage = QStringLiteral( "Could not open file for reading: %1" ).arg( inputFilename );
bool QgsCopcUpdate::readHeader()
{
  mHeader = lazperf::header14::create( mFile );
  // on failure to parse the header:
  mErrorMessage = QStringLiteral( "Error reading COPC header" );

  lazperf::vlr_header vh = lazperf::vlr_header::create( mFile );
  mCopcVlr = lazperf::copc_info_vlr::create( mFile );

  int baseCount = lazperf::baseCount( mHeader.point_format_id );
  if ( baseCount == 0 )
  {
    mErrorMessage = QStringLiteral( "Bad point record format: %1" ).arg( mHeader.point_format_id );
void QgsCopcUpdate::readChunkTable()
{
  uint64_t chunkTableOffset;

  // the 8 bytes at point_offset hold the absolute offset of the chunk table
  mFile.seekg( mHeader.point_offset );
  mFile.read( reinterpret_cast<char *>( &chunkTableOffset ), sizeof( chunkTableOffset ) );
  // skip the 4-byte chunk table version and read the number of chunks
  mFile.seekg( static_cast<long>( chunkTableOffset ) + 4 );
  mFile.read( reinterpret_cast<char *>( &mChunkCount ), sizeof( mChunkCount ) );
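  // the chunk table itself is compressed, so it is handed to lazperf through a
  // separate stream positioned right after the chunk count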
  bool variable = true;  // variable-size chunks

  std::ifstream copcFileTmp;
  copcFileTmp.open( QgsLazDecoder::toNativePath( mInputFilename ), std::ios::binary | std::ios::in );
  copcFileTmp.seekg( mFile.tellg() );
  lazperf::InFileStream copcInFileStream( copcFileTmp );

  mChunks = lazperf::decompress_chunk_table( copcInFileStream.cb(), mChunkCount, variable );
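  // lazperf returns each chunk's point count together with its compressed byte size
  // (in ch.offset); accumulate the sizes to get absolute offsets within the file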
  std::vector<lazperf::chunk> chunksWithAbsoluteOffsets;
  uint64_t nextChunkOffset = mHeader.point_offset + 8;
  for ( lazperf::chunk ch : mChunks )
  {
    chunksWithAbsoluteOffsets.push_back( {nextChunkOffset, ch.count} );
    nextChunkOffset += ch.offset;
void QgsCopcUpdate::readHierarchy()
{
  // start with the root hierarchy page referenced from the COPC info VLR
  childEntriesToProcess.push_back( HierarchyEntry
  {
    QgsPointCloudNodeId( 0, 0, 0, 0 ),
    mCopcVlr.root_hier_offset,
    static_cast<int32_t>( mCopcVlr.root_hier_size ),
  // walk the hierarchy pages starting from the root, mapping each data chunk's
  // absolute offset to its voxel key; entries with pointCount < 0 reference further pages
  while ( !childEntriesToProcess.empty() )
  {
    HierarchyEntry childEntry = childEntriesToProcess.back();
    childEntriesToProcess.pop_back();

    HierarchyEntries page = getHierarchyPage( mFile, childEntry.offset, childEntry.byteSize );

    for ( const HierarchyEntry &e : page )
    {
      if ( e.pointCount > 0 )
      {
        Q_ASSERT( !mOffsetToVoxel.contains( e.offset ) );
        mOffsetToVoxel[e.offset] = e.key;
      }
      else if ( e.pointCount < 0 )
      {
        childEntriesToProcess.push_back( e );
  lazperf::evlr_header evlr1;
  mFile.seekg( static_cast<long>( mHeader.evlr_offset ) );

  mHierarchyOffset = 0;
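  // scan all EVLRs of the input file: the COPC EPT hierarchy ("copc" / 1000) goes to
  // mHierarchyBlob, any other EVLR is stored so that write() can copy it verbatim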
  for ( uint32_t i = 0; i < mHeader.evlr_count; ++i )
  {
    if ( evlr1.user_id == "copc" && evlr1.record_id == 1000 )
    {
      mHierarchyBlob.resize( evlr1.data_length );
      mHierarchyOffset = mFile.tellg();
      mFile.read( mHierarchyBlob.data(), static_cast<long>( evlr1.data_length ) );
    }
    else
    {
      mEvlrHeaders.push_back( evlr1 );
      std::vector<char> evlrBlob;
      evlrBlob.resize( evlr1.data_length );
      mFile.read( evlrBlob.data(), static_cast<long>( evlrBlob.size() ) );
      mEvlrData.emplace_back( std::move( evlrBlob ) );
    }
  }
  Q_ASSERT( !mHierarchyBlob.empty() );
bool QgsCopcUpdate::writeUpdatedFile( const QString &inputFilename,
                                      const QString &outputFilename,
                                      const QHash<QgsPointCloudNodeId, UpdatedChunk> &updatedChunks,
                                      QString *errorMessage )
{
  QgsCopcUpdate copcUpdate;

  // load the input COPC file, then write the modified copy; on failure the error
  // message from copcUpdate is passed back through errorMessage
  if ( !copcUpdate.read( inputFilename ) )

  if ( !copcUpdate.write( outputFilename, updatedChunks ) )
Handles update operations on a COPC file.
QString errorMessage() const
Returns the error message.
static bool writeUpdatedFile(const QString &inputFilename, const QString &outputFilename, const QHash< QgsPointCloudNodeId, UpdatedChunk > &updatedChunks, QString *errorMessage=nullptr)
Convenience function that does the whole process in one go: loads a COPC file, then writes a new COPC file with the given chunks replaced (see the usage sketch below).
bool read(const QString &inputFilename)
Reads the input COPC file and initializes all members.
bool write(const QString &outputFilename, const QHash< QgsPointCloudNodeId, UpdatedChunk > &updatedChunks)
Writes a COPC file with updated chunks.
Represents an indexed point cloud node's position in the octree.
QVector< HierarchyEntry > HierarchyEntries
HierarchyEntries getHierarchyPage(std::ifstream &file, uint64_t offset, uint64_t size)
Keeps one entry of the COPC hierarchy.
QgsPointCloudNodeId key
Key of the data to which this entry corresponds.
uint64_t offset
Absolute offset to the data chunk if pointCount > 0.
int32_t pointCount
If > 0, represents the number of points in the data chunk.
int32_t byteSize
Size of the data chunk in bytes (compressed size) if pointCount > 0.
Keeps information about how points of a single chunk have been modified.
QByteArray chunkData
Data of the chunk (already compressed with the LAZ compressor).
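A minimal usage sketch based on the declarations above. The wrapper function replaceNodeChunk and the header name "qgscopcupdate.h" are hypothetical, UpdatedChunk is assumed to be the struct nested inside QgsCopcUpdate, and newLazData must already contain LAZ-compressed point data for the given node:

#include "qgscopcupdate.h"  // assumed header declaring QgsCopcUpdate

// Replace the data of a single node in a COPC file, writing the result to a new file.
bool replaceNodeChunk( const QString &inputPath, const QString &outputPath,
                       const QgsPointCloudNodeId &node, const QByteArray &newLazData,
                       QString &error )
{
  QgsCopcUpdate::UpdatedChunk chunk;
  chunk.chunkData = newLazData;  // already compressed with the LAZ compressor

  QHash<QgsPointCloudNodeId, QgsCopcUpdate::UpdatedChunk> updatedChunks;
  updatedChunks.insert( node, chunk );

  // one call does it all: read the input file, rewrite the chunks, fix up the
  // hierarchy and header offsets, and write the output file
  return QgsCopcUpdate::writeUpdatedFile( inputPath, outputPath, updatedChunks, &error );
}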