QGIS API Documentation 3.99.0-Master (2fe06baccd8)
Loading...
Searching...
No Matches
qgscopcupdate.cpp
Go to the documentation of this file.
1/***************************************************************************
2 qgscopcupdate.cpp
3 ---------------------
4 begin : January 2025
5 copyright : (C) 2025 by Martin Dobias
6 email : wonder dot sk at gmail dot com
7 ***************************************************************************
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License as published by *
11 * the Free Software Foundation; either version 2 of the License, or *
12 * (at your option) any later version. *
13 * *
14 ***************************************************************************/
15
16#include "qgscopcupdate.h"
17
18#include <fstream>
19#include <iostream>
20
21#include "lazperf/Extractor.hpp"
22#include "lazperf/filestream.hpp"
23#include "lazperf/header.hpp"
24#include "lazperf/readers.hpp"
25#include "lazperf/vlr.hpp"
26#include "lazperf/writers.hpp"
27#include "qgslazdecoder.h"
28
31{
34
40 uint64_t offset;
41
47 int32_t byteSize;
48
54 int32_t pointCount;
55};
56
57typedef QVector<HierarchyEntry> HierarchyEntries;
58
59
60HierarchyEntries getHierarchyPage( std::ifstream &file, uint64_t offset, uint64_t size )
61{
63 std::vector<char> buf( 32 );
64 int numEntries = static_cast<int>( size / 32 );
65 file.seekg( static_cast<int64_t>( offset ) );
66 while ( numEntries-- )
67 {
68 file.read( buf.data(), static_cast<long>( buf.size() ) );
69 lazperf::LeExtractor s( buf.data(), buf.size() );
70
72 int d, x, y, z;
73 s >> d >> x >> y >> z;
74 s >> e.offset >> e.byteSize >> e.pointCount;
75 e.key = QgsPointCloudNodeId( d, x, y, z );
76
77 page.push_back( e );
78 }
79 return page;
80}
81
82
/**
 * Writes a modified copy of the loaded COPC file to \a outputFilename.
 * Chunks whose voxel key appears in \a updatedChunks are replaced with the
 * supplied (already LAZ-compressed) data; all other chunks are copied
 * verbatim. The chunk table, the EPT hierarchy eVLR and the affected header
 * fields are patched so the output remains a consistent COPC file.
 * Must be called only after a successful read().
 * NOTE(review): always returns true — write errors on the output stream are
 * not detected here.
 */
bool QgsCopcUpdate::write( const QString &outputFilename, const QHash<QgsPointCloudNodeId, UpdatedChunk> &updatedChunks )
{
  std::ofstream m_f;
  m_f.open( QgsLazDecoder::toNativePath( outputFilename ), std::ios::out | std::ios::binary );

  // write header and all VLRs all the way to point offset
  // (then we patch what we need)
  mFile.seekg( 0 );
  std::vector<char> allHeaderData;
  allHeaderData.resize( mHeader.point_offset );
  mFile.read( allHeaderData.data(), static_cast<long>( allHeaderData.size() ) );
  m_f.write( allHeaderData.data(), static_cast<long>( allHeaderData.size() ) );

  m_f.write( "XXXXXXXX", 8 ); // placeholder for chunk table offset

  // the 8 bytes at point_offset hold the chunk table offset, so actual chunk
  // data starts at point_offset + 8
  uint64_t currentChunkOffset = mHeader.point_offset + 8;
  mFile.seekg( static_cast<long>( currentChunkOffset ) ); // this is where first chunk starts

  // now, let's write chunks:
  // - iterate through original chunk table, write out chunks
  // - if chunk is updated, use that instead
  // - keep updating hierarchy as we go
  // - keep updating chunk table as we go

  // maps voxel key -> absolute offset of its chunk in the OUTPUT file
  QHash<QgsPointCloudNodeId, uint64_t> voxelToNewOffset;

  int chIndex = 0;
  for ( lazperf::chunk ch : mChunks )
  {
    // ch.offset is the chunk's compressed byte size here (relative chunk
    // table form), not an absolute file offset
    Q_ASSERT( mOffsetToVoxel.contains( currentChunkOffset ) );
    QgsPointCloudNodeId n = mOffsetToVoxel[currentChunkOffset];

    uint64_t newOffset = m_f.tellp();
    voxelToNewOffset[n] = newOffset;

    // check whether the chunk is modified
    if ( updatedChunks.contains( n ) )
    {
      const UpdatedChunk &updatedChunk = updatedChunks[n];

      // use updated one and skip in the original file
      mFile.seekg( static_cast<long>( mFile.tellg() ) + static_cast<long>( ch.offset ) );

      m_f.write( updatedChunk.chunkData.constData(), updatedChunk.chunkData.size() );

      // update sizes
      mChunks[chIndex].offset = updatedChunk.chunkData.size();
    }
    else
    {
      // use as is
      std::vector<char> originalChunkData;
      originalChunkData.resize( ch.offset );
      mFile.read( originalChunkData.data(), static_cast<long>( originalChunkData.size() ) );
      m_f.write( originalChunkData.data(), static_cast<long>( originalChunkData.size() ) );
    }

    currentChunkOffset += ch.offset;
    ++chIndex;
  }

  // write chunk table: size in bytes + point count of each chunk

  const uint64_t newChunkTableOffset = m_f.tellp();

  m_f.write( "\0\0\0\0", 4 ); // chunk table version
  m_f.write( reinterpret_cast<const char *>( &mChunkCount ), sizeof( mChunkCount ) );

  lazperf::OutFileStream outStream( m_f );
  lazperf::compress_chunk_table( outStream.cb(), mChunks, true );

  // update hierarchy

  // NOTE: one big assumption we're doing here is that existing hierarchy pages
  // are packed one after another, with no gaps. if that's not the case, things
  // will break apart

  // shift between old and new absolute position of the hierarchy payload;
  // the +60 is presumably the size of the eVLR header written before the
  // payload — TODO confirm against lazperf evlr_header
  const long hierPositionShift = static_cast<long>( m_f.tellp() ) + 60 - static_cast<long>( mHierarchyOffset );

  // patch the hierarchy blob in place: it is a packed array of 32-byte entries
  HierarchyEntry *oldCopcHierarchyBlobEntries = reinterpret_cast<HierarchyEntry *>( mHierarchyBlob.data() );
  const int nEntries = static_cast<int>( mHierarchyBlob.size() / 32 );
  for ( int i = 0; i < nEntries; ++i )
  {
    HierarchyEntry &e = oldCopcHierarchyBlobEntries[i];
    if ( e.pointCount > 0 )
    {
      // update entry to new offset
      Q_ASSERT( voxelToNewOffset.contains( e.key ) );
      e.offset = voxelToNewOffset[e.key];

      if ( updatedChunks.contains( e.key ) )
      {
        uint64_t newByteSize = updatedChunks[e.key].chunkData.size();
        e.byteSize = static_cast<int>( newByteSize );
      }
    }
    else if ( e.pointCount < 0 )
    {
      // move hierarchy pages to new offset
      e.offset += hierPositionShift;
    }
    else // pointCount == 0
    {
      // nothing to do - byte size and offset should be zero
    }

  }

  // write hierarchy eVLR

  const uint64_t newEvlrOffset = m_f.tellp();

  lazperf::evlr_header outCopcHierEvlr;
  outCopcHierEvlr.reserved = 0;
  outCopcHierEvlr.user_id = "copc";
  outCopcHierEvlr.record_id = 1000; // "copc"/1000 = EPT hierarchy eVLR
  outCopcHierEvlr.data_length = mHierarchyBlob.size();
  outCopcHierEvlr.description = "EPT Hierarchy";

  outCopcHierEvlr.write( m_f );
  m_f.write( mHierarchyBlob.data(), static_cast<long>( mHierarchyBlob.size() ) );

  // write other eVLRs

  for ( size_t i = 0; i < mEvlrHeaders.size(); ++i )
  {
    lazperf::evlr_header evlrHeader = mEvlrHeaders[i];
    std::vector<char> evlrBody = mEvlrData[i];

    evlrHeader.write( m_f );
    m_f.write( evlrBody.data(), static_cast<long>( evlrBody.size() ) );
  }

  // patch header

  // 235 = presumably the byte offset of the "start of first EVLR" field in
  // the LAS 1.4 header — TODO confirm against spec
  m_f.seekp( 235 );
  m_f.write( reinterpret_cast<const char *>( &newEvlrOffset ), 8 );

  // 469 = presumably root_hier_offset inside the COPC info VLR payload
  // (375-byte LAS header + 54-byte VLR header + 40 bytes into payload) — TODO confirm
  const uint64_t newRootHierOffset = mCopcVlr.root_hier_offset + hierPositionShift;
  m_f.seekp( 469 );
  m_f.write( reinterpret_cast<const char *>( &newRootHierOffset ), 8 );

  // replace the "XXXXXXXX" placeholder with the real chunk table offset
  m_f.seekp( mHeader.point_offset );
  m_f.write( reinterpret_cast<const char *>( &newChunkTableOffset ), 8 );

  return true;
}
230
231
232
233bool QgsCopcUpdate::read( const QString &inputFilename )
234{
235 mInputFilename = inputFilename;
236
237 mFile.open( QgsLazDecoder::toNativePath( inputFilename ), std::ios::binary | std::ios::in );
238 if ( mFile.fail() )
239 {
240 mErrorMessage = QStringLiteral( "Could not open file for reading: %1" ).arg( inputFilename );
241 return false;
242 }
243
244 if ( !readHeader() )
245 return false;
246
247 readChunkTable();
248 readHierarchy();
249
250 return true;
251}
252
253
254bool QgsCopcUpdate::readHeader()
255{
256 // read header and COPC VLR
257 mHeader = lazperf::header14::create( mFile );
258 if ( !mFile )
259 {
260 mErrorMessage = QStringLiteral( "Error reading COPC header" );
261 return false;
262 }
263
264 lazperf::vlr_header vh = lazperf::vlr_header::create( mFile );
265 mCopcVlr = lazperf::copc_info_vlr::create( mFile );
266
267 int baseCount = lazperf::baseCount( mHeader.point_format_id );
268 if ( baseCount == 0 )
269 {
270 mErrorMessage = QStringLiteral( "Bad point record format: %1" ).arg( mHeader.point_format_id );
271 return false;
272 }
273
274 return true;
275}
276
277
278void QgsCopcUpdate::readChunkTable()
279{
280 uint64_t chunkTableOffset;
281
282 mFile.seekg( mHeader.point_offset );
283 mFile.read( reinterpret_cast<char *>( &chunkTableOffset ), sizeof( chunkTableOffset ) );
284 mFile.seekg( static_cast<long>( chunkTableOffset ) + 4 ); // The first 4 bytes are the version, then the chunk count.
285 mFile.read( reinterpret_cast<char *>( &mChunkCount ), sizeof( mChunkCount ) );
286
287 //
288 // read chunk table
289 //
290
291 bool variable = true;
292
293 // TODO: not sure why, but after decompress_chunk_table() the input stream seems to be dead, so we create a temporary one
294 std::ifstream copcFileTmp;
295 copcFileTmp.open( QgsLazDecoder::toNativePath( mInputFilename ), std::ios::binary | std::ios::in );
296 copcFileTmp.seekg( mFile.tellg() );
297 lazperf::InFileStream copcInFileStream( copcFileTmp );
298
299 mChunks = lazperf::decompress_chunk_table( copcInFileStream.cb(), mChunkCount, variable );
300 std::vector<lazperf::chunk> chunksWithAbsoluteOffsets;
301 uint64_t nextChunkOffset = mHeader.point_offset + 8;
302 for ( lazperf::chunk ch : mChunks )
303 {
304 chunksWithAbsoluteOffsets.push_back( {nextChunkOffset, ch.count} );
305 nextChunkOffset += ch.offset;
306 }
307}
308
309
/**
 * Walks the COPC hierarchy and scans all eVLRs of the input file.
 * Populates mOffsetToVoxel (absolute chunk offset -> voxel key), the raw
 * hierarchy payload mHierarchyBlob + its file position mHierarchyOffset,
 * and stashes all other eVLRs in mEvlrHeaders / mEvlrData so write() can
 * re-emit them.
 */
void QgsCopcUpdate::readHierarchy()
{
  // get all hierarchy pages

  // seed the traversal with a pseudo-entry describing the root page
  // (pointCount == -1 marks a reference to a child page)
  HierarchyEntries childEntriesToProcess;
  childEntriesToProcess.push_back( HierarchyEntry
  {
    QgsPointCloudNodeId( 0, 0, 0, 0 ),
    mCopcVlr.root_hier_offset,
    static_cast<int32_t>( mCopcVlr.root_hier_size ),
    -1 } );

  // depth-first traversal over all hierarchy pages
  while ( !childEntriesToProcess.empty() )
  {
    HierarchyEntry childEntry = childEntriesToProcess.back();
    childEntriesToProcess.pop_back();

    HierarchyEntries page = getHierarchyPage( mFile, childEntry.offset, childEntry.byteSize );

    for ( const HierarchyEntry &e : page )
    {
      if ( e.pointCount > 0 ) // it's a non-empty node
      {
        // each chunk offset must map to exactly one voxel
        Q_ASSERT( !mOffsetToVoxel.contains( e.offset ) );
        mOffsetToVoxel[e.offset] = e.key;
      }
      else if ( e.pointCount < 0 ) // referring to a child page
      {
        childEntriesToProcess.push_back( e );
      }
      // pointCount == 0: empty node, nothing to record
    }
  }

  lazperf::evlr_header evlr1;
  mFile.seekg( static_cast<long>( mHeader.evlr_offset ) );

  mHierarchyOffset = 0; // where the hierarchy eVLR payload starts

  // scan all eVLRs: extract the hierarchy one, keep the rest for rewriting
  for ( uint32_t i = 0; i < mHeader.evlr_count; ++i )
  {
    evlr1.read( mFile );
    if ( evlr1.user_id == "copc" && evlr1.record_id == 1000 )
    {
      // "copc"/1000 identifies the EPT hierarchy eVLR
      mHierarchyBlob.resize( evlr1.data_length );
      mHierarchyOffset = mFile.tellg();
      mFile.read( mHierarchyBlob.data(), static_cast<long>( evlr1.data_length ) );
    }
    else
    {
      // keep for later
      mEvlrHeaders.push_back( evlr1 );
      std::vector<char> evlrBlob;
      evlrBlob.resize( evlr1.data_length );
      mFile.read( evlrBlob.data(), static_cast<long>( evlrBlob.size() ) );
      mEvlrData.emplace_back( std::move( evlrBlob ) );
    }
  }

  // a valid COPC file always carries a hierarchy eVLR
  Q_ASSERT( !mHierarchyBlob.empty() );
}
370
371
372bool QgsCopcUpdate::writeUpdatedFile( const QString &inputFilename,
373 const QString &outputFilename,
374 const QHash<QgsPointCloudNodeId, UpdatedChunk> &updatedChunks,
375 QString *errorMessage )
376{
377 QgsCopcUpdate copcUpdate;
378 if ( !copcUpdate.read( inputFilename ) )
379 {
380 if ( errorMessage )
381 *errorMessage = copcUpdate.errorMessage();
382 return false;
383 }
384
385 if ( !copcUpdate.write( outputFilename, updatedChunks ) )
386 {
387 if ( errorMessage )
388 *errorMessage = copcUpdate.errorMessage();
389 return false;
390 }
391
392 return true;
393}
Handles update operations to a COPC file.
QString errorMessage() const
Returns error message.
static bool writeUpdatedFile(const QString &inputFilename, const QString &outputFilename, const QHash< QgsPointCloudNodeId, UpdatedChunk > &updatedChunks, QString *errorMessage=nullptr)
Convenience function to do the whole process in one go: load a COPC file, then write a new COPC file with the given updated chunks.
bool read(const QString &inputFilename)
Reads input COPC file and initializes all the members.
bool write(const QString &outputFilename, const QHash< QgsPointCloudNodeId, UpdatedChunk > &updatedChunks)
Writes a COPC file with updated chunks.
Represents an indexed point cloud node's position in octree.
QVector< HierarchyEntry > HierarchyEntries
HierarchyEntries getHierarchyPage(std::ifstream &file, uint64_t offset, uint64_t size)
Keeps one entry of COPC hierarchy.
QgsPointCloudNodeId key
Key of the data to which this entry corresponds.
uint64_t offset
Absolute offset to the data chunk if the pointCount > 0.
int32_t pointCount
If > 0, represents the number of points in the data chunk.
int32_t byteSize
Size of the data chunk in bytes (compressed size) if the pointCount > 0.
Keeps information about how the points of a single chunk have been modified.
QByteArray chunkData
Data of the chunk (compressed already with LAZ compressor).