Debugger: Also support boost 1.55 unordered_maps

Change-Id: Id9ac4ecc8c61d4224f5840df2ec0a75c3c305825
Reviewed-by: hjk <hjk@qt.io>
This commit is contained in:
hjk
2016-11-18 12:07:55 +01:00
parent 3b1308bb43
commit 3c3bccf0fb
2 changed files with 35 additions and 16 deletions

View File

@@ -109,29 +109,35 @@ def qdump__boost__posix_time__time_duration(d, value):
def qdump__boost__unordered__unordered_set(d, value):
    # Dumper for boost::unordered::unordered_set, covering the node layouts
    # of boost 1.48, 1.55 and 1.58.
    innerType = value.type[0]

    # The container struct is 6 pointers wide (48 bytes on 64-bit) for
    # boost 1.55+, but only 40 bytes for boost 1.48 — use that to pick layout.
    if value.type.size() == 6 * d.ptrSize():
        # boost 1.55 or 1.58.
        # bases are 3? bytes, and mlf is actually a float, but since it is
        # followed by a size_t maxload it is effectively padded to a size_t.
        bases, bucketCount, size, mlf, maxload, buckets = value.split('tttttp')
        # Distinguish 1.58 from 1.55: 1.58's iterator type carries one
        # template argument, 1.55's carries two.
        iterInner = d.lookupType(value.type.name + '::iterator').target()
        forward = len(iterInner.templateArguments()) == 1
    else:
        # boost 1.48.
        buckets, bucketCount, size, mlf, maxload = value.split('ptttt')
        forward = False

    if forward:
        # boost 1.58: each node is two pointers followed by the value.
        code = 'pp{%s}' % innerType.name

        def children(node):
            while True:
                node, dummy, val = d.split(code, node)
                yield val
    else:
        # boost 1.48 or 1.55: values are stored in front of the next
        # pointers, so determine the value's offset within the node.
        code = '{%s}@p' % innerType.name
        (pp, ssize, fields) = d.describeStruct(code)
        offset = fields[2].offset()

        def children(node):
            while True:
                val, pad, node = d.split(code, node - offset)
                yield val

    firstNode = d.extractPointer(buckets + bucketCount * d.ptrSize())
    d.putItems(size, children(firstNode), maxNumChild=10000)

View File

@@ -2586,6 +2586,13 @@ class DumperBase:
return True return True
return False return False
def putItems(self, count, generator, maxNumChild=10000):
    # Report the item count, then — only if the node is expanded in the
    # view — materialize up to maxNumChild children from the generator.
    self.putItemCount(count)
    if not self.isExpanded():
        return
    with Children(self, count, maxNumChild=maxNumChild):
        for index, child in zip(self.childRange(), generator):
            self.putSubItem(index, child)
def putItem(self, value): def putItem(self, value):
self.preping('putItem') self.preping('putItem')
self.putItemX(value) self.putItemX(value)
@@ -3317,6 +3324,12 @@ class DumperBase:
def unqualified(self): def unqualified(self):
return self return self
def templateArguments(self):
    # Prefer the cached type data; fall back to parsing the template
    # parameters out of the type id when no data has been resolved yet.
    tdata = self.typeData()
    if tdata is not None:
        return tdata.templateArguments
    return self.dumper.listTemplateParameters(self.typeId)
def templateArgument(self, position): def templateArgument(self, position):
tdata = self.typeData() tdata = self.typeData()
#warn('TDATA: %s' % tdata) #warn('TDATA: %s' % tdata)