Debugger: Introduce and use type ids instead of type in dumpers

Change-Id: I569d13d4f5d66cf1606b2b5d047b415659de539b
Reviewed-by: Christian Stenger <christian.stenger@qt.io>
Author: hjk
Date: 2016-10-25 15:32:13 +02:00
Commit: bd2653fbaa (parent a6427453f4)
8 changed files with 1163 additions and 653 deletions
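The gist of the change, roughly: instead of handing every dumper a heavyweight Type object wrapping a native gdb.Type, each native type is now translated once into a TypeData record that is registered under a string type id, and the Type handles passed around afterwards carry only that id. A minimal sketch of the registry pattern, using class and attribute names that appear in the Dumper diff below; the bodies here are illustrative only, not the actual implementation:

class Dumper:
    class TypeData:
        # Bag of per-type facts, filled in once and cached under a type id.
        def __init__(self):
            self.name = None
            self.typeId = None
            self.code = None
            self.lbitsize = None
            self.lfields = []

    class Type:
        # Lightweight handle: stores only the id, resolves lazily.
        def __init__(self, dumper, typeId):
            self.dumper = dumper
            self.typeId = typeId

        def data(self):
            return self.dumper.typeData[self.typeId]

    def __init__(self):
        self.typeData = {}   # typeId (str) -> TypeData

    def registerType(self, typeId, tdata):
        self.typeData[typeId] = tdata

    def fromNativeType(self, nativeType):
        # The real code special-cases pointers, references, arrays,
        # typedefs and anonymous structs; here the id is just the name.
        typeId = str(nativeType)
        if typeId not in self.typeData:
            tdata = Dumper.TypeData()
            tdata.name = typeId
            tdata.typeId = typeId
            # Registering before fields are listed is what breaks the
            # recursion when a struct's fields refer back to the struct.
            self.registerType(typeId, tdata)
        return Dumper.Type(self, typeId)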


@@ -117,25 +117,25 @@ def qdump__boost__unordered__unordered_set(d, value):
     innerType = value.type[0]
     bucketCount = d.extractInt(base + ptrSize)
     #warn("A BUCKET COUNT: %s" % bucketCount)
-    #warn("X BUCKET COUNT: %s" % d.parseAndEvaluate("s1.table_.bucket_count_"))
+    #warn("X BUCKET COUNT: %s" % d.parseAndEvaluate("s1.table_.bucket_count_").value())
     try:
         # boost 1.58
         table = value["table_"]
         bucketsAddr = table["buckets_"].integer()
         #warn("A BUCKETS: 0x%x" % bucketsAddr)
-        #warn("X BUCKETS: %s" % d.parseAndEvaluate("s1.table_.buckets_"))
+        #warn("X BUCKETS: 0x%x" % d.parseAndEvaluate("s1.table_.buckets_").pointer())
         lastBucketAddr = bucketsAddr + bucketCount * ptrSize
         #warn("A LAST BUCKET: 0x%x" % lastBucketAddr)
-        #warn("X LAST BUCKET: %s" % d.parseAndEvaluate("s1.table_.get_bucket(s1.table_.bucket_count_)"))
+        #warn("X LAST BUCKET: 0x%x" % d.parseAndEvaluate("s1.table_.get_bucket(s1.table_.bucket_count_)").pointer())
         previousStartAddr = lastBucketAddr
         #warn("A PREVIOUS START: 0x%x" % previousStartAddr)
-        #warn("X PREVIOUS START: %s" % d.parseAndEvaluate("s1.table_.get_previous_start()"))
+        #warn("X PREVIOUS START: 0x%x" % d.parseAndEvaluate("s1.table_.get_previous_start()").pointer())
         item = d.extractPointer(previousStartAddr)
         #warn("A KEY ADDR: 0x%x" % item)
-        #warn("X KEY ADDR: %s" % d.parseAndEvaluate("s1.table_.get_previous_start()->next_"))
+        #warn("X KEY ADDR: 0x%x" % d.parseAndEvaluate("s1.table_.get_previous_start()->next_").pointer())
         item = d.extractPointer(previousStartAddr)
         #warn("A VALUE: %x" % d.extractInt(item + ptrSize))
-        #warn("X VALUE: %s" % d.parseAndEvaluate("*(int*)(s1.table_.get_previous_start()->next_ + 1)"))
+        #warn("X VALUE: %x" % d.parseAndEvaluate("*(int*)(s1.table_.get_previous_start()->next_ + 1)").integer())
         with Children(d, size, maxNumChild=10000):
             for j in d.childRange():
                 d.putSubItem(j, d.createValue(item + 2 * ptrSize, innerType))

File diff suppressed because it is too large


@@ -205,17 +205,46 @@ class Dumper(DumperBase):
         self.output = []
         self.setVariableFetchingOptions(args)
 
-    def fromNativeDowncastableValue(self, nativeValue):
+    def fromFrameValue(self, nativeValue):
+        val = nativeValue
         if self.useDynamicType:
             try:
-                return self.fromNativeValue(nativeValue.cast(nativeValue.dynamic_type))
+                val = nativeValue.cast(nativeValue.dynamic_type)
             except:
                 pass
-        return self.fromNativeValue(nativeValue)
+        return self.fromNativeValue(val)
 
     def fromNativeValue(self, nativeValue):
-        #self.check(isinstance(nativeValue, gdb.Value))
+        #warn("FROM NATIVE VALUE: %s" % nativeValue)
+        self.check(isinstance(nativeValue, gdb.Value))
         nativeType = nativeValue.type
+        code = nativeType.code
+        if code == gdb.TYPE_CODE_REF:
+            targetType = self.fromNativeType(nativeType.target().unqualified())
+            val = self.createReferenceValue(toInteger(nativeValue.address), targetType)
+            #warn("CREATED REF: %s" % val)
+            return val
+        if code == gdb.TYPE_CODE_PTR:
+            targetType = self.fromNativeType(nativeType.target().unqualified())
+            val = self.createPointerValue(toInteger(nativeValue), targetType)
+            #warn("CREATED PTR 1: %s" % val)
+            if not nativeValue.address is None:
+                val.laddress = toInteger(nativeValue.address)
+            #warn("CREATED PTR 2: %s" % val)
+            return val
+        if code == gdb.TYPE_CODE_TYPEDEF:
+            targetType = nativeType.strip_typedefs().unqualified()
+            #warn("TARGET TYPE: %s" % targetType)
+            if targetType.code == gdb.TYPE_CODE_ARRAY:
+                val = self.Value(self)
+                val.laddress = toInteger(nativeValue.address)
+            else:
+                # Cast may fail (e.g for arrays, see test for Bug5799)
+                val = self.fromNativeValue(nativeValue.cast(targetType))
+            val.type = self.fromNativeType(nativeType)
+            #warn("CREATED TYPEDEF: %s" % val)
+            return val
         val = self.Value(self)
         if not nativeValue.address is None:
             val.laddress = toInteger(nativeValue.address)
@@ -238,82 +267,131 @@ class Dumper(DumperBase):
             val.ldisplay += ' (%s)' % intval
         elif code == gdb.TYPE_CODE_COMPLEX:
             val.ldisplay = str(nativeValue)
-        elif code == gdb.TYPE_CODE_ARRAY:
-            val.type.ltarget = nativeValue[0].type.unqualified()
+        #elif code == gdb.TYPE_CODE_ARRAY:
+        #    val.type.ltarget = nativeValue[0].type.unqualified()
         return val
 
+    def ptrSize(self):
+        result = gdb.lookup_type('void').pointer().sizeof
+        self.ptrSize = lambda: result
+        return result
+
     def fromNativeType(self, nativeType):
         self.check(isinstance(nativeType, gdb.Type))
-        typeobj = self.Type(self)
-        typeobj.nativeType = nativeType.unqualified()
-        typeobj.name = str(typeobj.nativeType)
-        typeobj.lbitsize = nativeType.sizeof * 8
-        typeobj.code = {
-            gdb.TYPE_CODE_TYPEDEF : TypeCodeTypedef,
+        code = nativeType.code
+        #warn('FROM NATIVE TYPE: %s' % nativeType)
+        #nativeType = nativeType.unqualified()
+
+        if code == gdb.TYPE_CODE_PTR:
+            #warn('PTR')
+            targetType = self.fromNativeType(nativeType.target().unqualified())
+            return self.createPointerType(targetType)
+
+        if code == gdb.TYPE_CODE_REF:
+            #warn('REF')
+            targetType = self.fromNativeType(nativeType.target().unqualified())
+            return self.createReferenceType(targetType)
+
+        if code == gdb.TYPE_CODE_ARRAY:
+            #warn('ARRAY')
+            nativeTargetType = nativeType.target().unqualified()
+            targetType = self.fromNativeType(nativeTargetType)
+            count = nativeType.sizeof // nativeTargetType.sizeof
+            return self.createArrayType(targetType, count)
+
+        if code == gdb.TYPE_CODE_TYPEDEF:
+            #warn('TYPEDEF')
+            nativeTargetType = nativeType.unqualified()
+            while nativeTargetType.code == gdb.TYPE_CODE_TYPEDEF:
+                nativeTargetType = nativeTargetType.strip_typedefs().unqualified()
+            targetType = self.fromNativeType(nativeTargetType)
+            return self.createTypedefedType(targetType, str(nativeType))
+
+        if code == gdb.TYPE_CODE_ERROR:
+            warn('Type error: %s' % nativeType)
+            return self.Type(self, '')
+
+        typeId = self.nativeTypeId(nativeType)
+        res = self.typeData.get(typeId, None)
+        if res is None:
+            tdata = self.TypeData(self)
+            tdata.name = str(nativeType)
+            tdata.typeId = typeId
+            tdata.lbitsize = nativeType.sizeof * 8
+            tdata.code = {
+                #gdb.TYPE_CODE_TYPEDEF : TypeCodeTypedef, # Handled above.
                 gdb.TYPE_CODE_METHOD : TypeCodeFunction,
                 gdb.TYPE_CODE_VOID : TypeCodeVoid,
                 gdb.TYPE_CODE_FUNC : TypeCodeFunction,
                 gdb.TYPE_CODE_METHODPTR : TypeCodeFunction,
                 gdb.TYPE_CODE_MEMBERPTR : TypeCodeFunction,
-            gdb.TYPE_CODE_PTR : TypeCodePointer,
-            gdb.TYPE_CODE_REF : TypeCodeReference,
+                #gdb.TYPE_CODE_PTR : TypeCodePointer, # Handled above.
+                #gdb.TYPE_CODE_REF : TypeCodeReference, # Handled above.
                 gdb.TYPE_CODE_BOOL : TypeCodeIntegral,
                 gdb.TYPE_CODE_CHAR : TypeCodeIntegral,
                 gdb.TYPE_CODE_INT : TypeCodeIntegral,
                 gdb.TYPE_CODE_FLT : TypeCodeFloat,
                 gdb.TYPE_CODE_ENUM : TypeCodeEnum,
-            gdb.TYPE_CODE_ARRAY : TypeCodeArray,
+                #gdb.TYPE_CODE_ARRAY : TypeCodeArray,
                 gdb.TYPE_CODE_STRUCT : TypeCodeStruct,
                 gdb.TYPE_CODE_UNION : TypeCodeStruct,
                 gdb.TYPE_CODE_COMPLEX : TypeCodeComplex,
                 gdb.TYPE_CODE_STRING : TypeCodeFortranString,
-        }[nativeType.code]
-        return typeobj
+            }[code]
+            if tdata.code == TypeCodeEnum:
+                tdata.enumDisplay = lambda intval: self.nativeTypeEnumDisplay(nativeType, intval)
+            self.registerType(typeId, tdata) # Prevent recursion in fields.
+            tdata.lfields = self.listFields(nativeType, self.Type(self, typeId))
+            tdata.templateArguments = self.listTemplateParameters(nativeType)
+            self.registerType(typeId, tdata) # Fix up fields and template args
+            # warn('CREATE TYPE: %s' % typeId)
+        #else:
+        #    warn('REUSE TYPE: %s' % typeId)
+        return self.Type(self, typeId)
 
-    def nativeTypeDereference(self, nativeType):
-        return self.fromNativeType(nativeType.strip_typedefs().target())
-
-    def nativeTypeUnqualified(self, nativeType):
-        return self.fromNativeType(nativeType.unqualified())
-
-    def nativeTypePointer(self, nativeType):
-        return self.fromNativeType(nativeType.pointer())
-
-    def nativeTypeTarget(self, nativeType):
-        while nativeType.code == gdb.TYPE_CODE_TYPEDEF:
-            nativeType = nativeType.strip_typedefs().unqualified()
-        return self.fromNativeType(nativeType.target())
-
-    def nativeTypeFirstBase(self, nativeType):
-        nativeFields = nativeType.fields()
-        if len(nativeFields) and nativeFields[0].is_base_class:
-            return self.fromNativeType(nativeFields[0].type)
+    def listTemplateParameters(self, nativeType):
+        targs = []
+        pos = 0
+        while True:
+            try:
+                targ = nativeType.template_argument(pos)
+            except:
+                break
+            if isinstance(targ, gdb.Type):
+                targs.append(self.fromNativeType(targ.unqualified()))
+            elif isinstance(targ, gdb.Value):
+                #targs.append(self.fromNativeValue(targ))
+                targs.append(self.fromNativeValue(targ).value())
+            else:
+                error('CRAP')
+            pos += 1
+        return targs
 
     def nativeTypeEnumDisplay(self, nativeType, intval):
         try:
-            val = gdb.parse_and_eval("(%s)%d" % (nativeType, intval))
-            return "%s (%d)" % (val, intval)
+            val = gdb.parse_and_eval('(%s)%d' % (nativeType, intval))
+            return '%s (%d)' % (val, intval)
         except:
-            return "%d" % intval
+            return '%d' % intval
 
-    def nativeTypeFields(self, nativeType):
-        if nativeType.code == gdb.TYPE_CODE_TYPEDEF:
-            return self.nativeTypeFields(nativeType.strip_typedefs())
+    def nativeTypeId(self, nativeType):
+        name = str(nativeType)
+        if len(name) == 0:
+            c = '0'
+        elif name == 'struct {...}':
+            c = 's'
+        elif name == 'union {...}':
+            c = 'u'
+        else:
+            return name
+        typeId = c + ''.join(['{%s:%s}' % (f.name, self.nativeTypeId(f.type)) for f in nativeType.fields()])
+        return typeId
 
+    def listFields(self, nativeType, parentType):
+        #if nativeType.code == gdb.TYPE_CODE_TYPEDEF:
+        #    return self.listFields(nativeType.strip_typedefs(), parentType)
+
         fields = []
-        if nativeType.code == gdb.TYPE_CODE_ARRAY:
-            # An array.
-            typeobj = nativeType.strip_typedefs()
-            innerType = typeobj.target()
-            for i in xrange(int(typeobj.sizeof / innerType.sizeof)):
-                field = self.Field(self)
-                field.ltype = self.fromNativeType(innerType)
-                field.parentType = self.fromNativeType(nativeType)
-                field.isBaseClass = False
-                field.lbitsize = innerType.sizeof
-                field.lbitpos = i * innerType.sizeof * 8
-                fields.append(field)
-            return fields
-
         if not nativeType.code in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION):
             return fields
@@ -321,18 +399,20 @@ class Dumper(DumperBase):
         nativeIndex = 0
         baseIndex = 0
         nativeFields = nativeType.fields()
-        #warn("NATIVE FIELDS: %s" % nativeFields)
+        #warn('NATIVE FIELDS: %s' % nativeFields)
+        anonNumber = 0
         for nativeField in nativeFields:
-            #warn("FIELD: %s" % nativeField)
-            #warn(" DIR: %s" % dir(nativeField))
-            #warn(" BITSIZE: %s" % nativeField.bitsize)
-            #warn(" ARTIFICIAL: %s" % nativeField.artificial)
-            #warn("FIELD NAME: %s" % nativeField.name)
-            #warn("FIELD TYPE: %s" % nativeField.type)
+            #warn('FIELD: %s' % nativeField)
+            #warn(' DIR: %s' % dir(nativeField))
+            #warn(' BITSIZE: %s' % nativeField.bitsize)
+            #warn(' ARTIFICIAL: %s' % nativeField.artificial)
+            #warn('FIELD NAME: %s' % nativeField.name)
+            #warn('FIELD TYPE: %s' % nativeField.type)
+            #warn('FIELD TYPE ID: %s' % self.nativeTypeId(nativeField.type))
             #self.check(isinstance(nativeField, gdb.Field))
             field = self.Field(self)
-            field.ltype = self.fromNativeType(nativeField.type)
-            field.parentType = self.fromNativeType(nativeType)
+            field.ltype = self.fromNativeType(nativeField.type.unqualified())
+            field.parentType = parentType
             field.name = nativeField.name
             field.isBaseClass = nativeField.is_base_class
             if hasattr(nativeField, 'bitpos'):
@@ -356,36 +436,31 @@ class Dumper(DumperBase):
                 # Something without a name.
                 # Anonymous union? We need a dummy name to distinguish
                 # multiple anonymous unions in the struct.
-                self.anonNumber += 1
-                field.name = "#%s" % self.anonNumber
+                anonNumber += 1
+                field.name = '#%s' % anonNumber
             else:
                 # Normal named field.
                 field.name = nativeField.name
             field.nativeIndex = nativeIndex
+            #warn('FIELD RESULT: %s' % field)
             fields.append(field)
             nativeIndex += 1
-        #warn("FIELDS: %s" % fields)
+        #warn('FIELDS: %s' % fields)
         return fields
 
-    def nativeTypeStripTypedefs(self, typeobj):
-        typeobj = typeobj.unqualified()
-        while typeobj.code == gdb.TYPE_CODE_TYPEDEF:
-            typeobj = typeobj.strip_typedefs().unqualified()
-        return self.fromNativeType(typeobj)
-
     def listOfLocals(self, partialVar):
         frame = gdb.selected_frame()
         try:
             block = frame.block()
-            #warn("BLOCK: %s " % block)
+            #warn('BLOCK: %s ' % block)
         except RuntimeError as error:
-            #warn("BLOCK IN FRAME NOT ACCESSIBLE: %s" % error)
+            #warn('BLOCK IN FRAME NOT ACCESSIBLE: %s' % error)
             return []
         except:
-            warn("BLOCK NOT ACCESSIBLE FOR UNKNOWN REASONS")
+            warn('BLOCK NOT ACCESSIBLE FOR UNKNOWN REASONS')
             return []
 
         items = []
@@ -409,10 +484,18 @@ class Dumper(DumperBase):
# "NotImplementedError: Symbol type not yet supported in # "NotImplementedError: Symbol type not yet supported in
# Python scripts." # Python scripts."
#warn("SYMBOL %s (%s, %s)): " % (symbol, name, symbol.name)) #warn("SYMBOL %s (%s, %s)): " % (symbol, name, symbol.name))
try: if False and self.passExceptions:
value = self.fromNativeDowncastableValue(frame.read_var(name, block)) value = self.fromFrameValue(frame.read_var(name, block))
#warn("READ 1: %s" % value)
value.name = name value.name = name
#warn("READ 1: %s" % value.stringify())
items.append(value)
continue
try:
# Same as above, but for production.
value = self.fromFrameValue(frame.read_var(name, block))
value.name = name
#warn("READ 1: %s" % value.stringify())
items.append(value) items.append(value)
continue continue
except: except:
@@ -420,7 +503,7 @@ class Dumper(DumperBase):
                 try:
                     #warn("READ 2: %s" % item.value)
-                    value = self.fromNativeDowncastableValue(frame.read_var(name))
+                    value = self.fromFrameValue(frame.read_var(name))
                     value.name = name
                     items.append(value)
                     continue
@@ -435,7 +518,8 @@ class Dumper(DumperBase):
                 try:
                     #warn("READ 3: %s %s" % (name, item.value))
                     #warn("ITEM 3: %s" % item.value)
-                    value = self.fromNativeDowncastableValue(gdb.parse_and_eval(name))
+                    value = self.fromFrameValue(gdb.parse_and_eval(name))
+                    value.name = name
                     items.append(value)
                 except:
                     # Can happen in inlined code (see last line of
@@ -550,10 +634,11 @@ class Dumper(DumperBase):
exp = "((%s*)0x%x)->%s(%s)" % (typeName, addr, function, arg) exp = "((%s*)0x%x)->%s(%s)" % (typeName, addr, function, arg)
#warn("CALL: %s" % exp) #warn("CALL: %s" % exp)
result = gdb.parse_and_eval(exp) result = gdb.parse_and_eval(exp)
#warn(" -> %s" % result) warn(" -> %s" % result)
res = self.fromNativeValue(result)
if not value.address(): if not value.address():
gdb.parse_and_eval("free((void*)0x%x)" % addr) gdb.parse_and_eval("free((void*)0x%x)" % addr)
return self.fromNativeValue(result) return res
def makeExpression(self, value): def makeExpression(self, value):
typename = "::" + value.type.name typename = "::" + value.type.name
@@ -573,13 +658,6 @@ class Dumper(DumperBase):
#warn(" VALUE: %s" % value) #warn(" VALUE: %s" % value)
return value return value
def nativeTypeTemplateArgument(self, nativeType, position, numeric):
#warn("NATIVE TYPE: %s" % dir(nativeType))
arg = nativeType.template_argument(position)
if numeric:
return int(str(arg))
return self.fromNativeType(arg)
def pokeValue(self, value): def pokeValue(self, value):
# Allocates inferior memory and copies the contents of value. # Allocates inferior memory and copies the contents of value.
# Returns a pointer to the copy. # Returns a pointer to the copy.
@@ -885,9 +963,6 @@ class Dumper(DumperBase):
     def enumExpression(self, enumType, enumValue):
         return self.qtNamespace() + "Qt::" + enumValue
 
-    def lookupType(self, typeName):
-        return self.fromNativeType(self.lookupNativeType(typeName))
-
     def lookupNativeType(self, typeName):
         nativeType = self.lookupNativeTypeHelper(typeName)
         if not nativeType is None:
@@ -1154,7 +1229,7 @@ class CliDumper(Dumper):
     def putNumChild(self, numchild):
         pass
 
-    def putOriginalAddress(self, value):
+    def putOriginalAddress(self, address):
         pass
 
     def fetchVariables(self, args):
@@ -1205,4 +1280,4 @@ class InterpreterMessageBreakpoint(gdb.Breakpoint):
print("Interpreter event received.") print("Interpreter event received.")
return theDumper.handleInterpreterMessage() return theDumper.handleInterpreterMessage()
InterpreterMessageBreakpoint() #InterpreterMessageBreakpoint()
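For reference, the id scheme in nativeTypeId above: a named type uses its name as the id, while an anonymous struct or union gets a synthetic id composed from its fields. A rough standalone illustration; the helper name and the example field names are made up:

def typeIdFor(name, fields):
    # fields: list of (fieldName, fieldTypeId) pairs
    prefix = {'': '0', 'struct {...}': 's', 'union {...}': 'u'}.get(name)
    if prefix is None:
        return name   # named type: the name itself is the id
    return prefix + ''.join('{%s:%s}' % (fname, ftypeId) for fname, ftypeId in fields)

# typeIdFor('struct {...}', [('x', 'int'), ('y', 'double')]) yields 's{x:int}{y:double}'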

File diff suppressed because it is too large


@@ -121,7 +121,7 @@ def qdump__Eigen__Matrix(d, value):
     storage = value['m_storage']
     nrows = storage['m_rows'].integer() if argRow == -1 else argRow
     ncols = storage['m_cols'].integer() if argCol == -1 else argCol
-    p = storage['m_data'].integer()
+    p = storage['m_data'].pointer()
     innerSize = innerType.size()
     d.putValue('(%s x %s), %s' % (nrows, ncols, ['ColumnMajor', 'RowMajor'][rowMajor]))
     d.putField('keeporder', '1')


@@ -152,7 +152,7 @@ def qdump_X_QModelIndex(d, value):
     except:
         p = value['i']
         m = value['m']
-    if m.integer() == 0 or r < 0 or c < 0:
+    if m.pointer() == 0 or r < 0 or c < 0:
         d.putValue('(invalid)')
         d.putPlainChildren(value)
         return
@@ -196,7 +196,7 @@ def qdump_X_QModelIndex(d, value):
 def qdump__QDate(d, value):
-    jd = value.integer()
+    jd = value.pointer()
     if jd:
         d.putValue(jd, 'juliandate')
         d.putNumChild(1)
@@ -332,6 +332,7 @@ def qdump__QDateTime(d, value):
         d.putNumChild(0)
         return
+    d.putNumChild(1)
     if d.isExpanded():
         with Children(d):
             ns = d.qtNamespace()
@@ -1120,7 +1121,7 @@ def qdump__QRegion(d, value):
 def qdump__QScopedPointer(d, value):
-    if value.integer() == 0:
+    if value.pointer() == 0:
         d.putValue('(null)')
         d.putNumChild(0)
     else:
@@ -1187,13 +1188,13 @@ def qdump__QSet(d, value):
 def qdump__QSharedData(d, value):
-    d.putValue('ref: %s' % d.extractInt(value['ref'].address))
+    d.putValue('ref: %s' % value.to('i'))
     d.putNumChild(0)
 
 
 def qdump__QSharedDataPointer(d, value):
     d_ptr = value['d']
-    if d_ptr.integer() == 0:
+    if d_ptr.pointer() == 0:
         d.putValue('(null)')
         d.putNumChild(0)
     else:
@@ -1206,7 +1207,7 @@ def qdump__QSharedDataPointer(d, value):
             d.putPlainChildren(value)
             return
         d.putBetterType(d.currentType)
-        d.putItem(d_ptr.cast(innerType.pointer()).dereference())
+        d.putItem(d_ptr.dereference())
@@ -1404,6 +1405,7 @@ def qdump__QUrl(d, value):
     if displayFormat == SeparateFormat:
         d.putDisplay('utf16:separate', url)
+    d.putNumChild(1)
     if d.isExpanded():
         with Children(d):
             d.putIntItem('port', port)
@@ -1684,12 +1686,12 @@ def qdump__QVariant(d, value):
         ptr = p.pointer()
         (elided, blob) = d.encodeCArray(ptr, 1, 100)
         typeName = d.hexdecode(blob)
-        base = data.extractPointer()
         # Prefer namespaced version.
         if len(ns) > 0:
             if not d.lookupNativeType(ns + typeName) is None:
                 typeName = ns + typeName
-        d.putSubItem('data', d.createValue(base, d.createType(typeName)))
+        data.type = d.createType(typeName + ' *')
+        d.putSubItem('data', data)
         if not typeName is None:
             d.putBetterType('%sQVariant (%s)' % (ns, typeName))
         return None
@@ -1706,7 +1708,7 @@ def qedit__QVector(d, value, data):
         base = value['d'].address() + offset
     except:
         # Qt 4.
-        base = value['p']['array'].integer()
+        base = value['p']['array'].pointer()
 
     d.setValues(base, innerType, values)
@@ -1766,6 +1768,7 @@ def qdump_QWeakPointerHelper(d, value, isWeak):
 def qdump__QXmlAttributes__Attribute(d, value):
     d.putEmptyValue()
+    d.putNumChild(1)
     if d.isExpanded():
         with Children(d):
             (qname, uri, localname, val) = value.split('{QString}' * 4)
@@ -2345,7 +2348,7 @@ def qdump__QScriptValue(d, value):
     #d.putEmptyValue()
     dd = value['d_ptr']['d']
     ns = d.qtNamespace()
-    if dd.integer() == 0:
+    if dd.pointer() == 0:
         d.putValue('(invalid)')
         d.putNumChild(0)
         return
@@ -2621,9 +2624,9 @@ def qdump__QJsonValue(d, value):
 def qdump__QJsonArray(d, value):
-    qdumpHelper_QJsonArray(d, value['d'].integer(), value['a'].integer())
+    qdumpHelper_QJsonArray(d, value['d'].pointer(), value['a'].pointer())
 
 
 def qdump__QJsonObject(d, value):
-    qdumpHelper_QJsonObject(d, value['d'].integer(), value['o'].integer())
+    qdumpHelper_QJsonObject(d, value['d'].pointer(), value['o'].pointer())


@@ -76,17 +76,17 @@ def qdump__std__deque(d, value):
impl = value["_M_impl"] impl = value["_M_impl"]
start = impl["_M_start"] start = impl["_M_start"]
finish = impl["_M_finish"] finish = impl["_M_finish"]
size = bufsize * int((finish["_M_node"].integer() - start["_M_node"].integer()) / d.ptrSize() - 1) size = bufsize * ((finish["_M_node"].pointer() - start["_M_node"].pointer()) // d.ptrSize() - 1)
size += int((finish["_M_cur"].integer() - finish["_M_first"].integer()) / innerSize) size += ((finish["_M_cur"].pointer() - finish["_M_first"].pointer()) // innerSize)
size += int((start["_M_last"].integer() - start["_M_cur"].integer()) / innerSize) size += ((start["_M_last"].pointer() - start["_M_cur"].pointer()) // innerSize)
d.check(0 <= size and size <= 1000 * 1000 * 1000) d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size) d.putItemCount(size)
if d.isExpanded(): if d.isExpanded():
with Children(d, size, maxNumChild=2000, childType=innerType): with Children(d, size, maxNumChild=2000, childType=innerType):
pcur = start["_M_cur"].integer() pcur = start["_M_cur"].pointer()
pfirst = start["_M_first"] pfirst = start["_M_first"]
plast = start["_M_last"].integer() plast = start["_M_last"].pointer()
pnode = start["_M_node"] pnode = start["_M_node"]
for i in d.childRange(): for i in d.childRange():
d.putSubItem(i, d.createValue(pcur, innerType)) d.putSubItem(i, d.createValue(pcur, innerType))
@@ -99,7 +99,7 @@ def qdump__std__deque(d, value):
#warn("NEWNODE: 0x%x %s" % (newnode.pointer(), newnode)) #warn("NEWNODE: 0x%x %s" % (newnode.pointer(), newnode))
pnode = newnode pnode = newnode
#warn("PNODE 2: 0x%x %s" % (pnode.pointer(), pnode)) #warn("PNODE 2: 0x%x %s" % (pnode.pointer(), pnode))
pfirst = newnode.dereference().integer() pfirst = newnode.dereference().pointer()
plast = pfirst + bufsize * d.ptrSize() plast = pfirst + bufsize * d.ptrSize()
pcur = pfirst pcur = pfirst
@@ -237,7 +237,6 @@ def qdump__std__map(d, value):
     if d.isExpanded():
         pairType = value.type[3][0]
-        pairPointer = pairType.pointer()
         with PairedChildren(d, size, pairType=pairType, maxNumChild=1000):
             node = value["_M_t"]["_M_impl"]["_M_header"]["_M_left"]
             nodeSize = node.dereference().type.size()
@@ -245,10 +244,10 @@ def qdump__std__map(d, value):
             for i in d.childRange():
                 (pad1, key, pad2, value) = d.split(typeCode, node.pointer() + nodeSize)
                 d.putPairItem(i, (key, value))
-                if node["_M_right"].integer() == 0:
+                if node["_M_right"].pointer() == 0:
                     parent = node["_M_parent"]
                     while True:
-                        if node.integer() != parent["_M_right"].integer():
+                        if node.pointer() != parent["_M_right"].pointer():
                             break
                         node = parent
                         parent = parent["_M_parent"]
@@ -257,7 +256,7 @@ def qdump__std__map(d, value):
                 else:
                     node = node["_M_right"]
                     while True:
-                        if node["_M_left"].integer() == 0:
+                        if node["_M_left"].pointer() == 0:
                             break
                         node = node["_M_left"]
@@ -271,13 +270,13 @@ def qdump_std__map__helper(d, size, value):
     for i in d.childRange():
         pair = node.cast(nodeType).dereference()['_Myval']
         d.putPairItem(i, pair)
-        if node['_Right']['_Isnil'].integer() == 0:
+        if node['_Right']['_Isnil'].pointer() == 0:
             node = node['_Right']
-            while node['_Left']['_Isnil'].integer() == 0:
+            while node['_Left']['_Isnil'].pointer() == 0:
                 node = node['_Left']
         else:
             parent = node['_Parent']
-            while node and parent['_Right']['_Isnil'].integer() == 0:
+            while node and parent['_Right']['_Isnil'].pointer() == 0:
                 node = parent
                 parent = parent['_Parent']
             if node['_Right'] != parent:
@@ -377,7 +376,7 @@ def qdump__std__set(d, value):
             for i in d.childRange():
                 (pad, val) = d.split(typeCode, node.pointer() + nodeSize)
                 d.putSubItem(i, val)
-                if node["_M_right"].integer() == 0:
+                if node["_M_right"].pointer() == 0:
                     parent = node["_M_parent"]
                     while node == parent["_M_right"]:
                         node = parent
@@ -386,7 +385,7 @@ def qdump__std__set(d, value):
                     node = parent
                 else:
                     node = node["_M_right"]
-                    while node["_M_left"].integer() != 0:
+                    while node["_M_left"].pointer() != 0:
                         node = node["_M_left"]
 
 
 def qdump__std__set__QNX(d, value):
@@ -419,7 +418,7 @@ def std1TreeMin(d, node):
     # return __x;
     #
     left = node['__left_']
-    if left.integer():
+    if left.pointer():
         node = left
     return node
@@ -428,7 +427,7 @@ def std1TreeIsLeftChild(d, node):
     # return __x == __x->__parent_->__left_;
     #
     other = node['__parent_']['__left_']
-    return node.integer() == other.integer()
+    return node.pointer() == other.pointer()
 
 
 def std1TreeNext(d, node):
@@ -440,7 +439,7 @@ def std1TreeNext(d, node):
     # return __x->__parent_;
     #
     right = node['__right_']
-    if right.integer():
+    if right.pointer():
         return std1TreeMin(d, right)
     while not std1TreeIsLeftChild(d, node):
         node = node['__parent_']
@@ -612,18 +611,18 @@ def qdump__std____1__wstring(d, value):
 def qdump__std__shared_ptr(d, value):
-    if d.isMsvcTarget:
+    if d.isMsvcTarget():
         i = value["_Ptr"]
     else:
         i = value["_M_ptr"]
 
-    if i.integer() == 0:
+    if i.pointer() == 0:
         d.putValue("(null)")
         d.putNumChild(0)
         return
 
     with Children(d):
-        short = d.putSubItem("data", i)
-        if d.isMsvcTarget:
+        short = d.putSubItem("data", i.dereference())
+        if d.isMsvcTarget():
             refcount = value["_Rep"]
             d.putIntItem("usecount", refcount["_Uses"])
             d.putIntItem("weakcount", refcount["_Weaks"])
@@ -635,7 +634,7 @@ def qdump__std__shared_ptr(d, value):
 def qdump__std____1__shared_ptr(d, value):
     i = value["__ptr_"]
-    if i.integer() == 0:
+    if i.pointer() == 0:
         d.putValue("(null)")
         d.putNumChild(0)
         return
@@ -733,7 +732,7 @@ def qdump__std__unordered_set(d, value):
     d.putItemCount(size)
     if d.isExpanded():
-        p = start.integer()
+        p = start.pointer()
         valueType = value.type[0]
         with Children(d, size, childType=valueType):
             ptrSize = d.ptrSize()
@@ -974,10 +973,10 @@ def qdump____gnu_cxx__hash_set(d, value):
         bucketFinish = buckets["_M_finish"]
         p = bucketStart
         itemCount = 0
-        for i in xrange(int((bucketFinish.integer() - bucketStart.integer()) / d.ptrSize())):
-            if p.dereference().integer():
+        for i in xrange((bucketFinish.pointer() - bucketStart.pointer()) // d.ptrSize()):
+            if p.dereference().pointer():
                 cur = p.dereference()
-                while cur.integer():
+                while cur.pointer():
                     d.putSubItem(itemCount, cur["_M_val"])
                     cur = cur["_M_next"]
                     itemCount += 1
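Much of the churn in the container and Qt dumpers above is mechanical: members that hold addresses (tree nodes, d-pointers, buffer starts) are now read with pointer() instead of integer(). A hedged sketch of the distinction the new Value accessors draw; laddress appears in the diff above, while ldata and the byte decoding here are assumptions for illustration only:

class Value:
    def __init__(self, ldata=b'', laddress=None):
        self.ldata = ldata          # raw little-endian bytes of the value
        self.laddress = laddress    # address of the value in the inferior, if known

    def integer(self):
        # Signed interpretation, for integral payloads.
        return int.from_bytes(self.ldata, 'little', signed=True)

    def pointer(self):
        # Unsigned interpretation, for addresses and node links.
        return int.from_bytes(self.ldata, 'little', signed=False)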


@@ -1903,9 +1903,9 @@ void tst_Dumpers::dumper_data()
+ Check("h1.2.value.1", "[1]", "2", "int") + Check("h1.2.value.1", "[1]", "2", "int")
+ Check("h2", "<3 items>", "@QHash<int, float>") + Check("h2", "<3 items>", "@QHash<int, float>")
+ Check("h2.0", "[0] 0", FloatValue("33"), "float") + Check("h2.0", "[0] 0", FloatValue("33"), "")
+ Check("h2.1", "[1] 22", FloatValue("22"), "float") + Check("h2.1", "[1] 22", FloatValue("22"), "")
+ Check("h2.2", "[2] 11", FloatValue("11"), "float") + Check("h2.2", "[2] 11", FloatValue("11"), "")
+ Check("h3", "<1 items>", "@QHash<@QString, int>") + Check("h3", "<1 items>", "@QHash<@QString, int>")
+ Check("h3.0.key", "key", "\"22.0\"", "@QString") + Check("h3.0.key", "key", "\"22.0\"", "@QString")
@@ -1934,7 +1934,7 @@ void tst_Dumpers::dumper_data()
+ CheckType("h7.2.value", "@QPointer<@QObject>") + CheckType("h7.2.value", "@QPointer<@QObject>")
+ Check("h8", "<3 items>", "Hash") + Check("h8", "<3 items>", "Hash")
+ Check("h8.0", "[0] 22", FloatValue("22"), "float") + Check("h8.0", "[0] 22", FloatValue("22"), "")
+ Check("it1.key", "22", "int") + Check("it1.key", "22", "int")
+ Check("it1.value", FloatValue("22"), "float") + Check("it1.value", FloatValue("22"), "float")
+ Check("it3.key", "33", "int") + Check("it3.key", "33", "int")
@@ -2347,8 +2347,8 @@ void tst_Dumpers::dumper_data()
+ Check("m1.1.value.0", "[0]", "\"22\"", "@QString") + Check("m1.1.value.0", "[0]", "\"22\"", "@QString")
+ Check("m2", "<2 items>", "@QMap<unsigned int, float>") + Check("m2", "<2 items>", "@QMap<unsigned int, float>")
+ Check("m2.0", "[0] 11", FloatValue("31.0"), "float") + Check("m2.0", "[0] 11", FloatValue("31.0"), "")
+ Check("m2.1", "[1] 22", FloatValue("32.0"), "float") + Check("m2.1", "[1] 22", FloatValue("32.0"), "")
+ Check("m3", "<2 items>", "T") + Check("m3", "<2 items>", "T")
@@ -2432,8 +2432,8 @@ void tst_Dumpers::dumper_data()
+ Check("m0", "<0 items>", "@QMultiMap<int, int>") + Check("m0", "<0 items>", "@QMultiMap<int, int>")
+ Check("m1", "<6 items>", "@QMultiMap<unsigned int, float>") + Check("m1", "<6 items>", "@QMultiMap<unsigned int, float>")
+ Check("m1.0", "[0] 11", FloatValue("11"), "float") + Check("m1.0", "[0] 11", FloatValue("11"), "")
+ Check("m1.5", "[5] 22", FloatValue("22"), "float") + Check("m1.5", "[5] 22", FloatValue("22"), "")
+ Check("m2", "<1 items>", "@QMultiMap<@QString, float>") + Check("m2", "<1 items>", "@QMultiMap<@QString, float>")
+ Check("m2.0.key", "\"22.0\"", "@QString") + Check("m2.0.key", "\"22.0\"", "@QString")
@@ -4120,23 +4120,23 @@ void tst_Dumpers::dumper_data()
"map4.insert(std::pair<unsigned int, float>(22, 25.0));\n") "map4.insert(std::pair<unsigned int, float>(22, 25.0));\n")
+ Check("map1", "<2 items>", "std::map<unsigned int, unsigned int>") + Check("map1", "<2 items>", "std::map<unsigned int, unsigned int>")
+ Check("map1.0", "[0] 11", "1", "unsigned int") + Check("map1.0", "[0] 11", "1", "")
+ Check("map1.1", "[1] 22", "2", "unsigned int") + Check("map1.1", "[1] 22", "2", "")
+ Check("map2", "<2 items>", "std::map<unsigned int, float>") + Check("map2", "<2 items>", "std::map<unsigned int, float>")
+ Check("map2.0", "[0] 11", FloatValue("11"), "float") + Check("map2.0", "[0] 11", FloatValue("11"), "")
+ Check("map2.1", "[1] 22", FloatValue("22"), "float") + Check("map2.1", "[1] 22", FloatValue("22"), "")
+ Check("map3", "<6 items>", "Map") + Check("map3", "<6 items>", "Map")
+ Check("map3.0", "[0] 11", FloatValue("11"), "float") + Check("map3.0", "[0] 11", FloatValue("11"), "")
+ Check("it1.first", "11", "int") + Check("it1.first", "11", "int")
+ Check("it1.second", FloatValue("11"), "float") + Check("it1.second", FloatValue("11"), "float")
+ Check("it6.first", "66", "int") + Check("it6.first", "66", "int")
+ Check("it6.second", FloatValue("66"), "float") + Check("it6.second", FloatValue("66"), "float")
+ Check("map4", "<5 items>", "std::multimap<unsigned int, float>") + Check("map4", "<5 items>", "std::multimap<unsigned int, float>")
+ Check("map4.0", "[0] 11", FloatValue("11"), "float") + Check("map4.0", "[0] 11", FloatValue("11"), "")
+ Check("map4.4", "[4] 22", FloatValue("25"), "float"); + Check("map4.4", "[4] 22", FloatValue("25"), "");
QTest::newRow("StdMapQt") QTest::newRow("StdMapQt")
@@ -4188,33 +4188,33 @@ void tst_Dumpers::dumper_data()
                + CoreProfile()
 
                + Check("map1", "<3 items>", "std::map<@QString, Foo>")
-               + Check("map1.0", "[0] \"22.0\"", "", "std::pair<@QString const, Foo>")
+               + Check("map1.0", "[0] \"22.0\"", "", "")
                + Check("map1.0.first", "\"22.0\"", "@QString")
                + Check("map1.0.second", "", "Foo")
                + Check("map1.0.second.a", "22", "int")
-               + Check("map1.1", "[1] \"33.0\"", "", "std::pair<@QString const, Foo>")
+               + Check("map1.1", "[1] \"33.0\"", "", "")
                + Check("map1.2.first", "\"44.0\"", "@QString")
                + Check("map1.2.second", "", "Foo")
                + Check("map1.2.second.a", "44", "int")
 
                + Check("map2", "<2 items>", "std::map<char const*, Foo>")
-               + Check("map2.0", "[0] \"22.0\"", "", "std::pair<char const* const, Foo>")
+               + Check("map2.0", "[0] \"22.0\"", "", "")
                + Check("map2.0.first", "\"22.0\"", "char *")
                + Check("map2.0.first.0", "[0]", "50", "char")
                + Check("map2.0.second", "", "Foo")
                + Check("map2.0.second.a", "22", "int")
-               + Check("map2.1", "[1] \"33.0\"", "", "std::pair<char const* const, Foo>")
+               + Check("map2.1", "[1] \"33.0\"", "", "")
                + Check("map2.1.first", "\"33.0\"", "char *")
                + Check("map2.1.first.0", "[0]", "51", "char")
                + Check("map2.1.second", "", "Foo")
                + Check("map2.1.second.a", "33", "int")
 
                + Check("map3", "<2 items>", "std::map<unsigned int, @QStringList>")
-               + Check("map3.0", "[0] 11", "<1 items>", "std::pair<unsigned int const, @QStringList>")
+               + Check("map3.0", "[0] 11", "<1 items>", "")
                + Check("map3.0.first", "11", "unsigned int")
                + Check("map3.0.second", "<1 items>", "@QStringList")
                + Check("map3.0.second.0", "[0]", "\"11\"", "@QString")
-               + Check("map3.1", "[1] 22", "<1 items>", "std::pair<unsigned int const, @QStringList>")
+               + Check("map3.1", "[1] 22", "<1 items>", "")
                + Check("map3.1.first", "22", "unsigned int")
                + Check("map3.1.second", "<1 items>", "@QStringList")
                + Check("map3.1.second.0", "[0]", "\"22\"", "@QString")
@@ -4222,26 +4222,25 @@ void tst_Dumpers::dumper_data()
+ Check("map4.1.second.0", "[0]", "\"22\"", "@QString") + Check("map4.1.second.0", "[0]", "\"22\"", "@QString")
+ Check("map5", "<2 items>", "std::map<@QString, float>") + Check("map5", "<2 items>", "std::map<@QString, float>")
+ Check("map5.0", "[0] \"11.0\"", FloatValue("11"), "std::pair<@QString const, float>") + Check("map5.0", "[0] \"11.0\"", FloatValue("11"), "")
+ Check("map5.0.first", "\"11.0\"", "@QString") + Check("map5.0.first", "\"11.0\"", "@QString")
+ Check("map5.0.second", FloatValue("11"), "float") + Check("map5.0.second", FloatValue("11"), "float")
+ Check("map5.1", "[1] \"22.0\"", FloatValue("22"), "std::pair<@QString const, float>") + Check("map5.1", "[1] \"22.0\"", FloatValue("22"), "")
+ Check("map5.1.first", "\"22.0\"", "@QString") + Check("map5.1.first", "\"22.0\"", "@QString")
+ Check("map5.1.second", FloatValue("22"), "float") + Check("map5.1.second", FloatValue("22"), "float")
+ Check("map6", "<2 items>", "std::map<int, @QString>") + Check("map6", "<2 items>", "std::map<int, @QString>")
+ Check("map6.0", "[0] 11", "\"11.0\"", "std::pair<int const, @QString>") + Check("map6.0", "[0] 11", "\"11.0\"", "")
+ Check("map6.0.first", "11", "int") + Check("map6.0.first", "11", "int")
+ Check("map6.0.second", "\"11.0\"", "@QString") + Check("map6.0.second", "\"11.0\"", "@QString")
+ Check("map6.1", "[1] 22", "\"22.0\"", "std::pair<int const, @QString>") + Check("map6.1", "[1] 22", "\"22.0\"", "")
+ Check("map6.1.first", "22", "int") + Check("map6.1.first", "22", "int")
+ Check("map6.1.second", "\"22.0\"", "@QString") + Check("map6.1.second", "\"22.0\"", "@QString")
+ Check("map7", "<3 items>", "std::map<@QString, @QPointer<@QObject>>") + Check("map7", "<3 items>", "std::map<@QString, @QPointer<@QObject>>")
+ Check("map7.0", "[0] \".\"", "", "std::pair<@QString const, @QPointer<@QObject>>") + Check("map7.0", "[0] \".\"", "", "")
+ Check("map7.0.first", "\".\"", "@QString") + Check("map7.0.first", "\".\"", "@QString")
+ Check("map7.0.second", "", "@QPointer<@QObject>") + Check("map7.0.second", "", "@QPointer<@QObject>")
+ Check("map7.2", "[2] \"Welt\"", "", "std::pair<@QString const, @QPointer<@QObject>>")
+ Check("map7.2.first", "\"Welt\"", "@QString"); + Check("map7.2.first", "\"Welt\"", "@QString");
@@ -4672,8 +4671,8 @@ void tst_Dumpers::dumper_data()
                + Cxx11Profile()
 
                + Check("map1", "<2 items>", "std::unordered_map<unsigned int, unsigned int>")
-               + Check("map1.0", "[0] 22", "2", "unsigned int")
-               + Check("map1.1", "[1] 11", "1", "unsigned int")
+               + Check("map1.0", "[0] 22", "2", "")
+               + Check("map1.1", "[1] 11", "1", "")
 
                + Check("map2", "<2 items>", "std::unordered_map<std::string, float>")
                + Check("map2.0", "[0] \"22.0\"", FloatValue("22.0"), "")
@@ -5382,17 +5381,8 @@ void tst_Dumpers::dumper_data()
+ Check("y2", "", "X") + Check("y2", "", "X")
+ Check("y3", "", "X"); + Check("y3", "", "X");
QTest::newRow("RValueReferenceLldb") QTest::newRow("RValueReference")
<< Data(rvalueData) << Data(rvalueData)
+ LldbEngine
+ Check("x1", "", "X &&")
+ Check("x2", "", "X &&")
+ Check("x3", "", "X &&");
QTest::newRow("RValueReferenceGdb")
<< Data(rvalueData)
+ GdbEngine
+ GccVersion(0, 40704)
+ Check("x1", "", "X &") + Check("x1", "", "X &")
+ Check("x2", "", "X &") + Check("x2", "", "X &")
+ Check("x3", "", "X &"); + Check("x3", "", "X &");