Columns: idx (int64) | project (string) | commit_id (string) | project_url (string) | commit_url (string) | commit_message (string) | target (int64) | func (string) | func_hash (string) | file_name (string) | file_hash (string) | cwe (string) | cve (string) | cve_desc (string) | nvd_url (string)
idx: 194,963
project: ImageMagick6
commit_id: dc070da861a015d3c97488fdcca6063b44d47a7b
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/dc070da861a015d3c97488fdcca6063b44d47a7b
commit_message: https://github.com/ImageMagick/ImageMagick/pull/5034
target: 1
func:
static MagickBooleanType GetEXIFProperty(const Image *image,
const char *property)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define EXIF_FMT_BYTE 1
#define EXIF_FMT_STRING 2
#define EXIF_FMT_USHORT 3
#define EXIF_FMT_ULONG 4
#define EXIF_FMT_URATIONAL 5
#define EXIF_FMT_SBYTE 6
#define EXIF_FMT_UNDEFINED 7
#define EXIF_FMT_SSHORT 8
#define EXIF_FMT_SLONG 9
#define EXIF_FMT_SRATIONAL 10
#define EXIF_FMT_SINGLE 11
#define EXIF_FMT_DOUBLE 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_GPS_OFFSET 0x8825
#define TAG_INTEROP_OFFSET 0xa005
#define EXIFMultipleValues(size,format,arg) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \
format", ",arg); \
if (length >= (MaxTextExtent-1)) \
length=MaxTextExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
#define EXIFMultipleFractions(size,format,arg1,arg2) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \
format", ",(arg1),(arg2)); \
if (length >= (MaxTextExtent-1)) \
length=MaxTextExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
typedef struct _DirectoryInfo
{
const unsigned char
*directory;
size_t
entry;
ssize_t
offset;
} DirectoryInfo;
typedef struct _TagInfo
{
size_t
tag;
const char
description[36];
} TagInfo;
static const TagInfo
EXIFTag[] =
{
{ 0x001, "exif:InteroperabilityIndex" },
{ 0x002, "exif:InteroperabilityVersion" },
{ 0x100, "exif:ImageWidth" },
{ 0x101, "exif:ImageLength" },
{ 0x102, "exif:BitsPerSample" },
{ 0x103, "exif:Compression" },
{ 0x106, "exif:PhotometricInterpretation" },
{ 0x10a, "exif:FillOrder" },
{ 0x10d, "exif:DocumentName" },
{ 0x10e, "exif:ImageDescription" },
{ 0x10f, "exif:Make" },
{ 0x110, "exif:Model" },
{ 0x111, "exif:StripOffsets" },
{ 0x112, "exif:Orientation" },
{ 0x115, "exif:SamplesPerPixel" },
{ 0x116, "exif:RowsPerStrip" },
{ 0x117, "exif:StripByteCounts" },
{ 0x11a, "exif:XResolution" },
{ 0x11b, "exif:YResolution" },
{ 0x11c, "exif:PlanarConfiguration" },
{ 0x11d, "exif:PageName" },
{ 0x11e, "exif:XPosition" },
{ 0x11f, "exif:YPosition" },
{ 0x118, "exif:MinSampleValue" },
{ 0x119, "exif:MaxSampleValue" },
{ 0x120, "exif:FreeOffsets" },
{ 0x121, "exif:FreeByteCounts" },
{ 0x122, "exif:GrayResponseUnit" },
{ 0x123, "exif:GrayResponseCurve" },
{ 0x124, "exif:T4Options" },
{ 0x125, "exif:T6Options" },
{ 0x128, "exif:ResolutionUnit" },
{ 0x12d, "exif:TransferFunction" },
{ 0x131, "exif:Software" },
{ 0x132, "exif:DateTime" },
{ 0x13b, "exif:Artist" },
{ 0x13e, "exif:WhitePoint" },
{ 0x13f, "exif:PrimaryChromaticities" },
{ 0x140, "exif:ColorMap" },
{ 0x141, "exif:HalfToneHints" },
{ 0x142, "exif:TileWidth" },
{ 0x143, "exif:TileLength" },
{ 0x144, "exif:TileOffsets" },
{ 0x145, "exif:TileByteCounts" },
{ 0x14a, "exif:SubIFD" },
{ 0x14c, "exif:InkSet" },
{ 0x14d, "exif:InkNames" },
{ 0x14e, "exif:NumberOfInks" },
{ 0x150, "exif:DotRange" },
{ 0x151, "exif:TargetPrinter" },
{ 0x152, "exif:ExtraSample" },
{ 0x153, "exif:SampleFormat" },
{ 0x154, "exif:SMinSampleValue" },
{ 0x155, "exif:SMaxSampleValue" },
{ 0x156, "exif:TransferRange" },
{ 0x157, "exif:ClipPath" },
{ 0x158, "exif:XClipPathUnits" },
{ 0x159, "exif:YClipPathUnits" },
{ 0x15a, "exif:Indexed" },
{ 0x15b, "exif:JPEGTables" },
{ 0x15f, "exif:OPIProxy" },
{ 0x200, "exif:JPEGProc" },
{ 0x201, "exif:JPEGInterchangeFormat" },
{ 0x202, "exif:JPEGInterchangeFormatLength" },
{ 0x203, "exif:JPEGRestartInterval" },
{ 0x205, "exif:JPEGLosslessPredictors" },
{ 0x206, "exif:JPEGPointTransforms" },
{ 0x207, "exif:JPEGQTables" },
{ 0x208, "exif:JPEGDCTables" },
{ 0x209, "exif:JPEGACTables" },
{ 0x211, "exif:YCbCrCoefficients" },
{ 0x212, "exif:YCbCrSubSampling" },
{ 0x213, "exif:YCbCrPositioning" },
{ 0x214, "exif:ReferenceBlackWhite" },
{ 0x2bc, "exif:ExtensibleMetadataPlatform" },
{ 0x301, "exif:Gamma" },
{ 0x302, "exif:ICCProfileDescriptor" },
{ 0x303, "exif:SRGBRenderingIntent" },
{ 0x320, "exif:ImageTitle" },
{ 0x5001, "exif:ResolutionXUnit" },
{ 0x5002, "exif:ResolutionYUnit" },
{ 0x5003, "exif:ResolutionXLengthUnit" },
{ 0x5004, "exif:ResolutionYLengthUnit" },
{ 0x5005, "exif:PrintFlags" },
{ 0x5006, "exif:PrintFlagsVersion" },
{ 0x5007, "exif:PrintFlagsCrop" },
{ 0x5008, "exif:PrintFlagsBleedWidth" },
{ 0x5009, "exif:PrintFlagsBleedWidthScale" },
{ 0x500A, "exif:HalftoneLPI" },
{ 0x500B, "exif:HalftoneLPIUnit" },
{ 0x500C, "exif:HalftoneDegree" },
{ 0x500D, "exif:HalftoneShape" },
{ 0x500E, "exif:HalftoneMisc" },
{ 0x500F, "exif:HalftoneScreen" },
{ 0x5010, "exif:JPEGQuality" },
{ 0x5011, "exif:GridSize" },
{ 0x5012, "exif:ThumbnailFormat" },
{ 0x5013, "exif:ThumbnailWidth" },
{ 0x5014, "exif:ThumbnailHeight" },
{ 0x5015, "exif:ThumbnailColorDepth" },
{ 0x5016, "exif:ThumbnailPlanes" },
{ 0x5017, "exif:ThumbnailRawBytes" },
{ 0x5018, "exif:ThumbnailSize" },
{ 0x5019, "exif:ThumbnailCompressedSize" },
{ 0x501a, "exif:ColorTransferFunction" },
{ 0x501b, "exif:ThumbnailData" },
{ 0x5020, "exif:ThumbnailImageWidth" },
{ 0x5021, "exif:ThumbnailImageHeight" },
{ 0x5022, "exif:ThumbnailBitsPerSample" },
{ 0x5023, "exif:ThumbnailCompression" },
{ 0x5024, "exif:ThumbnailPhotometricInterp" },
{ 0x5025, "exif:ThumbnailImageDescription" },
{ 0x5026, "exif:ThumbnailEquipMake" },
{ 0x5027, "exif:ThumbnailEquipModel" },
{ 0x5028, "exif:ThumbnailStripOffsets" },
{ 0x5029, "exif:ThumbnailOrientation" },
{ 0x502a, "exif:ThumbnailSamplesPerPixel" },
{ 0x502b, "exif:ThumbnailRowsPerStrip" },
{ 0x502c, "exif:ThumbnailStripBytesCount" },
{ 0x502d, "exif:ThumbnailResolutionX" },
{ 0x502e, "exif:ThumbnailResolutionY" },
{ 0x502f, "exif:ThumbnailPlanarConfig" },
{ 0x5030, "exif:ThumbnailResolutionUnit" },
{ 0x5031, "exif:ThumbnailTransferFunction" },
{ 0x5032, "exif:ThumbnailSoftwareUsed" },
{ 0x5033, "exif:ThumbnailDateTime" },
{ 0x5034, "exif:ThumbnailArtist" },
{ 0x5035, "exif:ThumbnailWhitePoint" },
{ 0x5036, "exif:ThumbnailPrimaryChromaticities" },
{ 0x5037, "exif:ThumbnailYCbCrCoefficients" },
{ 0x5038, "exif:ThumbnailYCbCrSubsampling" },
{ 0x5039, "exif:ThumbnailYCbCrPositioning" },
{ 0x503A, "exif:ThumbnailRefBlackWhite" },
{ 0x503B, "exif:ThumbnailCopyRight" },
{ 0x5090, "exif:LuminanceTable" },
{ 0x5091, "exif:ChrominanceTable" },
{ 0x5100, "exif:FrameDelay" },
{ 0x5101, "exif:LoopCount" },
{ 0x5110, "exif:PixelUnit" },
{ 0x5111, "exif:PixelPerUnitX" },
{ 0x5112, "exif:PixelPerUnitY" },
{ 0x5113, "exif:PaletteHistogram" },
{ 0x1000, "exif:RelatedImageFileFormat" },
{ 0x1001, "exif:RelatedImageLength" },
{ 0x1002, "exif:RelatedImageWidth" },
{ 0x800d, "exif:ImageID" },
{ 0x80e3, "exif:Matteing" },
{ 0x80e4, "exif:DataType" },
{ 0x80e5, "exif:ImageDepth" },
{ 0x80e6, "exif:TileDepth" },
{ 0x828d, "exif:CFARepeatPatternDim" },
{ 0x828e, "exif:CFAPattern2" },
{ 0x828f, "exif:BatteryLevel" },
{ 0x8298, "exif:Copyright" },
{ 0x829a, "exif:ExposureTime" },
{ 0x829d, "exif:FNumber" },
{ 0x83bb, "exif:IPTC/NAA" },
{ 0x84e3, "exif:IT8RasterPadding" },
{ 0x84e5, "exif:IT8ColorTable" },
{ 0x8649, "exif:ImageResourceInformation" },
{ 0x8769, "exif:ExifOffset" }, /* specs as "Exif IFD Pointer"? */
{ 0x8773, "exif:InterColorProfile" },
{ 0x8822, "exif:ExposureProgram" },
{ 0x8824, "exif:SpectralSensitivity" },
{ 0x8825, "exif:GPSInfo" }, /* specs as "GPSInfo IFD Pointer"? */
{ 0x8827, "exif:PhotographicSensitivity" },
{ 0x8828, "exif:OECF" },
{ 0x8829, "exif:Interlace" },
{ 0x882a, "exif:TimeZoneOffset" },
{ 0x882b, "exif:SelfTimerMode" },
{ 0x8830, "exif:SensitivityType" },
{ 0x8831, "exif:StandardOutputSensitivity" },
{ 0x8832, "exif:RecommendedExposureIndex" },
{ 0x8833, "exif:ISOSpeed" },
{ 0x8834, "exif:ISOSpeedLatitudeyyy" },
{ 0x8835, "exif:ISOSpeedLatitudezzz" },
{ 0x9000, "exif:ExifVersion" },
{ 0x9003, "exif:DateTimeOriginal" },
{ 0x9004, "exif:DateTimeDigitized" },
{ 0x9010, "exif:OffsetTime" },
{ 0x9011, "exif:OffsetTimeOriginal" },
{ 0x9012, "exif:OffsetTimeDigitized" },
{ 0x9101, "exif:ComponentsConfiguration" },
{ 0x9102, "exif:CompressedBitsPerPixel" },
{ 0x9201, "exif:ShutterSpeedValue" },
{ 0x9202, "exif:ApertureValue" },
{ 0x9203, "exif:BrightnessValue" },
{ 0x9204, "exif:ExposureBiasValue" },
{ 0x9205, "exif:MaxApertureValue" },
{ 0x9206, "exif:SubjectDistance" },
{ 0x9207, "exif:MeteringMode" },
{ 0x9208, "exif:LightSource" },
{ 0x9209, "exif:Flash" },
{ 0x920a, "exif:FocalLength" },
{ 0x920b, "exif:FlashEnergy" },
{ 0x920c, "exif:SpatialFrequencyResponse" },
{ 0x920d, "exif:Noise" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0x9211, "exif:ImageNumber" },
{ 0x9212, "exif:SecurityClassification" },
{ 0x9213, "exif:ImageHistory" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9215, "exif:ExposureIndex" },
{ 0x9216, "exif:TIFF-EPStandardID" },
{ 0x927c, "exif:MakerNote" },
{ 0x9286, "exif:UserComment" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0x9400, "exif:Temperature" },
{ 0x9401, "exif:Humidity" },
{ 0x9402, "exif:Pressure" },
{ 0x9403, "exif:WaterDepth" },
{ 0x9404, "exif:Acceleration" },
{ 0x9405, "exif:CameraElevationAngle" },
{ 0x9C9b, "exif:WinXP-Title" },
{ 0x9C9c, "exif:WinXP-Comments" },
{ 0x9C9d, "exif:WinXP-Author" },
{ 0x9C9e, "exif:WinXP-Keywords" },
{ 0x9C9f, "exif:WinXP-Subject" },
{ 0xa000, "exif:FlashPixVersion" },
{ 0xa001, "exif:ColorSpace" },
{ 0xa002, "exif:PixelXDimension" },
{ 0xa003, "exif:PixelYDimension" },
{ 0xa004, "exif:RelatedSoundFile" },
{ 0xa005, "exif:InteroperabilityOffset" },
{ 0xa20b, "exif:FlashEnergy" },
{ 0xa20c, "exif:SpatialFrequencyResponse" },
{ 0xa20d, "exif:Noise" },
{ 0xa20e, "exif:FocalPlaneXResolution" },
{ 0xa20f, "exif:FocalPlaneYResolution" },
{ 0xa210, "exif:FocalPlaneResolutionUnit" },
{ 0xa214, "exif:SubjectLocation" },
{ 0xa215, "exif:ExposureIndex" },
{ 0xa216, "exif:TIFF/EPStandardID" },
{ 0xa217, "exif:SensingMethod" },
{ 0xa300, "exif:FileSource" },
{ 0xa301, "exif:SceneType" },
{ 0xa302, "exif:CFAPattern" },
{ 0xa401, "exif:CustomRendered" },
{ 0xa402, "exif:ExposureMode" },
{ 0xa403, "exif:WhiteBalance" },
{ 0xa404, "exif:DigitalZoomRatio" },
{ 0xa405, "exif:FocalLengthIn35mmFilm" },
{ 0xa406, "exif:SceneCaptureType" },
{ 0xa407, "exif:GainControl" },
{ 0xa408, "exif:Contrast" },
{ 0xa409, "exif:Saturation" },
{ 0xa40a, "exif:Sharpness" },
{ 0xa40b, "exif:DeviceSettingDescription" },
{ 0xa40c, "exif:SubjectDistanceRange" },
{ 0xa420, "exif:ImageUniqueID" },
{ 0xa430, "exif:CameraOwnerName" },
{ 0xa431, "exif:BodySerialNumber" },
{ 0xa432, "exif:LensSpecification" },
{ 0xa433, "exif:LensMake" },
{ 0xa434, "exif:LensModel" },
{ 0xa435, "exif:LensSerialNumber" },
{ 0xc4a5, "exif:PrintImageMatching" },
{ 0xa500, "exif:Gamma" },
{ 0xc640, "exif:CR2Slice" },
{ 0x10000, "exif:GPSVersionID" },
{ 0x10001, "exif:GPSLatitudeRef" },
{ 0x10002, "exif:GPSLatitude" },
{ 0x10003, "exif:GPSLongitudeRef" },
{ 0x10004, "exif:GPSLongitude" },
{ 0x10005, "exif:GPSAltitudeRef" },
{ 0x10006, "exif:GPSAltitude" },
{ 0x10007, "exif:GPSTimeStamp" },
{ 0x10008, "exif:GPSSatellites" },
{ 0x10009, "exif:GPSStatus" },
{ 0x1000a, "exif:GPSMeasureMode" },
{ 0x1000b, "exif:GPSDop" },
{ 0x1000c, "exif:GPSSpeedRef" },
{ 0x1000d, "exif:GPSSpeed" },
{ 0x1000e, "exif:GPSTrackRef" },
{ 0x1000f, "exif:GPSTrack" },
{ 0x10010, "exif:GPSImgDirectionRef" },
{ 0x10011, "exif:GPSImgDirection" },
{ 0x10012, "exif:GPSMapDatum" },
{ 0x10013, "exif:GPSDestLatitudeRef" },
{ 0x10014, "exif:GPSDestLatitude" },
{ 0x10015, "exif:GPSDestLongitudeRef" },
{ 0x10016, "exif:GPSDestLongitude" },
{ 0x10017, "exif:GPSDestBearingRef" },
{ 0x10018, "exif:GPSDestBearing" },
{ 0x10019, "exif:GPSDestDistanceRef" },
{ 0x1001a, "exif:GPSDestDistance" },
{ 0x1001b, "exif:GPSProcessingMethod" },
{ 0x1001c, "exif:GPSAreaInformation" },
{ 0x1001d, "exif:GPSDateStamp" },
{ 0x1001e, "exif:GPSDifferential" },
{ 0x1001f, "exif:GPSHPositioningError" },
{ 0x00000, "" }
}; /* http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf */
const StringInfo
*profile;
const unsigned char
*directory,
*exif;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
MagickBooleanType
status;
ssize_t
i;
size_t
entry,
length,
number_entries,
tag,
tag_value;
SplayTreeInfo
*exif_resources;
ssize_t
all,
id,
level,
offset,
tag_offset;
static int
tag_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
/*
If EXIF data exists, then try to parse the request for a tag.
*/
profile=GetImageProfile(image,"exif");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if ((property == (const char *) NULL) || (*property == '\0'))
return(MagickFalse);
while (isspace((int) ((unsigned char) *property)) != 0)
property++;
if (strlen(property) <= 5)
return(MagickFalse);
all=0;
tag=(~0UL);
switch (*(property+5))
{
case '*':
{
/*
Caller has asked for all the tags in the EXIF data.
*/
tag=0;
all=1; /* return the data in description=value format */
break;
}
case '!':
{
tag=0;
all=2; /* return the data in tagid=value format */
break;
}
case '#':
case '@':
{
int
c;
size_t
n;
/*
Check for a hex based tag specification first.
*/
tag=(*(property+5) == '@') ? 1UL : 0UL;
property+=6;
n=strlen(property);
if (n != 4)
return(MagickFalse);
/*
Parse tag specification as a hex number.
*/
n/=4;
do
{
for (i=(ssize_t) n-1L; i >= 0; i--)
{
c=(*property++);
tag<<=4;
if ((c >= '0') && (c <= '9'))
tag|=(c-'0');
else
if ((c >= 'A') && (c <= 'F'))
tag|=(c-('A'-10));
else
if ((c >= 'a') && (c <= 'f'))
tag|=(c-('a'-10));
else
return(MagickFalse);
}
} while (*property != '\0');
break;
}
default:
{
/*
Try to match the text with a tag name instead.
*/
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (LocaleCompare(EXIFTag[i].description,property) == 0)
{
tag=(size_t) EXIFTag[i].tag;
break;
}
}
break;
}
}
if (tag == (~0UL))
return(MagickFalse);
length=GetStringInfoLength(profile);
if (length < 6)
return(MagickFalse);
exif=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadPropertyByte(&exif,&length) != 0x45)
continue;
if (ReadPropertyByte(&exif,&length) != 0x78)
continue;
if (ReadPropertyByte(&exif,&length) != 0x69)
continue;
if (ReadPropertyByte(&exif,&length) != 0x66)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadPropertySignedShort(LSBEndian,exif);
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadPropertyUnsignedShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadPropertySignedLong(endian,exif+4);
if ((offset < 0) || (size_t) offset >= length)
return(MagickFalse);
/*
Set the pointer to the first IFD and follow it where it leads.
*/
status=MagickFalse;
directory=exif+offset;
level=0;
entry=0;
tag_offset=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
/*
If there is anything on the stack then pop it off.
*/
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
tag_offset=directory_stack[level].offset;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=(size_t) ReadPropertyUnsignedShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
unsigned char
*p,
*q;
size_t
format;
ssize_t
number_bytes,
components;
q=(unsigned char *) (directory+(12*entry)+2);
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(size_t) ReadPropertyUnsignedShort(endian,q)+tag_offset;
format=(size_t) ReadPropertyUnsignedShort(endian,q+2);
if (format >= (sizeof(tag_bytes)/sizeof(*tag_bytes)))
break;
if (format == 0)
break; /* corrupt EXIF */
components=(ssize_t) ReadPropertySignedLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*tag_bytes[format];
if (number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
ssize_t
dir_offset;
/*
The directory entry contains an offset.
*/
dir_offset=(ssize_t) ReadPropertySignedLong(endian,q+8);
if ((dir_offset < 0) || (size_t) dir_offset >= length)
continue;
if (((size_t) dir_offset+number_bytes) < (size_t) dir_offset)
continue; /* prevent overflow */
if (((size_t) dir_offset+number_bytes) > length)
continue;
p=(unsigned char *) (exif+dir_offset);
}
if ((all != 0) || (tag == (size_t) tag_value))
{
char
buffer[MaxTextExtent],
*value;
if ((p < exif) || (p > (exif+length-tag_bytes[format])))
break;
value=(char *) NULL;
*buffer='\0';
switch (format)
{
case EXIF_FMT_BYTE:
case EXIF_FMT_UNDEFINED:
{
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if (isprint((int) p[i]) != 0)
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
case EXIF_FMT_SBYTE:
{
EXIFMultipleValues(1,"%.20g",(double) (*(signed char *) p1));
break;
}
case EXIF_FMT_SSHORT:
{
EXIFMultipleValues(2,"%hd",ReadPropertySignedShort(endian,p1));
break;
}
case EXIF_FMT_USHORT:
{
EXIFMultipleValues(2,"%hu",ReadPropertyUnsignedShort(endian,p1));
break;
}
case EXIF_FMT_ULONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertyUnsignedLong(endian,p1));
break;
}
case EXIF_FMT_SLONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertySignedLong(endian,p1));
break;
}
case EXIF_FMT_URATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertyUnsignedLong(endian,p1),(double)
ReadPropertyUnsignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SRATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertySignedLong(endian,p1),(double)
ReadPropertySignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SINGLE:
{
EXIFMultipleValues(4,"%f",(double) *(float *) p1);
break;
}
case EXIF_FMT_DOUBLE:
{
EXIFMultipleValues(8,"%f",*(double *) p1);
break;
}
case EXIF_FMT_STRING:
default:
{
if ((p < exif) || (p > (exif+length-number_bytes)))
break;
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
ssize_t
i;
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if ((isprint((int) p[i]) != 0) || (p[i] == '\0'))
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
}
if (value != (char *) NULL)
{
char
*key;
const char
*p;
key=AcquireString(property);
switch (all)
{
case 1:
{
const char
*description;
ssize_t
i;
description="unknown";
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (EXIFTag[i].tag == tag_value)
{
description=EXIFTag[i].description;
break;
}
}
(void) FormatLocaleString(key,MaxTextExtent,"%s",
description);
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
break;
}
case 2:
{
if (tag_value < 0x10000)
(void) FormatLocaleString(key,MaxTextExtent,"#%04lx",
(unsigned long) tag_value);
else
if (tag_value < 0x20000)
(void) FormatLocaleString(key,MaxTextExtent,"@%04lx",
(unsigned long) (tag_value & 0xffff));
else
(void) FormatLocaleString(key,MaxTextExtent,"unknown");
break;
}
default:
{
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
}
}
p=(const char *) NULL;
if (image->properties != (void *) NULL)
p=(const char *) GetValueFromSplayTree((SplayTreeInfo *)
image->properties,key);
if (p == (const char *) NULL)
(void) SetImageProperty((Image *) image,key,value);
value=DestroyString(value);
key=DestroyString(key);
status=MagickTrue;
}
}
if ((tag_value == TAG_EXIF_OFFSET) ||
(tag_value == TAG_INTEROP_OFFSET) || (tag_value == TAG_GPS_OFFSET))
{
ssize_t
offset;
offset=(ssize_t) ReadPropertySignedLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
ssize_t
tag_offset1;
tag_offset1=(ssize_t) ((tag_value == TAG_GPS_OFFSET) ? 0x10000 :
0);
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
directory_stack[level].offset=tag_offset;
level++;
/*
Check for duplicate tag.
*/
for (i=0; i < level; i++)
if (directory_stack[i].directory == (exif+tag_offset1))
break;
if (i < level)
break; /* duplicate tag */
directory_stack[level].directory=exif+offset;
directory_stack[level].offset=tag_offset1;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)+4) > (exif+length))
break;
offset=(ssize_t) ReadPropertySignedLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
directory_stack[level].offset=tag_offset1;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(status);
}
func_hash: 292096308156704952246887123009503225331
file_name: property.c
file_hash: 122751008107964047346147343124174074065
cwe: CWE-704
cve: CVE-2022-32547
cve_desc: In ImageMagick, there is a load of a misaligned address for type 'double' (which requires 8-byte alignment) and for type 'float' (which requires 4-byte alignment) at MagickCore/property.c. When crafted or untrusted input is processed by ImageMagick, this negatively impacts application availability or causes other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32547
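
The EXIF_FMT_SINGLE and EXIF_FMT_DOUBLE cases in the vulnerable function above dereference *(float *) p1 and *(double *) p1, where p1 points at an arbitrary offset inside the raw EXIF buffer; the fixed row below (target 0) reads those bytes through the endian-aware integer helpers instead. The following is a minimal sketch, outside ImageMagick's API and with hypothetical names, of why the cast-and-dereference is undefined behavior and how a memcpy-based read avoids it.

// Minimal sketch of the misaligned-load pattern behind CVE-2022-32547.
// The buffer layout and function names here are hypothetical.
#include <cstddef>
#include <cstdio>
#include <cstring>

// Unsafe: bytes+offset carries no alignment guarantee, so casting it to
// double* and dereferencing is undefined behavior when the address is not
// 8-byte aligned (UBSan: "load of misaligned address for type 'double'").
double read_double_unsafe(const unsigned char *bytes, std::size_t offset) {
  return *(const double *) (bytes + offset);
}

// Safe: copy the 8 bytes into a properly aligned local. Compilers lower
// this memcpy to a single unaligned load where the hardware allows it.
double read_double_safe(const unsigned char *bytes, std::size_t offset) {
  double value;
  std::memcpy(&value, bytes + offset, sizeof value);
  return value;
}

int main() {
  unsigned char buffer[17] = {0};
  // offset 1 is deliberately misaligned for an 8-byte double
  std::printf("%f\n", read_double_safe(buffer, 1));
  return 0;
}
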
idx: 217,569
project: ImageMagick6
commit_id: dc070da861a015d3c97488fdcca6063b44d47a7b
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/dc070da861a015d3c97488fdcca6063b44d47a7b
commit_message: https://github.com/ImageMagick/ImageMagick/pull/5034
target: 0
func:
static MagickBooleanType GetEXIFProperty(const Image *image,
const char *property)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define EXIF_FMT_BYTE 1
#define EXIF_FMT_STRING 2
#define EXIF_FMT_USHORT 3
#define EXIF_FMT_ULONG 4
#define EXIF_FMT_URATIONAL 5
#define EXIF_FMT_SBYTE 6
#define EXIF_FMT_UNDEFINED 7
#define EXIF_FMT_SSHORT 8
#define EXIF_FMT_SLONG 9
#define EXIF_FMT_SRATIONAL 10
#define EXIF_FMT_SINGLE 11
#define EXIF_FMT_DOUBLE 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_GPS_OFFSET 0x8825
#define TAG_INTEROP_OFFSET 0xa005
#define EXIFMultipleValues(size,format,arg) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \
format", ",arg); \
if (length >= (MaxTextExtent-1)) \
length=MaxTextExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
#define EXIFMultipleFractions(size,format,arg1,arg2) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \
format", ",(arg1),(arg2)); \
if (length >= (MaxTextExtent-1)) \
length=MaxTextExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
typedef struct _DirectoryInfo
{
const unsigned char
*directory;
size_t
entry;
ssize_t
offset;
} DirectoryInfo;
typedef struct _TagInfo
{
size_t
tag;
const char
description[36];
} TagInfo;
static const TagInfo
EXIFTag[] =
{
{ 0x001, "exif:InteroperabilityIndex" },
{ 0x002, "exif:InteroperabilityVersion" },
{ 0x100, "exif:ImageWidth" },
{ 0x101, "exif:ImageLength" },
{ 0x102, "exif:BitsPerSample" },
{ 0x103, "exif:Compression" },
{ 0x106, "exif:PhotometricInterpretation" },
{ 0x10a, "exif:FillOrder" },
{ 0x10d, "exif:DocumentName" },
{ 0x10e, "exif:ImageDescription" },
{ 0x10f, "exif:Make" },
{ 0x110, "exif:Model" },
{ 0x111, "exif:StripOffsets" },
{ 0x112, "exif:Orientation" },
{ 0x115, "exif:SamplesPerPixel" },
{ 0x116, "exif:RowsPerStrip" },
{ 0x117, "exif:StripByteCounts" },
{ 0x11a, "exif:XResolution" },
{ 0x11b, "exif:YResolution" },
{ 0x11c, "exif:PlanarConfiguration" },
{ 0x11d, "exif:PageName" },
{ 0x11e, "exif:XPosition" },
{ 0x11f, "exif:YPosition" },
{ 0x118, "exif:MinSampleValue" },
{ 0x119, "exif:MaxSampleValue" },
{ 0x120, "exif:FreeOffsets" },
{ 0x121, "exif:FreeByteCounts" },
{ 0x122, "exif:GrayResponseUnit" },
{ 0x123, "exif:GrayResponseCurve" },
{ 0x124, "exif:T4Options" },
{ 0x125, "exif:T6Options" },
{ 0x128, "exif:ResolutionUnit" },
{ 0x12d, "exif:TransferFunction" },
{ 0x131, "exif:Software" },
{ 0x132, "exif:DateTime" },
{ 0x13b, "exif:Artist" },
{ 0x13e, "exif:WhitePoint" },
{ 0x13f, "exif:PrimaryChromaticities" },
{ 0x140, "exif:ColorMap" },
{ 0x141, "exif:HalfToneHints" },
{ 0x142, "exif:TileWidth" },
{ 0x143, "exif:TileLength" },
{ 0x144, "exif:TileOffsets" },
{ 0x145, "exif:TileByteCounts" },
{ 0x14a, "exif:SubIFD" },
{ 0x14c, "exif:InkSet" },
{ 0x14d, "exif:InkNames" },
{ 0x14e, "exif:NumberOfInks" },
{ 0x150, "exif:DotRange" },
{ 0x151, "exif:TargetPrinter" },
{ 0x152, "exif:ExtraSample" },
{ 0x153, "exif:SampleFormat" },
{ 0x154, "exif:SMinSampleValue" },
{ 0x155, "exif:SMaxSampleValue" },
{ 0x156, "exif:TransferRange" },
{ 0x157, "exif:ClipPath" },
{ 0x158, "exif:XClipPathUnits" },
{ 0x159, "exif:YClipPathUnits" },
{ 0x15a, "exif:Indexed" },
{ 0x15b, "exif:JPEGTables" },
{ 0x15f, "exif:OPIProxy" },
{ 0x200, "exif:JPEGProc" },
{ 0x201, "exif:JPEGInterchangeFormat" },
{ 0x202, "exif:JPEGInterchangeFormatLength" },
{ 0x203, "exif:JPEGRestartInterval" },
{ 0x205, "exif:JPEGLosslessPredictors" },
{ 0x206, "exif:JPEGPointTransforms" },
{ 0x207, "exif:JPEGQTables" },
{ 0x208, "exif:JPEGDCTables" },
{ 0x209, "exif:JPEGACTables" },
{ 0x211, "exif:YCbCrCoefficients" },
{ 0x212, "exif:YCbCrSubSampling" },
{ 0x213, "exif:YCbCrPositioning" },
{ 0x214, "exif:ReferenceBlackWhite" },
{ 0x2bc, "exif:ExtensibleMetadataPlatform" },
{ 0x301, "exif:Gamma" },
{ 0x302, "exif:ICCProfileDescriptor" },
{ 0x303, "exif:SRGBRenderingIntent" },
{ 0x320, "exif:ImageTitle" },
{ 0x5001, "exif:ResolutionXUnit" },
{ 0x5002, "exif:ResolutionYUnit" },
{ 0x5003, "exif:ResolutionXLengthUnit" },
{ 0x5004, "exif:ResolutionYLengthUnit" },
{ 0x5005, "exif:PrintFlags" },
{ 0x5006, "exif:PrintFlagsVersion" },
{ 0x5007, "exif:PrintFlagsCrop" },
{ 0x5008, "exif:PrintFlagsBleedWidth" },
{ 0x5009, "exif:PrintFlagsBleedWidthScale" },
{ 0x500A, "exif:HalftoneLPI" },
{ 0x500B, "exif:HalftoneLPIUnit" },
{ 0x500C, "exif:HalftoneDegree" },
{ 0x500D, "exif:HalftoneShape" },
{ 0x500E, "exif:HalftoneMisc" },
{ 0x500F, "exif:HalftoneScreen" },
{ 0x5010, "exif:JPEGQuality" },
{ 0x5011, "exif:GridSize" },
{ 0x5012, "exif:ThumbnailFormat" },
{ 0x5013, "exif:ThumbnailWidth" },
{ 0x5014, "exif:ThumbnailHeight" },
{ 0x5015, "exif:ThumbnailColorDepth" },
{ 0x5016, "exif:ThumbnailPlanes" },
{ 0x5017, "exif:ThumbnailRawBytes" },
{ 0x5018, "exif:ThumbnailSize" },
{ 0x5019, "exif:ThumbnailCompressedSize" },
{ 0x501a, "exif:ColorTransferFunction" },
{ 0x501b, "exif:ThumbnailData" },
{ 0x5020, "exif:ThumbnailImageWidth" },
{ 0x5021, "exif:ThumbnailImageHeight" },
{ 0x5022, "exif:ThumbnailBitsPerSample" },
{ 0x5023, "exif:ThumbnailCompression" },
{ 0x5024, "exif:ThumbnailPhotometricInterp" },
{ 0x5025, "exif:ThumbnailImageDescription" },
{ 0x5026, "exif:ThumbnailEquipMake" },
{ 0x5027, "exif:ThumbnailEquipModel" },
{ 0x5028, "exif:ThumbnailStripOffsets" },
{ 0x5029, "exif:ThumbnailOrientation" },
{ 0x502a, "exif:ThumbnailSamplesPerPixel" },
{ 0x502b, "exif:ThumbnailRowsPerStrip" },
{ 0x502c, "exif:ThumbnailStripBytesCount" },
{ 0x502d, "exif:ThumbnailResolutionX" },
{ 0x502e, "exif:ThumbnailResolutionY" },
{ 0x502f, "exif:ThumbnailPlanarConfig" },
{ 0x5030, "exif:ThumbnailResolutionUnit" },
{ 0x5031, "exif:ThumbnailTransferFunction" },
{ 0x5032, "exif:ThumbnailSoftwareUsed" },
{ 0x5033, "exif:ThumbnailDateTime" },
{ 0x5034, "exif:ThumbnailArtist" },
{ 0x5035, "exif:ThumbnailWhitePoint" },
{ 0x5036, "exif:ThumbnailPrimaryChromaticities" },
{ 0x5037, "exif:ThumbnailYCbCrCoefficients" },
{ 0x5038, "exif:ThumbnailYCbCrSubsampling" },
{ 0x5039, "exif:ThumbnailYCbCrPositioning" },
{ 0x503A, "exif:ThumbnailRefBlackWhite" },
{ 0x503B, "exif:ThumbnailCopyRight" },
{ 0x5090, "exif:LuminanceTable" },
{ 0x5091, "exif:ChrominanceTable" },
{ 0x5100, "exif:FrameDelay" },
{ 0x5101, "exif:LoopCount" },
{ 0x5110, "exif:PixelUnit" },
{ 0x5111, "exif:PixelPerUnitX" },
{ 0x5112, "exif:PixelPerUnitY" },
{ 0x5113, "exif:PaletteHistogram" },
{ 0x1000, "exif:RelatedImageFileFormat" },
{ 0x1001, "exif:RelatedImageLength" },
{ 0x1002, "exif:RelatedImageWidth" },
{ 0x800d, "exif:ImageID" },
{ 0x80e3, "exif:Matteing" },
{ 0x80e4, "exif:DataType" },
{ 0x80e5, "exif:ImageDepth" },
{ 0x80e6, "exif:TileDepth" },
{ 0x828d, "exif:CFARepeatPatternDim" },
{ 0x828e, "exif:CFAPattern2" },
{ 0x828f, "exif:BatteryLevel" },
{ 0x8298, "exif:Copyright" },
{ 0x829a, "exif:ExposureTime" },
{ 0x829d, "exif:FNumber" },
{ 0x83bb, "exif:IPTC/NAA" },
{ 0x84e3, "exif:IT8RasterPadding" },
{ 0x84e5, "exif:IT8ColorTable" },
{ 0x8649, "exif:ImageResourceInformation" },
{ 0x8769, "exif:ExifOffset" }, /* specs as "Exif IFD Pointer"? */
{ 0x8773, "exif:InterColorProfile" },
{ 0x8822, "exif:ExposureProgram" },
{ 0x8824, "exif:SpectralSensitivity" },
{ 0x8825, "exif:GPSInfo" }, /* specs as "GPSInfo IFD Pointer"? */
{ 0x8827, "exif:PhotographicSensitivity" },
{ 0x8828, "exif:OECF" },
{ 0x8829, "exif:Interlace" },
{ 0x882a, "exif:TimeZoneOffset" },
{ 0x882b, "exif:SelfTimerMode" },
{ 0x8830, "exif:SensitivityType" },
{ 0x8831, "exif:StandardOutputSensitivity" },
{ 0x8832, "exif:RecommendedExposureIndex" },
{ 0x8833, "exif:ISOSpeed" },
{ 0x8834, "exif:ISOSpeedLatitudeyyy" },
{ 0x8835, "exif:ISOSpeedLatitudezzz" },
{ 0x9000, "exif:ExifVersion" },
{ 0x9003, "exif:DateTimeOriginal" },
{ 0x9004, "exif:DateTimeDigitized" },
{ 0x9010, "exif:OffsetTime" },
{ 0x9011, "exif:OffsetTimeOriginal" },
{ 0x9012, "exif:OffsetTimeDigitized" },
{ 0x9101, "exif:ComponentsConfiguration" },
{ 0x9102, "exif:CompressedBitsPerPixel" },
{ 0x9201, "exif:ShutterSpeedValue" },
{ 0x9202, "exif:ApertureValue" },
{ 0x9203, "exif:BrightnessValue" },
{ 0x9204, "exif:ExposureBiasValue" },
{ 0x9205, "exif:MaxApertureValue" },
{ 0x9206, "exif:SubjectDistance" },
{ 0x9207, "exif:MeteringMode" },
{ 0x9208, "exif:LightSource" },
{ 0x9209, "exif:Flash" },
{ 0x920a, "exif:FocalLength" },
{ 0x920b, "exif:FlashEnergy" },
{ 0x920c, "exif:SpatialFrequencyResponse" },
{ 0x920d, "exif:Noise" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0x9211, "exif:ImageNumber" },
{ 0x9212, "exif:SecurityClassification" },
{ 0x9213, "exif:ImageHistory" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9215, "exif:ExposureIndex" },
{ 0x9216, "exif:TIFF-EPStandardID" },
{ 0x927c, "exif:MakerNote" },
{ 0x9286, "exif:UserComment" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0x9400, "exif:Temperature" },
{ 0x9401, "exif:Humidity" },
{ 0x9402, "exif:Pressure" },
{ 0x9403, "exif:WaterDepth" },
{ 0x9404, "exif:Acceleration" },
{ 0x9405, "exif:CameraElevationAngle" },
{ 0x9C9b, "exif:WinXP-Title" },
{ 0x9C9c, "exif:WinXP-Comments" },
{ 0x9C9d, "exif:WinXP-Author" },
{ 0x9C9e, "exif:WinXP-Keywords" },
{ 0x9C9f, "exif:WinXP-Subject" },
{ 0xa000, "exif:FlashPixVersion" },
{ 0xa001, "exif:ColorSpace" },
{ 0xa002, "exif:PixelXDimension" },
{ 0xa003, "exif:PixelYDimension" },
{ 0xa004, "exif:RelatedSoundFile" },
{ 0xa005, "exif:InteroperabilityOffset" },
{ 0xa20b, "exif:FlashEnergy" },
{ 0xa20c, "exif:SpatialFrequencyResponse" },
{ 0xa20d, "exif:Noise" },
{ 0xa20e, "exif:FocalPlaneXResolution" },
{ 0xa20f, "exif:FocalPlaneYResolution" },
{ 0xa210, "exif:FocalPlaneResolutionUnit" },
{ 0xa214, "exif:SubjectLocation" },
{ 0xa215, "exif:ExposureIndex" },
{ 0xa216, "exif:TIFF/EPStandardID" },
{ 0xa217, "exif:SensingMethod" },
{ 0xa300, "exif:FileSource" },
{ 0xa301, "exif:SceneType" },
{ 0xa302, "exif:CFAPattern" },
{ 0xa401, "exif:CustomRendered" },
{ 0xa402, "exif:ExposureMode" },
{ 0xa403, "exif:WhiteBalance" },
{ 0xa404, "exif:DigitalZoomRatio" },
{ 0xa405, "exif:FocalLengthIn35mmFilm" },
{ 0xa406, "exif:SceneCaptureType" },
{ 0xa407, "exif:GainControl" },
{ 0xa408, "exif:Contrast" },
{ 0xa409, "exif:Saturation" },
{ 0xa40a, "exif:Sharpness" },
{ 0xa40b, "exif:DeviceSettingDescription" },
{ 0xa40c, "exif:SubjectDistanceRange" },
{ 0xa420, "exif:ImageUniqueID" },
{ 0xa430, "exif:CameraOwnerName" },
{ 0xa431, "exif:BodySerialNumber" },
{ 0xa432, "exif:LensSpecification" },
{ 0xa433, "exif:LensMake" },
{ 0xa434, "exif:LensModel" },
{ 0xa435, "exif:LensSerialNumber" },
{ 0xc4a5, "exif:PrintImageMatching" },
{ 0xa500, "exif:Gamma" },
{ 0xc640, "exif:CR2Slice" },
{ 0x10000, "exif:GPSVersionID" },
{ 0x10001, "exif:GPSLatitudeRef" },
{ 0x10002, "exif:GPSLatitude" },
{ 0x10003, "exif:GPSLongitudeRef" },
{ 0x10004, "exif:GPSLongitude" },
{ 0x10005, "exif:GPSAltitudeRef" },
{ 0x10006, "exif:GPSAltitude" },
{ 0x10007, "exif:GPSTimeStamp" },
{ 0x10008, "exif:GPSSatellites" },
{ 0x10009, "exif:GPSStatus" },
{ 0x1000a, "exif:GPSMeasureMode" },
{ 0x1000b, "exif:GPSDop" },
{ 0x1000c, "exif:GPSSpeedRef" },
{ 0x1000d, "exif:GPSSpeed" },
{ 0x1000e, "exif:GPSTrackRef" },
{ 0x1000f, "exif:GPSTrack" },
{ 0x10010, "exif:GPSImgDirectionRef" },
{ 0x10011, "exif:GPSImgDirection" },
{ 0x10012, "exif:GPSMapDatum" },
{ 0x10013, "exif:GPSDestLatitudeRef" },
{ 0x10014, "exif:GPSDestLatitude" },
{ 0x10015, "exif:GPSDestLongitudeRef" },
{ 0x10016, "exif:GPSDestLongitude" },
{ 0x10017, "exif:GPSDestBearingRef" },
{ 0x10018, "exif:GPSDestBearing" },
{ 0x10019, "exif:GPSDestDistanceRef" },
{ 0x1001a, "exif:GPSDestDistance" },
{ 0x1001b, "exif:GPSProcessingMethod" },
{ 0x1001c, "exif:GPSAreaInformation" },
{ 0x1001d, "exif:GPSDateStamp" },
{ 0x1001e, "exif:GPSDifferential" },
{ 0x1001f, "exif:GPSHPositioningError" },
{ 0x00000, "" }
}; /* http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf */
const StringInfo
*profile;
const unsigned char
*directory,
*exif;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
MagickBooleanType
status;
ssize_t
i;
size_t
entry,
length,
number_entries,
tag,
tag_value;
SplayTreeInfo
*exif_resources;
ssize_t
all,
id,
level,
offset,
tag_offset;
static int
tag_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
/*
If EXIF data exists, then try to parse the request for a tag.
*/
profile=GetImageProfile(image,"exif");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if ((property == (const char *) NULL) || (*property == '\0'))
return(MagickFalse);
while (isspace((int) ((unsigned char) *property)) != 0)
property++;
if (strlen(property) <= 5)
return(MagickFalse);
all=0;
tag=(~0UL);
switch (*(property+5))
{
case '*':
{
/*
Caller has asked for all the tags in the EXIF data.
*/
tag=0;
all=1; /* return the data in description=value format */
break;
}
case '!':
{
tag=0;
all=2; /* return the data in tagid=value format */
break;
}
case '#':
case '@':
{
int
c;
size_t
n;
/*
Check for a hex based tag specification first.
*/
tag=(*(property+5) == '@') ? 1UL : 0UL;
property+=6;
n=strlen(property);
if (n != 4)
return(MagickFalse);
/*
Parse tag specification as a hex number.
*/
n/=4;
do
{
for (i=(ssize_t) n-1L; i >= 0; i--)
{
c=(*property++);
tag<<=4;
if ((c >= '0') && (c <= '9'))
tag|=(c-'0');
else
if ((c >= 'A') && (c <= 'F'))
tag|=(c-('A'-10));
else
if ((c >= 'a') && (c <= 'f'))
tag|=(c-('a'-10));
else
return(MagickFalse);
}
} while (*property != '\0');
break;
}
default:
{
/*
Try to match the text with a tag name instead.
*/
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (LocaleCompare(EXIFTag[i].description,property) == 0)
{
tag=(size_t) EXIFTag[i].tag;
break;
}
}
break;
}
}
if (tag == (~0UL))
return(MagickFalse);
length=GetStringInfoLength(profile);
if (length < 6)
return(MagickFalse);
exif=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadPropertyByte(&exif,&length) != 0x45)
continue;
if (ReadPropertyByte(&exif,&length) != 0x78)
continue;
if (ReadPropertyByte(&exif,&length) != 0x69)
continue;
if (ReadPropertyByte(&exif,&length) != 0x66)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadPropertySignedShort(LSBEndian,exif);
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadPropertyUnsignedShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadPropertySignedLong(endian,exif+4);
if ((offset < 0) || (size_t) offset >= length)
return(MagickFalse);
/*
Set the pointer to the first IFD and follow it where it leads.
*/
status=MagickFalse;
directory=exif+offset;
level=0;
entry=0;
tag_offset=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
/*
If there is anything on the stack then pop it off.
*/
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
tag_offset=directory_stack[level].offset;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=(size_t) ReadPropertyUnsignedShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
unsigned char
*p,
*q;
size_t
format;
ssize_t
number_bytes,
components;
q=(unsigned char *) (directory+(12*entry)+2);
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(size_t) ReadPropertyUnsignedShort(endian,q)+tag_offset;
format=(size_t) ReadPropertyUnsignedShort(endian,q+2);
if (format >= (sizeof(tag_bytes)/sizeof(*tag_bytes)))
break;
if (format == 0)
break; /* corrupt EXIF */
components=(ssize_t) ReadPropertySignedLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*tag_bytes[format];
if (number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
ssize_t
dir_offset;
/*
The directory entry contains an offset.
*/
dir_offset=(ssize_t) ReadPropertySignedLong(endian,q+8);
if ((dir_offset < 0) || (size_t) dir_offset >= length)
continue;
if (((size_t) dir_offset+number_bytes) < (size_t) dir_offset)
continue; /* prevent overflow */
if (((size_t) dir_offset+number_bytes) > length)
continue;
p=(unsigned char *) (exif+dir_offset);
}
if ((all != 0) || (tag == (size_t) tag_value))
{
char
buffer[MaxTextExtent],
*value;
if ((p < exif) || (p > (exif+length-tag_bytes[format])))
break;
value=(char *) NULL;
*buffer='\0';
switch (format)
{
case EXIF_FMT_BYTE:
case EXIF_FMT_UNDEFINED:
{
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if (isprint((int) p[i]) != 0)
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
case EXIF_FMT_SBYTE:
{
EXIFMultipleValues(1,"%.20g",(double) (*(signed char *) p1));
break;
}
case EXIF_FMT_SSHORT:
{
EXIFMultipleValues(2,"%hd",ReadPropertySignedShort(endian,p1));
break;
}
case EXIF_FMT_USHORT:
{
EXIFMultipleValues(2,"%hu",ReadPropertyUnsignedShort(endian,p1));
break;
}
case EXIF_FMT_ULONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertyUnsignedLong(endian,p1));
break;
}
case EXIF_FMT_SLONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertySignedLong(endian,p1));
break;
}
case EXIF_FMT_URATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertyUnsignedLong(endian,p1),(double)
ReadPropertyUnsignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SRATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertySignedLong(endian,p1),(double)
ReadPropertySignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SINGLE:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertySignedLong(endian,p1));
break;
}
case EXIF_FMT_DOUBLE:
{
EXIFMultipleValues(8,"%.20g",(double)
ReadPropertySignedLong(endian,p1));
break;
}
case EXIF_FMT_STRING:
default:
{
if ((p < exif) || (p > (exif+length-number_bytes)))
break;
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
ssize_t
i;
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if ((isprint((int) p[i]) != 0) || (p[i] == '\0'))
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
}
if (value != (char *) NULL)
{
char
*key;
const char
*p;
key=AcquireString(property);
switch (all)
{
case 1:
{
const char
*description;
ssize_t
i;
description="unknown";
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (EXIFTag[i].tag == tag_value)
{
description=EXIFTag[i].description;
break;
}
}
(void) FormatLocaleString(key,MaxTextExtent,"%s",
description);
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
break;
}
case 2:
{
if (tag_value < 0x10000)
(void) FormatLocaleString(key,MaxTextExtent,"#%04lx",
(unsigned long) tag_value);
else
if (tag_value < 0x20000)
(void) FormatLocaleString(key,MaxTextExtent,"@%04lx",
(unsigned long) (tag_value & 0xffff));
else
(void) FormatLocaleString(key,MaxTextExtent,"unknown");
break;
}
default:
{
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
}
}
p=(const char *) NULL;
if (image->properties != (void *) NULL)
p=(const char *) GetValueFromSplayTree((SplayTreeInfo *)
image->properties,key);
if (p == (const char *) NULL)
(void) SetImageProperty((Image *) image,key,value);
value=DestroyString(value);
key=DestroyString(key);
status=MagickTrue;
}
}
if ((tag_value == TAG_EXIF_OFFSET) ||
(tag_value == TAG_INTEROP_OFFSET) || (tag_value == TAG_GPS_OFFSET))
{
ssize_t
offset;
offset=(ssize_t) ReadPropertySignedLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
ssize_t
tag_offset1;
tag_offset1=(ssize_t) ((tag_value == TAG_GPS_OFFSET) ? 0x10000 :
0);
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
directory_stack[level].offset=tag_offset;
level++;
/*
Check for duplicate tag.
*/
for (i=0; i < level; i++)
if (directory_stack[i].directory == (exif+tag_offset1))
break;
if (i < level)
break; /* duplicate tag */
directory_stack[level].directory=exif+offset;
directory_stack[level].offset=tag_offset1;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)+4) > (exif+length))
break;
offset=(ssize_t) ReadPropertySignedLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
directory_stack[level].offset=tag_offset1;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(status);
}
func_hash: 75422468811560646183620950160304672170
file_name: property.c
file_hash: 320426917520707901134127411021604962567
cwe: CWE-704
cve: CVE-2022-32547
cve_desc: In ImageMagick, there is a load of a misaligned address for type 'double' (which requires 8-byte alignment) and for type 'float' (which requires 4-byte alignment) at MagickCore/property.c. When crafted or untrusted input is processed by ImageMagick, this negatively impacts application availability or causes other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32547
idx: 194,989
project: ImageMagick6
commit_id: 450949ed017f009b399c937cf362f0058eacc5fa
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/450949ed017f009b399c937cf362f0058eacc5fa
commit_message: Pull request: https://github.com/ImageMagick/ImageMagick/pull/4963
target: 1
func:
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
q++;
}
else
{
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
func_hash: 50584299779312396054491404176852470969
file_name: psd.c
file_hash: 159316916509494023086155162326374999236
cwe: CWE-190
cve: CVE-2022-32545
cve_desc: A vulnerability was found in ImageMagick that produces a value outside the range representable by type 'unsigned char' at coders/psd.c when crafted or untrusted input is processed. This negatively impacts application availability or causes other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32545
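
The patched row below changes ((unsigned char) pixel) to ((unsigned char) ((ssize_t) pixel)) in the 1-bit branch of ReadPSDChannelPixels. When Quantum is a floating type (HDRI builds), converting a float whose truncated value does not fit in unsigned char is undefined behavior, whereas routing through a wide signed integer first is defined for any value the wide type can hold, and the subsequent narrowing to unsigned char wraps modulo 256. A minimal sketch, with hypothetical stand-in types rather than ImageMagick's own:

// Minimal sketch of the out-of-range conversion behind CVE-2022-32545.
// 'Quantum' is a hypothetical stand-in for ImageMagick's HDRI Quantum.
#include <cstdio>

using Quantum = float;

unsigned char to_uchar_unsafe(Quantum pixel) {
  // Undefined behavior if the truncated value cannot be represented in
  // unsigned char, e.g. pixel = 65535.0f (UBSan: "value ... is outside the
  // range of representable values of type 'unsigned char'").
  return (unsigned char) pixel;
}

unsigned char to_uchar_safe(Quantum pixel) {
  // float -> long long is defined for values the wide type can hold, and
  // long long -> unsigned char then wraps modulo 256 (well defined).
  // This mirrors the patch's ((unsigned char) ((ssize_t) pixel)).
  return (unsigned char) ((long long) pixel);
}

int main() {
  std::printf("%u\n", (unsigned) to_uchar_safe((Quantum) 65535.0f));
  return 0;
}
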
idx: 218,785
project: ImageMagick6
commit_id: 450949ed017f009b399c937cf362f0058eacc5fa
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/450949ed017f009b399c937cf362f0058eacc5fa
commit_message: Pull request: https://github.com/ImageMagick/ImageMagick/pull/4963
target: 0
func:
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
q++;
}
else
{
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,
(((unsigned char) ((ssize_t) pixel)) & (0x01 << (7-bit))) != 0 ? 0 :
QuantumRange,q++,indexes,x++);
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
func_hash: 177518249272594340059836567736761123364
file_name: psd.c
file_hash: 226732625250511916284298083592366716300
cwe: CWE-190
cve: CVE-2022-32545
cve_desc: A vulnerability was found in ImageMagick that produces a value outside the range representable by type 'unsigned char' at coders/psd.c when crafted or untrusted input is processed. This negatively impacts application availability or causes other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32545
idx: 194,994
project: tensorflow
commit_id: c79ccba517dbb1a0ccb9b01ee3bd2a63748b60dd
project_url: https://github.com/tensorflow/tensorflow
commit_url: https://github.com/tensorflow/tensorflow/commit/c79ccba517dbb1a0ccb9b01ee3bd2a63748b60dd
commit_message: Fix memory leak when a graph node is invalid.
If a graph node is invalid but a kernel is created, then we set the kernel back to `nullptr` but forget to delete it. Hence, we get a memory leak.
PiperOrigin-RevId: 408968108
Change-Id: I1d8a9d0d8988ed5e08be8b9f2004ce1b4cd11b7c
target: 1
func:
Status ImmutableExecutorState::Initialize(const Graph& graph) {
TF_RETURN_IF_ERROR(gview_.Initialize(&graph));
// Build the information about frames in this subgraph.
ControlFlowInfo cf_info;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(&graph, &cf_info));
for (auto& it : cf_info.unique_frame_names) {
EnsureFrameInfo(it)->nodes =
absl::make_unique<std::vector<const NodeItem*>>();
}
root_frame_info_ = frame_info_[""].get();
pending_ids_.resize(gview_.num_nodes());
// Preprocess every node in the graph to create an instance of op
// kernel for each node.
requires_control_flow_ = false;
for (const Node* n : graph.nodes()) {
if (IsSink(n)) continue;
if (IsSwitch(n) || IsMerge(n) || IsEnter(n) || IsExit(n)) {
requires_control_flow_ = true;
} else if (IsRecv(n)) {
// A Recv node from a different device may produce dead tensors from
// non-local control-flow nodes.
//
// TODO(mrry): Track whether control flow was present in the
// pre-partitioned graph, and enable the caller (e.g.
// `DirectSession`) to relax this constraint.
string send_device;
string recv_device;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "send_device", &send_device));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "recv_device", &recv_device));
if (send_device != recv_device) {
requires_control_flow_ = true;
}
}
const int id = n->id();
const string& frame_name = cf_info.frame_names[id];
FrameInfo* frame_info = EnsureFrameInfo(frame_name);
NodeItem* item = gview_.node(id);
item->node_id = id;
item->input_start = frame_info->total_inputs;
frame_info->total_inputs += n->num_inputs();
Status s = params_.create_kernel(n->properties(), &item->kernel);
if (!s.ok()) {
item->kernel = nullptr;
s = AttachDef(s, *n);
return s;
}
CHECK(item->kernel);
item->kernel_is_async = (item->kernel->AsAsync() != nullptr);
item->is_merge = IsMerge(n);
item->is_any_consumer_merge_or_control_trigger = false;
for (const Node* consumer : n->out_nodes()) {
if (IsMerge(consumer) || IsControlTrigger(consumer)) {
item->is_any_consumer_merge_or_control_trigger = true;
break;
}
}
const Tensor* const_tensor = item->kernel->const_tensor();
if (const_tensor) {
// Hold onto a shallow copy of the constant tensor in `*this` so that the
// reference count does not drop to 1. This prevents the constant tensor
// from being forwarded, and its buffer reused.
const_tensors_.emplace_back(*const_tensor);
}
item->const_tensor = const_tensor;
item->is_noop = (item->kernel->type_string_view() == "NoOp");
item->is_enter = IsEnter(n);
if (item->is_enter) {
bool is_constant_enter;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "is_constant", &is_constant_enter));
item->is_constant_enter = is_constant_enter;
string frame_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &frame_name));
FrameInfo* frame_info = frame_info_[frame_name].get();
int parallel_iterations;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "parallel_iterations", ¶llel_iterations));
if (frame_info->parallel_iterations == -1) {
frame_info->parallel_iterations = parallel_iterations;
} else if (frame_info->parallel_iterations != parallel_iterations) {
LOG(WARNING) << "Loop frame \"" << frame_name
<< "\" had two different values for parallel_iterations: "
<< frame_info->parallel_iterations << " vs. "
<< parallel_iterations << ".";
}
if (enter_frame_info_.size() <= id) {
enter_frame_info_.resize(id + 1);
}
enter_frame_info_[id] = frame_info;
} else {
item->is_constant_enter = false;
}
item->is_exit = IsExit(n);
item->is_control_trigger = IsControlTrigger(n);
item->is_source = IsSource(n);
item->is_enter_exit_or_next_iter =
(IsEnter(n) || IsExit(n) || IsNextIteration(n));
item->is_transfer_node = IsTransferNode(n);
item->is_initialization_op = IsInitializationOp(n);
item->is_recv_or_switch = IsRecv(n) || IsSwitch(n);
item->is_next_iteration = IsNextIteration(n);
item->is_distributed_communication = IsDistributedCommunication(n);
// Compute the maximum values we'll store for this node in the
// pending counts data structure, and allocate a handle in
// that frame's pending counts data structure that has enough
// space to store these maximal count values.
size_t max_pending, max_dead;
GetMaxPendingCounts(n, &max_pending, &max_dead);
pending_ids_[id] =
frame_info->pending_counts_layout.CreateHandle(max_pending, max_dead);
// See if this node is a root node, and if so, add item to root_nodes_.
if (n->in_edges().empty()) {
root_nodes_.push_back(item);
}
// Initialize static information about the frames in the graph.
frame_info->nodes->push_back(item);
if (item->is_enter) {
string enter_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &enter_name));
EnsureFrameInfo(enter_name)->input_count++;
}
// Record information about whether each output of the op is used.
std::unique_ptr<bool[]> outputs_required(new bool[n->num_outputs()]);
std::fill(&outputs_required[0], &outputs_required[n->num_outputs()], false);
int32_t unused_outputs = n->num_outputs();
for (const Edge* e : n->out_edges()) {
if (IsSink(e->dst())) continue;
if (e->src_output() >= 0) {
if (!outputs_required[e->src_output()]) {
--unused_outputs;
outputs_required[e->src_output()] = true;
}
}
}
if (unused_outputs > 0) {
for (int i = 0; i < n->num_outputs(); ++i) {
if (!outputs_required[i]) {
metrics::RecordUnusedOutput(n->type_string());
}
}
item->outputs_required = std::move(outputs_required);
}
}
// Rewrite each `EdgeInfo::input_slot` member to refer directly to the input
// location.
for (const Node* n : graph.nodes()) {
if (IsSink(n)) continue;
const int id = n->id();
NodeItem* item = gview_.node(id);
for (EdgeInfo& e : item->mutable_output_edges()) {
const int dst_id = e.dst_id;
NodeItem* dst_item = gview_.node(dst_id);
e.input_slot += dst_item->input_start;
}
}
// Initialize PendingCounts only after pending_ids_[node.id] is initialized
// for all nodes.
InitializePending(&graph, cf_info);
return gview_.SetAllocAttrs(&graph, params_.device);
}
func_hash: 105248557138287586060572648585871722551
file_name: immutable_executor_state.cc
file_hash: 234046012522402227954780787024760975669
cwe: CWE-401
cve: CVE-2022-23578
cve_desc: TensorFlow is an Open Source Machine Learning Framework. If a graph node is invalid, TensorFlow can leak memory in the implementation of `ImmutableExecutorState::Initialize`. Here, we set `item->kernel` to `nullptr`, but it is a plain `OpKernel*` pointer, so the memory that was previously allocated to it would leak. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-23578
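
The fix, visible in the patched row below, adds a params_.delete_kernel(item->kernel) call on the error path before nulling the pointer. The general pattern: a factory that writes a heap object through an out-parameter may fail after allocating, so the caller owns whatever was written and must free it before discarding the pointer. A minimal self-contained sketch with hypothetical types, not TensorFlow's actual API:

// Minimal sketch of the CWE-401 pattern fixed in this commit.
// 'OpKernel', 'Status', and 'create_kernel' are hypothetical stand-ins.
struct OpKernel {};

struct Status {
  bool ok;
};

// May allocate *kernel and then fail validation afterwards; on failure the
// caller still owns the object written through the out-parameter.
Status create_kernel(bool node_valid, OpKernel **kernel) {
  *kernel = new OpKernel();
  if (!node_valid)
    return Status{false};
  return Status{true};
}

Status initialize_node(bool node_valid, OpKernel **item_kernel) {
  Status s = create_kernel(node_valid, item_kernel);
  if (!s.ok) {
    delete *item_kernel;     // the fix: free the half-built kernel first
    *item_kernel = nullptr;  // the vulnerable version only did this line
    return s;
  }
  return s;
}

int main() {
  OpKernel *kernel = nullptr;
  (void) initialize_node(false, &kernel);  // error path no longer leaks
  return 0;
}
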
idx: 218,852
project: tensorflow
commit_id: c79ccba517dbb1a0ccb9b01ee3bd2a63748b60dd
project_url: https://github.com/tensorflow/tensorflow
commit_url: https://github.com/tensorflow/tensorflow/commit/c79ccba517dbb1a0ccb9b01ee3bd2a63748b60dd
commit_message: Fix memory leak when a graph node is invalid.
If a graph node is invalid but a kernel is created, then we set the kernel back to `nullptr` but forget to delete it. Hence, we get a memory leak.
PiperOrigin-RevId: 408968108
Change-Id: I1d8a9d0d8988ed5e08be8b9f2004ce1b4cd11b7c
target: 0
func:
Status ImmutableExecutorState::Initialize(const Graph& graph) {
TF_RETURN_IF_ERROR(gview_.Initialize(&graph));
// Build the information about frames in this subgraph.
ControlFlowInfo cf_info;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(&graph, &cf_info));
for (auto& it : cf_info.unique_frame_names) {
EnsureFrameInfo(it)->nodes =
absl::make_unique<std::vector<const NodeItem*>>();
}
root_frame_info_ = frame_info_[""].get();
pending_ids_.resize(gview_.num_nodes());
// Preprocess every node in the graph to create an instance of op
// kernel for each node.
requires_control_flow_ = false;
for (const Node* n : graph.nodes()) {
if (IsSink(n)) continue;
if (IsSwitch(n) || IsMerge(n) || IsEnter(n) || IsExit(n)) {
requires_control_flow_ = true;
} else if (IsRecv(n)) {
// A Recv node from a different device may produce dead tensors from
// non-local control-flow nodes.
//
// TODO(mrry): Track whether control flow was present in the
// pre-partitioned graph, and enable the caller (e.g.
// `DirectSession`) to relax this constraint.
string send_device;
string recv_device;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "send_device", &send_device));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "recv_device", &recv_device));
if (send_device != recv_device) {
requires_control_flow_ = true;
}
}
const int id = n->id();
const string& frame_name = cf_info.frame_names[id];
FrameInfo* frame_info = EnsureFrameInfo(frame_name);
NodeItem* item = gview_.node(id);
item->node_id = id;
item->input_start = frame_info->total_inputs;
frame_info->total_inputs += n->num_inputs();
Status s = params_.create_kernel(n->properties(), &item->kernel);
if (!s.ok()) {
params_.delete_kernel(item->kernel);
item->kernel = nullptr;
s = AttachDef(s, *n);
return s;
}
CHECK(item->kernel);
item->kernel_is_async = (item->kernel->AsAsync() != nullptr);
item->is_merge = IsMerge(n);
item->is_any_consumer_merge_or_control_trigger = false;
for (const Node* consumer : n->out_nodes()) {
if (IsMerge(consumer) || IsControlTrigger(consumer)) {
item->is_any_consumer_merge_or_control_trigger = true;
break;
}
}
const Tensor* const_tensor = item->kernel->const_tensor();
if (const_tensor) {
// Hold onto a shallow copy of the constant tensor in `*this` so that the
// reference count does not drop to 1. This prevents the constant tensor
// from being forwarded, and its buffer reused.
const_tensors_.emplace_back(*const_tensor);
}
item->const_tensor = const_tensor;
item->is_noop = (item->kernel->type_string_view() == "NoOp");
item->is_enter = IsEnter(n);
if (item->is_enter) {
bool is_constant_enter;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "is_constant", &is_constant_enter));
item->is_constant_enter = is_constant_enter;
string frame_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &frame_name));
FrameInfo* frame_info = frame_info_[frame_name].get();
int parallel_iterations;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "parallel_iterations", ¶llel_iterations));
if (frame_info->parallel_iterations == -1) {
frame_info->parallel_iterations = parallel_iterations;
} else if (frame_info->parallel_iterations != parallel_iterations) {
LOG(WARNING) << "Loop frame \"" << frame_name
<< "\" had two different values for parallel_iterations: "
<< frame_info->parallel_iterations << " vs. "
<< parallel_iterations << ".";
}
if (enter_frame_info_.size() <= id) {
enter_frame_info_.resize(id + 1);
}
enter_frame_info_[id] = frame_info;
} else {
item->is_constant_enter = false;
}
item->is_exit = IsExit(n);
item->is_control_trigger = IsControlTrigger(n);
item->is_source = IsSource(n);
item->is_enter_exit_or_next_iter =
(IsEnter(n) || IsExit(n) || IsNextIteration(n));
item->is_transfer_node = IsTransferNode(n);
item->is_initialization_op = IsInitializationOp(n);
item->is_recv_or_switch = IsRecv(n) || IsSwitch(n);
item->is_next_iteration = IsNextIteration(n);
item->is_distributed_communication = IsDistributedCommunication(n);
// Compute the maximum values we'll store for this node in the
// pending counts data structure, and allocate a handle in
// that frame's pending counts data structure that has enough
// space to store these maximal count values.
size_t max_pending, max_dead;
GetMaxPendingCounts(n, &max_pending, &max_dead);
pending_ids_[id] =
frame_info->pending_counts_layout.CreateHandle(max_pending, max_dead);
// See if this node is a root node, and if so, add item to root_nodes_.
if (n->in_edges().empty()) {
root_nodes_.push_back(item);
}
// Initialize static information about the frames in the graph.
frame_info->nodes->push_back(item);
if (item->is_enter) {
string enter_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &enter_name));
EnsureFrameInfo(enter_name)->input_count++;
}
// Record information about whether each output of the op is used.
std::unique_ptr<bool[]> outputs_required(new bool[n->num_outputs()]);
std::fill(&outputs_required[0], &outputs_required[n->num_outputs()], false);
int32_t unused_outputs = n->num_outputs();
for (const Edge* e : n->out_edges()) {
if (IsSink(e->dst())) continue;
if (e->src_output() >= 0) {
if (!outputs_required[e->src_output()]) {
--unused_outputs;
outputs_required[e->src_output()] = true;
}
}
}
if (unused_outputs > 0) {
for (int i = 0; i < n->num_outputs(); ++i) {
if (!outputs_required[i]) {
metrics::RecordUnusedOutput(n->type_string());
}
}
item->outputs_required = std::move(outputs_required);
}
}
// Rewrite each `EdgeInfo::input_slot` member to refer directly to the input
// location.
for (const Node* n : graph.nodes()) {
if (IsSink(n)) continue;
const int id = n->id();
NodeItem* item = gview_.node(id);
for (EdgeInfo& e : item->mutable_output_edges()) {
const int dst_id = e.dst_id;
NodeItem* dst_item = gview_.node(dst_id);
e.input_slot += dst_item->input_start;
}
}
// Initialize PendingCounts only after pending_ids_[node.id] is initialized
// for all nodes.
InitializePending(&graph, cf_info);
return gview_.SetAllocAttrs(&graph, params_.device);
}
|
156764801773187472412077288460661715117
|
immutable_executor_state.cc
|
208458315060777566057381971058447382110
|
CWE-401
|
CVE-2022-23578
|
Tensorflow is an Open Source Machine Learning Framework. If a graph node is invalid, TensorFlow can leak memory in the implementation of `ImmutableExecutorState::Initialize`. Here, we set `item->kernel` to `nullptr`, but it is a plain `OpKernel*` pointer, so the memory previously allocated to it leaks. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23578
|
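The pair of records above shows the CWE-401 pattern in miniature: when `create_kernel` fails, the vulnerable code reset `item->kernel` to `nullptr` while the `OpKernel` that had already been allocated was never released; the patched variant calls `params_.delete_kernel` before clearing the pointer. Below is a minimal, self-contained sketch of the same leak-and-fix shape; `OpKernel`, `create_kernel`, and the forced failure are stand-ins, not TensorFlow's actual API.

#include <iostream>

struct OpKernel {
  ~OpKernel() { std::cout << "kernel destroyed\n"; }
};

// Hypothetical factory mirroring the CWE-401 pattern: the allocation
// succeeds, but validation of the node fails afterwards.
bool create_kernel(OpKernel** out) {
  *out = new OpKernel;
  return false;
}

int main() {
  OpKernel* kernel = nullptr;
  if (!create_kernel(&kernel)) {
    // Leaky variant: `kernel = nullptr;` alone drops the only pointer.
    delete kernel;     // fixed variant: release the allocation first,
    kernel = nullptr;  // then clear the pointer on the error path.
    return 1;
  }
  delete kernel;
  return 0;
}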
194,996
|
tensorflow
|
4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
|
Prevent null dereference read in `GetInitOp`.
We have a map of maps. We test that the key exists in the first map, but there is no validation that the second map also has the needed key. Without this check, in the scenarios where it does not, we dereference a nullptr.
PiperOrigin-RevId: 408739325
Change-Id: If9bb7ed759aba1f3b56a34913f209508dbaf65ce
| 1
|
Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def,
string* init_op_name) {
const auto& sig_def_map = meta_graph_def.signature_def();
const auto& init_op_sig_it =
meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey);
if (init_op_sig_it != sig_def_map.end()) {
*init_op_name = init_op_sig_it->second.outputs()
.find(kSavedModelInitOpSignatureKey)
->second.name();
return Status::OK();
}
const auto& collection_def_map = meta_graph_def.collection_def();
string init_op_collection_key;
if (collection_def_map.find(kSavedModelMainOpKey) !=
collection_def_map.end()) {
init_op_collection_key = kSavedModelMainOpKey;
} else {
init_op_collection_key = kSavedModelLegacyInitOpKey;
}
const auto init_op_it = collection_def_map.find(init_op_collection_key);
if (init_op_it != collection_def_map.end()) {
if (init_op_it->second.node_list().value_size() != 1) {
return errors::FailedPrecondition(
strings::StrCat("Expected exactly one main op in : ", export_dir));
}
*init_op_name = init_op_it->second.node_list().value(0);
}
return Status::OK();
}
|
90320046309155279319769139363770698236
|
loader_util.cc
|
223638670651747648145854147173893848422
|
CWE-476
|
CVE-2022-23577
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `GetInitOp` is vulnerable to a crash caused by dereferencing a null pointer. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23577
|
218,933
|
tensorflow
|
4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
|
Prevent null dereference read in `GetInitOp`.
We have a map of maps. We test that the key exists in the first map, but there is no validation that the second map also has the needed key. Without this check, in the scenarios where it does not, we dereference a nullptr.
PiperOrigin-RevId: 408739325
Change-Id: If9bb7ed759aba1f3b56a34913f209508dbaf65ce
| 0
|
Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def,
string* init_op_name) {
const auto& sig_def_map = meta_graph_def.signature_def();
const auto& init_op_sig_it =
meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey);
if (init_op_sig_it != sig_def_map.end()) {
const auto& sig_def_outputs = init_op_sig_it->second.outputs();
const auto& sig_def_outputs_it =
sig_def_outputs.find(kSavedModelInitOpSignatureKey);
if (sig_def_outputs_it == sig_def_outputs.end()) {
return errors::FailedPrecondition("Could not find output ",
kSavedModelInitOpSignatureKey);
}
*init_op_name = sig_def_outputs_it->second.name();
return Status::OK();
}
const auto& collection_def_map = meta_graph_def.collection_def();
string init_op_collection_key;
if (collection_def_map.find(kSavedModelMainOpKey) !=
collection_def_map.end()) {
init_op_collection_key = kSavedModelMainOpKey;
} else {
init_op_collection_key = kSavedModelLegacyInitOpKey;
}
const auto init_op_it = collection_def_map.find(init_op_collection_key);
if (init_op_it != collection_def_map.end()) {
if (init_op_it->second.node_list().value_size() != 1) {
return errors::FailedPrecondition(
strings::StrCat("Expected exactly one main op in : ", export_dir));
}
*init_op_name = init_op_it->second.node_list().value(0);
}
return Status::OK();
}
|
120370294428908534368713689048437773064
|
loader_util.cc
|
225205642200693417259460288987767726126
|
CWE-476
|
CVE-2022-23577
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `GetInitOp` is vulnerable to a crash caused by dereferencing a null pointer. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23577
|
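The `GetInitOp` pair above is a map-of-maps hazard: the outer `signature_def()` lookup succeeds, but nothing guarantees the inner `outputs()` map contains the same key, and dereferencing the resulting end iterator reads through an invalid pointer. A minimal sketch of the guarded inner lookup the fix introduces, using `std::map` in place of the protobuf map types and a hypothetical key string:

#include <iostream>
#include <map>
#include <string>

int main() {
  // The inner map deliberately lacks the key, the exact shape of input
  // that crashed the vulnerable code.
  std::map<std::string, std::map<std::string, std::string>> signature_defs;
  signature_defs["__saved_model_init_op"] = {};

  auto outer = signature_defs.find("__saved_model_init_op");
  if (outer != signature_defs.end()) {
    const auto& outputs = outer->second;
    auto inner = outputs.find("__saved_model_init_op");
    if (inner == outputs.end()) {  // the validation the patch adds
      std::cerr << "missing init op output\n";
      return 1;
    }
    std::cout << inner->second << "\n";
  }
  return 0;
}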
194,998
|
tensorflow
|
240655511cd3e701155f944a972db71b6c0b1bb6
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/240655511cd3e701155f944a972db71b6c0b1bb6
|
Eliminate `CHECK`-fails from `IsSimplifiableReshape` via `MakeShape(<invalid shape>)`
PiperOrigin-RevId: 409166738
Change-Id: I7f0a3590b8acae3f3e3e2fe636e1f5ef285693cf
| 1
|
Status ConstantFolding::IsSimplifiableReshape(
const NodeDef& node, const GraphProperties& properties) const {
if (!IsReshape(node)) {
return errors::Internal("Node ", node.name(), " is not a Reshape node");
}
if (2 > node.input_size()) {
return errors::Internal("Node ", node.name(),
" must have at most 2 inputs but has ",
node.input_size());
}
const NodeDef* new_shape = node_map_->GetNode(node.input(1));
if (!IsReallyConstant(*new_shape)) {
return errors::Internal("Node ", node.name(), " has shape ",
new_shape->DebugString(),
" which is not a constant");
}
TensorVector outputs;
auto outputs_cleanup = gtl::MakeCleanup([&outputs] {
for (const auto& output : outputs) {
delete output.tensor;
}
});
Status s = EvaluateNode(*new_shape, TensorVector(), &outputs);
if (!s.ok()) {
return errors::Internal("Could not evaluate node ", node.name());
}
if (outputs.size() != 1) {
return errors::Internal("Node ", node.name(),
" must have exactly 1 output but has ",
outputs.size());
}
const std::vector<OpInfo::TensorProperties>& props =
properties.GetInputProperties(node.name());
if (props.empty()) {
return errors::Internal("Node ", node.name(), " has no properties");
}
const OpInfo::TensorProperties& prop = props[0];
if (prop.dtype() == DT_INVALID) {
return errors::Internal("Node ", node.name(), " has property ",
prop.DebugString(), " with invalid dtype");
}
const PartialTensorShape shape(prop.shape());
if (!shape.IsFullyDefined()) {
return errors::Internal("Node ", node.name(), " has property ",
prop.DebugString(), " with shape ",
shape.DebugString(), " which is not fully defined");
}
PartialTensorShape new_dims;
if (outputs[0]->dtype() == DT_INT32) {
std::vector<int32> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) {
int32_t dim = outputs[0]->flat<int32>()(i);
shp.push_back(dim);
}
TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims));
} else {
std::vector<int64_t> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) {
int64_t dim = outputs[0]->flat<int64_t>()(i);
shp.push_back(dim);
}
TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims));
}
if (!shape.IsCompatibleWith(new_dims)) {
return errors::Internal("Expected shape ", shape.DebugString(),
"to be compatible with ", new_dims.DebugString());
}
return Status::OK();
}
|
122664089420988233915419567191040959656
|
constant_folding.cc
|
35061507297230918846503076104140700863
|
CWE-617
|
CVE-2022-23581
|
Tensorflow is an Open Source Machine Learning Framework. The Grappler optimizer in TensorFlow can be used to cause a denial of service by altering a `SavedModel` such that `IsSimplifiableReshape` would trigger `CHECK` failures. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23581
|
219,032
|
tensorflow
|
240655511cd3e701155f944a972db71b6c0b1bb6
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/240655511cd3e701155f944a972db71b6c0b1bb6
|
Eliminate `CHECK`-fails from `IsSimplifiableReshape` via `MakeShape(<invalid shape>)`
PiperOrigin-RevId: 409166738
Change-Id: I7f0a3590b8acae3f3e3e2fe636e1f5ef285693cf
| 0
|
Status ConstantFolding::IsSimplifiableReshape(
const NodeDef& node, const GraphProperties& properties) const {
if (!IsReshape(node)) {
return errors::Internal("Node ", node.name(), " is not a Reshape node");
}
if (2 > node.input_size()) {
return errors::Internal("Node ", node.name(),
" must have at most 2 inputs but has ",
node.input_size());
}
const NodeDef* new_shape = node_map_->GetNode(node.input(1));
if (!IsReallyConstant(*new_shape)) {
return errors::Internal("Node ", node.name(), " has shape ",
new_shape->DebugString(),
" which is not a constant");
}
TensorVector outputs;
auto outputs_cleanup = gtl::MakeCleanup([&outputs] {
for (const auto& output : outputs) {
delete output.tensor;
}
});
Status s = EvaluateNode(*new_shape, TensorVector(), &outputs);
if (!s.ok()) {
return errors::Internal("Could not evaluate node ", node.name());
}
if (outputs.size() != 1) {
return errors::Internal("Node ", node.name(),
" must have exactly 1 output but has ",
outputs.size());
}
const std::vector<OpInfo::TensorProperties>& props =
properties.GetInputProperties(node.name());
if (props.empty()) {
return errors::Internal("Node ", node.name(), " has no properties");
}
const OpInfo::TensorProperties& prop = props[0];
if (prop.dtype() == DT_INVALID) {
return errors::Internal("Node ", node.name(), " has property ",
prop.DebugString(), " with invalid dtype");
}
const PartialTensorShape shape(prop.shape());
if (!shape.IsFullyDefined()) {
return errors::Internal("Node ", node.name(), " has property ",
prop.DebugString(), " with shape ",
shape.DebugString(), " which is not fully defined");
}
PartialTensorShape new_dims;
if (outputs[0]->dtype() == DT_INT32) {
std::vector<int32> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) {
int32_t dim = outputs[0]->flat<int32>()(i);
shp.push_back(dim);
}
s = TensorShapeUtils::MakeShape(shp, &new_dims);
if (!s.ok()) return s;
} else {
std::vector<int64_t> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) {
int64_t dim = outputs[0]->flat<int64_t>()(i);
shp.push_back(dim);
}
s = TensorShapeUtils::MakeShape(shp, &new_dims);
if (!s.ok()) return s;
}
if (!shape.IsCompatibleWith(new_dims)) {
return errors::Internal("Expected shape ", shape.DebugString(),
"to be compatible with ", new_dims.DebugString());
}
return Status::OK();
}
|
262760907526734396914090099303096262406
|
constant_folding.cc
|
271606694375277711450004865336349725435
|
CWE-617
|
CVE-2022-23581
|
Tensorflow is an Open Source Machine Learning Framework. The Grappler optimizer in TensorFlow can be used to cause a denial of service by altering a `SavedModel` such that `IsSimplifiableReshape` would trigger `CHECK` failures. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23581
|
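The change above replaces `TF_CHECK_OK(TensorShapeUtils::MakeShape(...))`, which aborts the whole process when a hostile graph supplies an invalid shape (CWE-617), with a `Status` that propagates the error to the caller. The sketch below isolates that abort-versus-propagate trade; `Status` and `MakeShape` here are simplified stand-ins, not the TensorFlow types.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct Status {
  bool ok;
  std::string msg;
};

// Returns an error instead of CHECK-aborting on a bad dimension, so a
// malicious SavedModel cannot turn shape validation into a DoS.
Status MakeShape(const std::vector<int64_t>& dims) {
  for (int64_t d : dims) {
    if (d < 0) return {false, "negative dimension " + std::to_string(d)};
  }
  return {true, ""};
}

int main() {
  Status s = MakeShape({2, -3, 4});  // dims as a hostile graph might supply
  if (!s.ok) std::cerr << "rejected: " << s.msg << "\n";
  return s.ok ? 0 : 1;
}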
195,017
|
gpac
|
ad18ece95fa064efc0995c4ab2c985f77fb166ec
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/ad18ece95fa064efc0995c4ab2c985f77fb166ec
|
fixed #1904
| 1
|
u32 GetHintFormat(GF_TrackBox *trak)
{
GF_HintMediaHeaderBox *hmhd = (GF_HintMediaHeaderBox *)trak->Media->information->InfoHeader;
if (hmhd->type != GF_ISOM_BOX_TYPE_HMHD)
return 0;
if (!hmhd || !hmhd->subType) {
GF_Box *a = (GF_Box *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, 0);
if (!hmhd) return a ? a->type : 0;
if (a) hmhd->subType = a->type;
return hmhd->subType;
}
return hmhd->subType;
}
|
91218268849686441388880855658517990203
|
hint_track.c
|
60176895274654779679144452624639678766
|
CWE-476
|
CVE-2021-40576
|
The binary MP4Box in Gpac 1.0.1 has a null pointer dereference vulnerability in the gf_isom_get_payt_count function in hint_track.c, which allows attackers to cause a denial of service.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40576
|
219,912
|
gpac
|
ad18ece95fa064efc0995c4ab2c985f77fb166ec
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/ad18ece95fa064efc0995c4ab2c985f77fb166ec
|
fixed #1904
| 0
|
u32 GetHintFormat(GF_TrackBox *trak)
{
GF_HintMediaHeaderBox *hmhd = (GF_HintMediaHeaderBox *)trak->Media->information->InfoHeader;
if (!hmhd || (hmhd->type != GF_ISOM_BOX_TYPE_HMHD))
return 0;
if (!hmhd || !hmhd->subType) {
GF_Box *a = (GF_Box *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, 0);
if (!hmhd) return a ? a->type : 0;
if (a) hmhd->subType = a->type;
return hmhd->subType;
}
return hmhd->subType;
}
|
240641657114030682383886931707833033482
|
hint_track.c
|
28976036322661795345788739460485147148
|
CWE-476
|
CVE-2021-40576
|
The binary MP4Box in Gpac 1.0.1 has a null pointer dereference vulnerability in the gf_isom_get_payt_count function in hint_track.c, which allows attackers to cause a denial of service.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40576
|
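The one-line GPAC fix is about evaluation order: the vulnerable `GetHintFormat` read `hmhd->type` one line before its `!hmhd` test could run, so a track with no hint media header dereferenced NULL. A minimal sketch of the corrected ordering with stand-in types; the constant `0x686D6864` spells 'hmhd' in four ASCII bytes, mirroring the ISO box-type convention:

#include <iostream>

struct HintMediaHeader {
  unsigned type;
  unsigned subType;
};

// Null is rejected before any field is read; the vulnerable version did
// the type comparison first and only then tested the pointer.
unsigned GetFormat(const HintMediaHeader* hmhd) {
  if (!hmhd || hmhd->type != 0x686D6864 /* 'hmhd' */) return 0;
  return hmhd->subType;
}

int main() {
  std::cout << GetFormat(nullptr) << "\n";  // safely prints 0
  return 0;
}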
195,019
|
tensorflow
|
6b5adc0877de832b2a7c189532dbbbc64622eeb6
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/6b5adc0877de832b2a7c189532dbbbc64622eeb6
|
Prevent `CHECK`-fail when building reference tensor.
The tensor constructor does not allow reference dtypes, as these should not show up explicitly. However, when passed these invalid types, instead of building an invalid object the constructor crashes via a `CHECK`-fail. We have a static builder that properly handles this case, but it is not applicable given current usage.
Instead, before calling the constructor, we can check that the dtype is not a reference type and return an error otherwise, given that the dtype is user-controlled, so malicious users can trigger a denial of service.
PiperOrigin-RevId: 409662503
Change-Id: I5892f831fde7f276cd7ab34519cf6b8061c71a59
| 1
|
Status ConstantFolding::EvaluateOneFoldable(const NodeDef& node,
std::vector<NodeDef>* outputs,
bool* result_too_large) {
TensorVector inputs;
TensorVector output_tensors;
auto inputs_cleanup = gtl::MakeCleanup([&inputs, &output_tensors] {
for (const auto& input : inputs) {
delete input.tensor;
}
for (const auto& output : output_tensors) {
if (output.tensor) {
delete output.tensor;
}
}
});
size_t total_inputs_size = 0;
for (const auto& input : node.input()) {
const TensorId input_tensor = ParseTensorName(input);
if (input_tensor.index() < 0) {
// Control dependency
break;
}
const NodeDef* input_node = node_map_->GetNode(input);
if (!IsReallyConstant(*input_node)) {
return Status(error::INVALID_ARGUMENT,
strings::StrCat("Can't fold ", node.name(), ", its ", input,
" isn't constant"));
}
TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value"));
const TensorProto& raw_val = input_node->attr().at("value").tensor();
if (raw_val.dtype() == DT_INVALID) {
return Status(
error::INVALID_ARGUMENT,
strings::StrCat("A tensor in the input node, with TensorId of ",
input_tensor.ToString(),
" has a dtype of DT_INVALID."));
}
Tensor* value = new Tensor(raw_val.dtype(), raw_val.tensor_shape());
if (!value->FromProto(raw_val)) {
delete (value);
return errors::InvalidArgument("Unable to make Tensor from proto for ",
node.name(), " with shape ",
raw_val.tensor_shape().DebugString());
}
inputs.emplace_back(value);
total_inputs_size += value->TotalBytes();
}
TF_RETURN_IF_ERROR(EvaluateNode(node, inputs, &output_tensors));
if (output_tensors.empty()) {
return Status(error::INVALID_ARGUMENT, "Expected at least one output.");
}
outputs->resize(output_tensors.size());
for (size_t i = 0; i < output_tensors.size(); i++) {
string node_name = OptimizedNodeName(node, "-folded");
if (output_tensors.size() > 1) {
node_name = strings::StrCat(node_name, "-", i);
}
if (output_tensors[i].tensor) {
Status s = CreateNodeDef(node_name, output_tensors[i], &outputs->at(i),
total_inputs_size);
if (!s.ok()) {
*result_too_large = true;
return s;
}
} else {
// Create an empty NodeDef to identify dead outputs (e.g. the output of a
// switch that's not selected by the switch predicate).
outputs->at(i) = NodeDef();
}
}
return Status::OK();
}
|
33937240667530924395323323412961833143
|
constant_folding.cc
|
221573695858123615640237954647315751120
|
CWE-617
|
CVE-2022-23588
|
Tensorflow is an Open Source Machine Learning Framework. A malicious user can cause a denial of service by altering a `SavedModel` such that Grappler optimizer would attempt to build a tensor using a reference `dtype`. This would result in a crash due to a `CHECK`-fail in the `Tensor` constructor as reference types are not allowed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23588
|
219,931
|
tensorflow
|
6b5adc0877de832b2a7c189532dbbbc64622eeb6
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/6b5adc0877de832b2a7c189532dbbbc64622eeb6
|
Prevent `CHECK`-fail when building reference tensor.
The tensor constructor does not allow reference dtypes, as these should not show up explicitly. However, when passed these invalid types, instead of building an invalid object the constructor crashes via a `CHECK`-fail. We have a static builder that properly handles this case, but it is not applicable given current usage.
Instead, before calling the constructor, we can check that the dtype is not a reference type and return an error otherwise, given that the dtype is user-controlled, so malicious users can trigger a denial of service.
PiperOrigin-RevId: 409662503
Change-Id: I5892f831fde7f276cd7ab34519cf6b8061c71a59
| 0
|
Status ConstantFolding::EvaluateOneFoldable(const NodeDef& node,
std::vector<NodeDef>* outputs,
bool* result_too_large) {
TensorVector inputs;
TensorVector output_tensors;
auto inputs_cleanup = gtl::MakeCleanup([&inputs, &output_tensors] {
for (const auto& input : inputs) {
delete input.tensor;
}
for (const auto& output : output_tensors) {
if (output.tensor) {
delete output.tensor;
}
}
});
size_t total_inputs_size = 0;
for (const auto& input : node.input()) {
const TensorId input_tensor = ParseTensorName(input);
if (input_tensor.index() < 0) {
// Control dependency
break;
}
const NodeDef* input_node = node_map_->GetNode(input);
if (!IsReallyConstant(*input_node)) {
return Status(error::INVALID_ARGUMENT,
strings::StrCat("Can't fold ", node.name(), ", its ", input,
" isn't constant"));
}
TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value"));
const TensorProto& raw_val = input_node->attr().at("value").tensor();
if (raw_val.dtype() == DT_INVALID) {
return Status(
error::INVALID_ARGUMENT,
strings::StrCat("A tensor in the input node, with TensorId of ",
input_tensor.ToString(),
" has a dtype of DT_INVALID."));
}
if (IsRefType(raw_val.dtype())) {
return errors::InvalidArgument(
"Not allowed to construct a tensor with reference dtype, got ",
DataTypeString(raw_val.dtype()));
}
Tensor* value = new Tensor(raw_val.dtype(), raw_val.tensor_shape());
if (!value->FromProto(raw_val)) {
delete (value);
return errors::InvalidArgument("Unable to make Tensor from proto for ",
node.name(), " with shape ",
raw_val.tensor_shape().DebugString());
}
inputs.emplace_back(value);
total_inputs_size += value->TotalBytes();
}
TF_RETURN_IF_ERROR(EvaluateNode(node, inputs, &output_tensors));
if (output_tensors.empty()) {
return Status(error::INVALID_ARGUMENT, "Expected at least one output.");
}
outputs->resize(output_tensors.size());
for (size_t i = 0; i < output_tensors.size(); i++) {
string node_name = OptimizedNodeName(node, "-folded");
if (output_tensors.size() > 1) {
node_name = strings::StrCat(node_name, "-", i);
}
if (output_tensors[i].tensor) {
Status s = CreateNodeDef(node_name, output_tensors[i], &outputs->at(i),
total_inputs_size);
if (!s.ok()) {
*result_too_large = true;
return s;
}
} else {
// Create an empty NodeDef to identify dead outputs (e.g. the output of a
// switch that's not selected by the switch predicate).
outputs->at(i) = NodeDef();
}
}
return Status::OK();
}
|
111779981092160670584101984885423453823
|
constant_folding.cc
|
271606694375277711450004865336349725435
|
CWE-617
|
CVE-2022-23588
|
Tensorflow is an Open Source Machine Learning Framework. A malicious user can cause a denial of service by altering a `SavedModel` such that Grappler optimizer would attempt to build a tensor using a reference `dtype`. This would result in a crash due to a `CHECK`-fail in the `Tensor` constructor as reference types are not allowed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23588
|
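The guard added above rejects reference dtypes before the `Tensor` constructor can `CHECK`-fail on them. The sketch below models that pre-validation in isolation; the base-plus-100 encoding of reference types follows TensorFlow's convention, but the enum, `IsRefType`, and `BuildTensor` are stand-ins.

#include <iostream>
#include <string>

enum DataType { DT_FLOAT = 1, DT_FLOAT_REF = 101 };

bool IsRefType(DataType dt) { return dt > 100; }

// Validates the user-controlled dtype up front and returns an error,
// where the vulnerable path fed it straight into an aborting constructor.
bool BuildTensor(DataType dt, std::string* err) {
  if (IsRefType(dt)) {
    *err = "refusing to construct a tensor with a reference dtype";
    return false;
  }
  // ... construct the tensor ...
  return true;
}

int main() {
  std::string err;
  if (!BuildTensor(DT_FLOAT_REF, &err)) std::cerr << err << "\n";
  return 0;
}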
195,022
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
|
https://github.com/babelouest/glewlwyd
|
https://github.com/babelouest/glewlwyd/commit/125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
|
Fix update session when auth fail
| 1
|
int callback_glewlwyd_user_auth (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
json_t * j_param = ulfius_get_json_body_request(request, NULL), * j_result = NULL;
const char * ip_source = get_ip_source(request);
char * issued_for = get_client_hostname(request);
char * session_uid, expires[129];
time_t now;
struct tm ts;
time(&now);
now += GLEWLWYD_DEFAULT_SESSION_EXPIRATION_COOKIE;
gmtime_r(&now, &ts);
strftime(expires, 128, "%a, %d %b %Y %T %Z", &ts);
if (j_param != NULL) {
if (json_string_length(json_object_get(j_param, "username"))) {
if (json_object_get(j_param, "scheme_type") == NULL || 0 == o_strcmp(json_string_value(json_object_get(j_param, "scheme_type")), "password")) {
if (json_string_length(json_object_get(j_param, "password"))) {
j_result = auth_check_user_credentials(config, json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "password")));
if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (1)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with password", json_string_value(json_object_get(j_param, "username")));
}
o_free(session_uid);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID_SCHEME, 1, "scheme_type", "password", NULL);
} else {
if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username %s at IP Address %s", json_string_value(json_object_get(j_param, "username")), ip_source);
}
if ((session_uid = get_session_id(config, request)) != NULL && user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (2)");
}
o_free(session_uid);
response->status = 401;
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID_SCHEME, 1, "scheme_type", "password", NULL);
}
json_decref(j_result);
} else if (json_object_get(j_param, "password") != NULL && !json_is_string(json_object_get(j_param, "password"))) {
ulfius_set_string_body_response(response, 400, "password must be a string");
} else {
session_uid = get_session_id(config, request);
j_result = get_users_for_session(config, session_uid);
if (check_result_value(j_result, G_OK)) {
// Refresh username to set as default
if (user_session_update(config, u_map_get(request->map_cookie, config->session_key), u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 0) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (3)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
}
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 401;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error get_users_for_session");
response->status = 500;
}
o_free(session_uid);
json_decref(j_result);
}
} else {
if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name")) && json_is_object(json_object_get(j_param, "value"))) {
j_result = auth_check_user_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_string_value(json_object_get(j_param, "username")), json_object_get(j_param, "value"), request);
if (check_result_value(j_result, G_ERROR_PARAM)) {
ulfius_set_string_body_response(response, 400, "bad scheme response");
} else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username %s at IP Address %s", json_string_value(json_object_get(j_param, "username")), ip_source);
response->status = 401;
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID_SCHEME, 1, "scheme_type", json_string_value(json_object_get(j_param, "scheme_type")), "scheme_name", json_string_value(json_object_get(j_param, "scheme_name")), NULL);
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_name")), 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (4)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with scheme '%s/%s'", json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
}
o_free(session_uid);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID_SCHEME, 1, "scheme_type", json_string_value(json_object_get(j_param, "scheme_type")), "scheme_name", json_string_value(json_object_get(j_param, "scheme_name")), NULL);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error auth_check_user_scheme");
response->status = 500;
}
json_decref(j_result);
} else {
ulfius_set_string_body_response(response, 400, "scheme_type, scheme_name and value are mandatory");
}
}
} else {
if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name")) && json_is_object(json_object_get(j_param, "value"))) {
j_result = auth_check_identify_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_object_get(j_param, "value"), request);
if (check_result_value(j_result, G_ERROR_PARAM)) {
ulfius_set_string_body_response(response, 400, "bad scheme response");
} else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username <UNKNOWN> at IP Address %s", ip_source);
response->status = 401;
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_result, "username")), json_string_value(json_object_get(j_param, "scheme_name")), 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (4)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with scheme '%s/%s'", json_string_value(json_object_get(j_result, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
}
o_free(session_uid);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error auth_check_user_scheme");
response->status = 500;
}
json_decref(j_result);
} else {
ulfius_set_string_body_response(response, 400, "username is mandatory");
}
}
} else {
ulfius_set_string_body_response(response, 400, "Input parameters must be in JSON format");
}
json_decref(j_param);
o_free(issued_for);
return U_CALLBACK_CONTINUE;
}
|
236114269060053642565806917047085397848
|
webservice.c
|
249878395356016662912854745569339968395
|
CWE-287
|
CVE-2021-45379
|
Glewlwyd 2.0.0, fixed in 2.6.1, is affected by an incorrect access control vulnerability. One user can attempt to log in as another user without that user's password.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-45379
|
219,947
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
|
https://github.com/babelouest/glewlwyd
|
https://github.com/babelouest/glewlwyd/commit/125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
|
Fix update session when auth fail
| 0
|
int callback_glewlwyd_user_auth (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
json_t * j_param = ulfius_get_json_body_request(request, NULL), * j_result = NULL;
const char * ip_source = get_ip_source(request);
char * issued_for = get_client_hostname(request);
char * session_uid, expires[129];
time_t now;
struct tm ts;
time(&now);
now += GLEWLWYD_DEFAULT_SESSION_EXPIRATION_COOKIE;
gmtime_r(&now, &ts);
strftime(expires, 128, "%a, %d %b %Y %T %Z", &ts);
if (j_param != NULL) {
if (json_string_length(json_object_get(j_param, "username"))) {
if (json_object_get(j_param, "scheme_type") == NULL || 0 == o_strcmp(json_string_value(json_object_get(j_param, "scheme_type")), "password")) {
if (json_string_length(json_object_get(j_param, "password"))) {
j_result = auth_check_user_credentials(config, json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "password")));
if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (1)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with password", json_string_value(json_object_get(j_param, "username")));
}
o_free(session_uid);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID_SCHEME, 1, "scheme_type", "password", NULL);
} else {
if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username %s at IP Address %s", json_string_value(json_object_get(j_param, "username")), ip_source);
}
response->status = 401;
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID_SCHEME, 1, "scheme_type", "password", NULL);
}
json_decref(j_result);
} else if (json_object_get(j_param, "password") != NULL && !json_is_string(json_object_get(j_param, "password"))) {
ulfius_set_string_body_response(response, 400, "password must be a string");
} else {
session_uid = get_session_id(config, request);
j_result = get_users_for_session(config, session_uid);
if (check_result_value(j_result, G_OK)) {
// Refresh username to set as default
if (user_session_update(config, u_map_get(request->map_cookie, config->session_key), u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 0) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (3)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
}
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 401;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error get_users_for_session");
response->status = 500;
}
o_free(session_uid);
json_decref(j_result);
}
} else {
if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name")) && json_is_object(json_object_get(j_param, "value"))) {
j_result = auth_check_user_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_string_value(json_object_get(j_param, "username")), json_object_get(j_param, "value"), request);
if (check_result_value(j_result, G_ERROR_PARAM)) {
ulfius_set_string_body_response(response, 400, "bad scheme response");
} else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username %s at IP Address %s", json_string_value(json_object_get(j_param, "username")), ip_source);
response->status = 401;
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID_SCHEME, 1, "scheme_type", json_string_value(json_object_get(j_param, "scheme_type")), "scheme_name", json_string_value(json_object_get(j_param, "scheme_name")), NULL);
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_name")), 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (4)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with scheme '%s/%s'", json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
}
o_free(session_uid);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID_SCHEME, 1, "scheme_type", json_string_value(json_object_get(j_param, "scheme_type")), "scheme_name", json_string_value(json_object_get(j_param, "scheme_name")), NULL);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error auth_check_user_scheme");
response->status = 500;
}
json_decref(j_result);
} else {
ulfius_set_string_body_response(response, 400, "scheme_type, scheme_name and value are mandatory");
}
}
} else {
if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name")) && json_is_object(json_object_get(j_param, "value"))) {
j_result = auth_check_identify_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_object_get(j_param, "value"), request);
if (check_result_value(j_result, G_ERROR_PARAM)) {
ulfius_set_string_body_response(response, 400, "bad scheme response");
} else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username <UNKNOWN> at IP Address %s", ip_source);
response->status = 401;
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_result, "username")), json_string_value(json_object_get(j_param, "scheme_name")), 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (4)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with scheme '%s/%s'", json_string_value(json_object_get(j_result, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
}
o_free(session_uid);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error auth_check_user_scheme");
response->status = 500;
}
json_decref(j_result);
} else {
ulfius_set_string_body_response(response, 400, "username is mandatory");
}
}
} else {
ulfius_set_string_body_response(response, 400, "Input parameters must be in JSON format");
}
json_decref(j_param);
o_free(issued_for);
return U_CALLBACK_CONTINUE;
}
|
155113792370707223407331204609439430532
|
webservice.c
|
287798817606377336444620654835011177393
|
CWE-287
|
CVE-2021-45379
|
Glewlwyd 2.0.0, fixed in 2.6.1, is affected by an incorrect access control vulnerability. One user can attempt to log in as another user without that user's password.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-45379
|
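The difference between the two handlers above is easy to miss in 130 lines of C: when `auth_check_user_credentials` failed, the vulnerable version still called `user_session_update` with the attacker-supplied username, binding that identity to the session cookie; the fix returns 401 without touching the session. A minimal sketch of the corrected control flow, with a hard-coded stand-in credential check:

#include <iostream>
#include <string>

struct Session {
  std::string user;
};

// Stand-in credential store; the real check hits the configured backends.
bool check_credentials(const std::string& user, const std::string& pw) {
  return user == "alice" && pw == "secret";
}

int login(Session& s, const std::string& user, const std::string& pw) {
  if (!check_credentials(user, pw)) {
    // Fixed behaviour: the session is left untouched on failure. The
    // vulnerable handler updated it here with the supplied username.
    return 401;
  }
  s.user = user;  // only a successful check may bind the identity
  return 200;
}

int main() {
  Session s;
  std::cout << login(s, "alice", "wrong") << " user='" << s.user << "'\n";
  return 0;
}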
195,023
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/a68f68061e263a88321c104a6c911fe5598050a8
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor* input_indices;
const Tensor* input_values;
const Tensor* input_shape;
SparseTensorsMap* map;
OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
input_indices->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
input_values->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
input_shape->shape().DebugString()));
OP_REQUIRES(
context,
input_values->shape().dim_size(0) == input_indices->shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices. ", "Got ",
input_values->shape().dim_size(0),
" values, indices shape: ", input_indices->shape().DebugString()));
OP_REQUIRES(
context,
input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", input_shape->shape().dim_size(0),
" dimensions, indices shape: ",
input_indices->shape().DebugString()));
int rank = input_shape->NumElements();
OP_REQUIRES(
context, rank > 1,
errors::InvalidArgument(
"Rank of input SparseTensor should be > 1, but saw rank: ", rank));
auto input_shape_vec = input_shape->vec<int64_t>();
int new_num_elements = 1;
bool overflow_ocurred = false;
for (int i = 0; i < input_shape_vec.size(); i++) {
new_num_elements =
MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i));
if (new_num_elements < 0) {
overflow_ocurred = true;
break;
}
}
OP_REQUIRES(
context, !overflow_ocurred,
errors::Internal("Encountered overflow from large input shape."));
TensorShape tensor_input_shape(input_shape_vec);
gtl::InlinedVector<int64_t, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
SparseTensor input_st;
OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
tensor_input_shape, std_order,
&input_st));
const int64_t N = input_shape_vec(0);
Tensor sparse_handles(DT_INT64, TensorShape({N}));
auto sparse_handles_t = sparse_handles.vec<int64_t>();
OP_REQUIRES_OK(context, input_st.IndicesValid());
// We can generate the output shape proto string now, for all
// minibatch entries.
TensorShape output_shape;
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
input_shape_vec.data() + 1,
input_shape->NumElements() - 1, &output_shape));
// Get groups by minibatch dimension
std::unordered_set<int64_t> visited;
sparse::GroupIterable minibatch = input_st.group({0});
for (const auto& subset : minibatch) {
const int64_t b = subset.group()[0];
visited.insert(b);
OP_REQUIRES(
context, b > -1 && b < N,
errors::InvalidArgument(
"Received unexpected column 0 value in input SparseTensor: ", b,
" < 0 or >= N (= ", N, ")"));
const auto indices = subset.indices();
const auto values = subset.values<T>();
const int64_t num_entries = values.size();
Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto output_indices_t = output_indices.matrix<int64_t>();
auto output_values_t = output_values.vec<T>();
for (int i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
output_indices_t(i, d - 1) = indices(i, d);
}
output_values_t(i) = values(i);
}
SparseTensor st_i;
OP_REQUIRES_OK(context,
SparseTensor::Create(output_indices, output_values,
output_shape, &st_i));
int64_t handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
}
// Fill in any gaps; we must provide an empty ST for batch entries
// the grouper didn't find.
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
SparseTensor empty_st;
OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
output_shape, &empty_st));
for (int64_t b = 0; b < N; ++b) {
// We skipped this batch entry.
if (visited.find(b) == visited.end()) {
int64_t handle;
OP_REQUIRES_OK(context,
map->AddSparseTensor(context, empty_st, &handle));
sparse_handles_t(b) = handle;
}
}
}
context->set_output(0, sparse_handles);
}
|
160387063214720131730960354923232758630
|
sparse_tensors_map_ops.cc
|
224775123349374780251651202891389866533
|
CWE-190
|
CVE-2022-23568
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `AddManySparseToTensorsMap` is vulnerable to an integer overflow that results in a `CHECK`-fail when building new `TensorShape` objects (that is, an assert-failure-based denial of service). We are missing some validation on the shapes of the input tensors, and we directly construct a large `TensorShape` with user-provided dimensions. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23568
|
220,021
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/a68f68061e263a88321c104a6c911fe5598050a8
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
| 0
|
void Compute(OpKernelContext* context) override {
const Tensor* input_indices;
const Tensor* input_values;
const Tensor* input_shape;
SparseTensorsMap* map;
OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
input_indices->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
input_values->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
input_shape->shape().DebugString()));
OP_REQUIRES(
context,
input_values->shape().dim_size(0) == input_indices->shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices. ", "Got ",
input_values->shape().dim_size(0),
" values, indices shape: ", input_indices->shape().DebugString()));
OP_REQUIRES(
context,
input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", input_shape->shape().dim_size(0),
" dimensions, indices shape: ",
input_indices->shape().DebugString()));
int rank = input_shape->NumElements();
OP_REQUIRES(
context, rank > 1,
errors::InvalidArgument(
"Rank of input SparseTensor should be > 1, but saw rank: ", rank));
auto input_shape_vec = input_shape->vec<int64_t>();
TensorShape tensor_input_shape;
OP_REQUIRES_OK(context, TensorShape::BuildTensorShape(input_shape_vec,
&tensor_input_shape));
gtl::InlinedVector<int64_t, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
SparseTensor input_st;
OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
tensor_input_shape, std_order,
&input_st));
const int64_t N = input_shape_vec(0);
Tensor sparse_handles(DT_INT64, TensorShape({N}));
auto sparse_handles_t = sparse_handles.vec<int64_t>();
OP_REQUIRES_OK(context, input_st.IndicesValid());
// We can generate the output shape proto string now, for all
// minibatch entries.
TensorShape output_shape;
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
input_shape_vec.data() + 1,
input_shape->NumElements() - 1, &output_shape));
// Get groups by minibatch dimension
std::unordered_set<int64_t> visited;
sparse::GroupIterable minibatch = input_st.group({0});
for (const auto& subset : minibatch) {
const int64_t b = subset.group()[0];
visited.insert(b);
OP_REQUIRES(
context, b > -1 && b < N,
errors::InvalidArgument(
"Received unexpected column 0 value in input SparseTensor: ", b,
" < 0 or >= N (= ", N, ")"));
const auto indices = subset.indices();
const auto values = subset.values<T>();
const int64_t num_entries = values.size();
Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto output_indices_t = output_indices.matrix<int64_t>();
auto output_values_t = output_values.vec<T>();
for (int i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
output_indices_t(i, d - 1) = indices(i, d);
}
output_values_t(i) = values(i);
}
SparseTensor st_i;
OP_REQUIRES_OK(context,
SparseTensor::Create(output_indices, output_values,
output_shape, &st_i));
int64_t handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
}
// Fill in any gaps; we must provide an empty ST for batch entries
// the grouper didn't find.
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
SparseTensor empty_st;
OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
output_shape, &empty_st));
for (int64_t b = 0; b < N; ++b) {
// We skipped this batch entry.
if (visited.find(b) == visited.end()) {
int64_t handle;
OP_REQUIRES_OK(context,
map->AddSparseTensor(context, empty_st, &handle));
sparse_handles_t(b) = handle;
}
}
}
context->set_output(0, sparse_handles);
}
|
294930600730557371611113946400120075396
|
sparse_tensors_map_ops.cc
|
5591389034837291700501932002893322459
|
CWE-190
|
CVE-2022-23568
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `AddManySparseToTensorsMap` is vulnerable to an integer overflow that results in a `CHECK`-fail when building new `TensorShape` objects (that is, an assert-failure-based denial of service). We are missing some validation on the shapes of the input tensors, and we directly construct a large `TensorShape` with user-provided dimensions. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23568
|
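A minimal sketch of the validation idea behind the record above, with illustrative names (`SafeNumElements` is not the TensorFlow API): the product of user-supplied dimensions is overflow-checked before any shape object is built, so hostile input produces an error instead of a `CHECK`-fail.

#include <cstdint>
#include <limits>
#include <vector>

// Returns false if any dimension is negative or the element count would
// overflow int64_t; on success stores the total element count in *out.
bool SafeNumElements(const std::vector<int64_t>& dims, int64_t* out) {
  int64_t n = 1;
  for (int64_t d : dims) {
    if (d < 0) return false;  // reject negative dimensions up front
    if (d != 0 && n > std::numeric_limits<int64_t>::max() / d)
      return false;           // n * d would overflow int64_t
    n *= d;
  }
  *out = n;
  return true;  // safe to build a shape from these dimensions
}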
195,026
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
|
https://github.com/torvalds/linux
|
https://github.com/torvalds/linux/commit/ab0fc21bc7105b54bafd85bd8b82742f9e68898a
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After opening a file a second time with the O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
| 1
|
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
struct iattr attr;
int err;
/*
* If no cached dentry exists or if it's negative, NFSv4 handled the
* opens in ->lookup() or ->create().
*
* We only get this far for a cached positive dentry. We skipped
* revalidation, so handle it here by dropping the dentry and returning
* -EOPENSTALE. The VFS will retry the lookup/create/open.
*/
dprintk("NFS: open file(%pd2)\n", dentry);
err = nfs_check_flags(openflags);
if (err)
return err;
if ((openflags & O_ACCMODE) == 3)
return nfs_open(inode, filp);
/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
parent = dget_parent(dentry);
dir = d_inode(parent);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
attr.ia_valid = ATTR_OPEN;
if (openflags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
filemap_write_and_wait(inode->i_mapping);
}
inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
default:
goto out_put_ctx;
case -ENOENT:
case -ESTALE:
case -EISDIR:
case -ENOTDIR:
case -ELOOP:
goto out_drop;
}
}
if (inode != d_inode(dentry))
goto out_drop;
nfs_file_set_open_context(filp, ctx);
nfs_fscache_open_file(inode, filp);
err = 0;
out_put_ctx:
put_nfs_open_context(ctx);
out:
dput(parent);
return err;
out_drop:
d_drop(dentry);
err = -EOPENSTALE;
goto out_put_ctx;
}
|
67846125552854891508125900978071958871
|
nfs4file.c
|
109456154040292488452120321326967957719
|
CWE-909
|
CVE-2022-24448
|
An issue was discovered in fs/nfs/dir.c in the Linux kernel before 5.16.5. If an application sets the O_DIRECTORY flag, and tries to open a regular file, nfs_atomic_open() performs a regular lookup. If a regular file is found, ENOTDIR should occur, but the server instead returns uninitialized data in the file descriptor.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-24448
|
220,100
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
|
https://github.com/torvalds/linux
|
https://github.com/torvalds/linux/commit/ab0fc21bc7105b54bafd85bd8b82742f9e68898a
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After opening a file a second time with the O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
| 0
|
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
struct iattr attr;
int err;
/*
* If no cached dentry exists or if it's negative, NFSv4 handled the
* opens in ->lookup() or ->create().
*
* We only get this far for a cached positive dentry. We skipped
* revalidation, so handle it here by dropping the dentry and returning
* -EOPENSTALE. The VFS will retry the lookup/create/open.
*/
dprintk("NFS: open file(%pd2)\n", dentry);
err = nfs_check_flags(openflags);
if (err)
return err;
if ((openflags & O_ACCMODE) == 3)
openflags--;
/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
parent = dget_parent(dentry);
dir = d_inode(parent);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
attr.ia_valid = ATTR_OPEN;
if (openflags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
filemap_write_and_wait(inode->i_mapping);
}
inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
default:
goto out_put_ctx;
case -ENOENT:
case -ESTALE:
case -EISDIR:
case -ENOTDIR:
case -ELOOP:
goto out_drop;
}
}
if (inode != d_inode(dentry))
goto out_drop;
nfs_file_set_open_context(filp, ctx);
nfs_fscache_open_file(inode, filp);
err = 0;
out_put_ctx:
put_nfs_open_context(ctx);
out:
dput(parent);
return err;
out_drop:
d_drop(dentry);
err = -EOPENSTALE;
goto out_put_ctx;
}
|
272987829557105540879962051296017178836
|
nfs4file.c
|
19160442996144037090827134285929888626
|
CWE-909
|
CVE-2022-24448
|
An issue was discovered in fs/nfs/dir.c in the Linux kernel before 5.16.5. If an application sets the O_DIRECTORY flag, and tries to open a regular file, nfs_atomic_open() performs a regular lookup. If a regular file is found, ENOTDIR should occur, but the server instead returns uninitialized data in the file descriptor.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-24448
|
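A small userspace illustration of the access mode involved in the two records above; this is a hedged sketch, not kernel code. On Linux the low two bits of the open(2) flags carry the access mode (O_RDONLY=0, O_WRONLY=1, O_RDWR=2), `O_ACCMODE` masks them, and the value 3 is the special ioctl-only mode; the restored `openflags--` turns it back into plain O_RDWR before the NFS open path runs.

#include <cstdio>
#include <fcntl.h>

int main() {
  unsigned openflags = O_WRONLY | O_RDWR;  // the special "3" access mode
  std::printf("O_ACCMODE mask: %#o\n", O_ACCMODE);
  if ((openflags & O_ACCMODE) == 3)
    openflags--;                           // 3 -> 2, i.e. treat as O_RDWR
  std::printf("treated as O_RDWR: %s\n",
              (openflags & O_ACCMODE) == O_RDWR ? "yes" : "no");
  return 0;
}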
195,028
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/ab51e5b813573dc9f51efa335aebcf2994125ee9
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
| 1
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output));
} else {
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output));
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
|
67814436772398534036630434647873886403
|
decode_image_op.cc
|
283519422605879710361255065504339887165
|
CWE-401
|
CVE-2022-23585
|
Tensorflow is an Open Source Machine Learning Framework. When decoding PNG images TensorFlow can produce a memory leak if the image is invalid. After calling `png::CommonInitDecode(..., &decode)`, the `decode` value contains allocated buffers which can only be freed by calling `png::CommonFreeDecode(&decode)`. However, several error cases in the function implementation invoke the `OP_REQUIRES` macro, which immediately terminates the execution of the function without allowing the memory to be freed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23585
|
220,168
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/ab51e5b813573dc9f51efa335aebcf2994125ee9
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
| 0
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// If we reach this point, then there is data in `decode` which must be
// freed by the time we end execution in this function. We cannot call
// `png::CommonFreeDecode()` before an `OP_REQUIRES` because if
// `OP_REQUIRES` constraint is satisfied then the data would be freed
// prematurely. Instead, let's use a `Cleanup` object.
auto cleanup = gtl::MakeCleanup([&decode]() {
std::cerr << "Cleanup called...\n";
png::CommonFreeDecode(&decode);
});
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output));
} else {
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output));
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
|
183944225263640230240348550837981668390
|
decode_image_op.cc
|
140340118421060830961361158847913918052
|
CWE-401
|
CVE-2022-23585
|
Tensorflow is an Open Source Machine Learning Framework. When decoding PNG images TensorFlow can produce a memory leak if the image is invalid. After calling `png::CommonInitDecode(..., &decode)`, the `decode` value contains allocated buffers which can only be freed by calling `png::CommonFreeDecode(&decode)`. However, several error cases in the function implementation invoke the `OP_REQUIRES` macro, which immediately terminates the execution of the function without allowing the memory to be freed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23585
|
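A generic stand-in for the cleanup object the fixed record introduces; this is a minimal sketch of the scope-guard pattern, not TensorFlow's `gtl::MakeCleanup`. The destructor runs on every exit path, so the early `OP_REQUIRES` returns can no longer leak the decoder buffers.

#include <cstdio>
#include <utility>

template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : f_(std::move(f)) {}
  ~ScopeGuard() { f_(); }  // fires on normal return, early return, or throw
  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;

 private:
  F f_;
};

template <typename F>
ScopeGuard<F> MakeScopeGuard(F f) { return ScopeGuard<F>(std::move(f)); }

int main() {
  auto guard = MakeScopeGuard([] { std::puts("decode buffers freed"); });
  // Any early return between here and the end still runs the guard.
  return 0;
}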
195,029
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/c99d98cd189839dcf51aee94e7437b54b31f8abd
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
| 1
|
void Node::RunForwardTypeInference() {
VLOG(4) << "Forward type inference: " << props_->node_def.DebugString();
if (props_->fwd_type_fn == nullptr) {
return;
}
std::vector<Node*> input_nodes(props_->input_types.size(), nullptr);
std::vector<int> input_idx(props_->input_types.size(), 0);
for (const auto& edge : in_edges_) {
if (edge->IsControlEdge()) {
continue;
}
DCHECK(edge->dst_input() < input_nodes.size()) << DebugString();
int i = edge->dst_input();
input_nodes.at(i) = edge->src();
input_idx.at(i) = edge->src_output();
}
// Note: technically, we could use a very generic type when some of the inputs
// are unknown. But there is an expectation that a node will have complete
// inputs soon, so updating intermediate types is largely unnecessary.
for (const auto* node : input_nodes) {
if (node == nullptr) {
// Incomplete inputs, bail.
ClearTypeInfo();
return;
}
}
static FullTypeDef* no_type = new FullTypeDef();
std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
for (int i = 0; i < input_nodes.size(); i++) {
const auto* node = input_nodes[i];
if (node->def().has_experimental_type()) {
const auto& node_t = node->def().experimental_type();
if (node_t.type_id() != TFT_UNSET) {
int ix = input_idx[i];
DCHECK(ix < node_t.args_size())
<< "input " << i << " should have an output " << ix
<< " but instead only has " << node_t.args_size()
<< " outputs: " << node_t.DebugString();
input_types.emplace_back(node_t.args(ix));
} else {
input_types.emplace_back(*no_type);
}
} else {
// Incomplete inputs, bail.
ClearTypeInfo();
return;
}
}
const auto infer_type = props_->fwd_type_fn(input_types);
const FullTypeDef infer_typedef = infer_type.ValueOrDie();
if (infer_typedef.type_id() != TFT_UNSET) {
MaybeCopyOnWrite();
*(props_->node_def.mutable_experimental_type()) = infer_typedef;
}
}
|
285691869172413131662679092330979772991
|
graph.cc
|
172099243927919341591512227523808328051
|
CWE-125
|
CVE-2022-23592
|
Tensorflow is an Open Source Machine Learning Framework. TensorFlow's type inference can cause a heap out of bounds read as the bounds checking is done in a `DCHECK` (which is a no-op during production). An attacker can control the `input_idx` variable such that `ix` would be larger than the number of values in `node_t.args`. The fix will be included in TensorFlow 2.8.0. This is the only affected version.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23592
|
220,201
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/c99d98cd189839dcf51aee94e7437b54b31f8abd
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
| 0
|
void Node::RunForwardTypeInference() {
VLOG(4) << "Forward type inference: " << props_->node_def.DebugString();
if (props_->fwd_type_fn == nullptr) {
return;
}
std::vector<Node*> input_nodes(props_->input_types.size(), nullptr);
std::vector<int> input_idx(props_->input_types.size(), 0);
for (const auto& edge : in_edges_) {
if (edge->IsControlEdge()) {
continue;
}
DCHECK(edge->dst_input() < input_nodes.size()) << DebugString();
int i = edge->dst_input();
input_nodes.at(i) = edge->src();
input_idx.at(i) = edge->src_output();
}
// Note: technically, we could use a very generic type when some of the inputs
// are unknown. But there is an expectation that a node will have complete
// inputs soon, so updating intermediate types is largely unnecessary.
for (const auto* node : input_nodes) {
if (node == nullptr) {
// Incomplete inputs, bail.
ClearTypeInfo();
return;
}
}
static FullTypeDef* no_type = new FullTypeDef();
std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
for (int i = 0; i < input_nodes.size(); i++) {
const auto* node = input_nodes[i];
if (node->def().has_experimental_type()) {
const auto& node_t = node->def().experimental_type();
if (node_t.type_id() != TFT_UNSET) {
int ix = input_idx[i];
if (ix >= node_t.args_size()) {
LOG(WARNING) << name() << " has bad type information: input " << i
<< " should have an output " << ix
<< " but instead only has " << node_t.args_size()
<< " outputs: " << node_t.DebugString()
<< "\nThis indicates either "
"a bug in op registration or a corrupted graph.";
ClearTypeInfo();
return;
}
input_types.emplace_back(node_t.args(ix));
} else {
input_types.emplace_back(*no_type);
}
} else {
// Incomplete inputs, bail.
ClearTypeInfo();
return;
}
}
const auto infer_type = props_->fwd_type_fn(input_types);
const FullTypeDef infer_typedef = infer_type.ValueOrDie();
if (infer_typedef.type_id() != TFT_UNSET) {
MaybeCopyOnWrite();
*(props_->node_def.mutable_experimental_type()) = infer_typedef;
}
}
|
208747443072046126472677622190312892089
|
graph.cc
|
252683577168046425270820661985512954953
|
CWE-125
|
CVE-2022-23592
|
Tensorflow is an Open Source Machine Learning Framework. TensorFlow's type inference can cause a heap out of bounds read as the bounds checking is done in a `DCHECK` (which is a no-op during production). An attacker can control the `input_idx` variable such that `ix` would be larger than the number of values in `node_t.args`. The fix will be included in TensorFlow 2.8.0. This is the only affected version.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23592
|
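A compact illustration of why the two records above differ; `DCHECK_SKETCH` is a stand-in macro, not TensorFlow's. A debug-only assertion compiles away under NDEBUG, so in production it cannot stop an attacker-controlled index; the fixed code performs an always-on check and bails out instead.

#include <cstdio>
#include <cstdlib>
#include <vector>

#ifdef NDEBUG
#define DCHECK_SKETCH(cond) ((void)0)  // release build: the check vanishes
#else
#define DCHECK_SKETCH(cond)    \
  do {                         \
    if (!(cond)) std::abort(); \
  } while (0)
#endif

// Returns false instead of reading out of bounds when ix is invalid.
bool ReadArg(const std::vector<int>& args, int ix, int* out) {
  DCHECK_SKETCH(ix < static_cast<int>(args.size()));  // no-op under NDEBUG
  if (ix < 0 || ix >= static_cast<int>(args.size())) {
    std::fprintf(stderr, "bad type information: index %d, size %zu\n", ix,
                 args.size());
    return false;  // mirrors the fixed path: warn, clear, and return
  }
  *out = args[ix];
  return true;
}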
195,038
|
mruby
|
27d1e0132a0804581dca28df042e7047fd27eaa8
|
https://github.com/mruby/mruby
|
https://github.com/mruby/mruby/commit/27d1e0132a0804581dca28df042e7047fd27eaa8
|
array.c: fix `mrb_ary_shift_m` initialization bug.
The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
| 1
|
mrb_ary_shift_m(mrb_state *mrb, mrb_value self)
{
struct RArray *a = mrb_ary_ptr(self);
mrb_int len = ARY_LEN(a);
mrb_int n;
mrb_value val;
if (mrb_get_args(mrb, "|i", &n) == 0) {
return mrb_ary_shift(mrb, self);
};
ary_modify_check(mrb, a);
if (len == 0 || n == 0) return mrb_ary_new(mrb);
if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift");
if (n > len) n = len;
val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a));
if (ARY_SHARED_P(a)) {
L_SHIFT:
a->as.heap.ptr+=n;
a->as.heap.len-=n;
return val;
}
if (len > ARY_SHIFT_SHARED_MIN) {
ary_make_shared(mrb, a);
goto L_SHIFT;
}
else if (len == n) {
ARY_SET_LEN(a, 0);
}
else {
mrb_value *ptr = ARY_PTR(a);
mrb_int size = len-n;
while (size--) {
*ptr = *(ptr+n);
++ptr;
}
ARY_SET_LEN(a, len-n);
}
return val;
}
|
88987793594626442814152795226896894437
|
array.c
|
131985777969528154957566525214352537878
|
CWE-476
|
CVE-2021-4188
|
mruby is vulnerable to NULL Pointer Dereference
|
https://nvd.nist.gov/vuln/detail/CVE-2021-4188
|
220,442
|
mruby
|
27d1e0132a0804581dca28df042e7047fd27eaa8
|
https://github.com/mruby/mruby
|
https://github.com/mruby/mruby/commit/27d1e0132a0804581dca28df042e7047fd27eaa8
|
array.c: fix `mrb_ary_shift_m` initialization bug.
The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
| 0
|
mrb_ary_shift_m(mrb_state *mrb, mrb_value self)
{
mrb_int n;
if (mrb_get_args(mrb, "|i", &n) == 0) {
return mrb_ary_shift(mrb, self);
}
struct RArray *a = mrb_ary_ptr(self);
mrb_int len = ARY_LEN(a);
mrb_value val;
ary_modify_check(mrb, a);
if (len == 0 || n == 0) return mrb_ary_new(mrb);
if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift");
if (n > len) n = len;
val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a));
if (ARY_SHARED_P(a)) {
L_SHIFT:
a->as.heap.ptr+=n;
a->as.heap.len-=n;
return val;
}
if (len > ARY_SHIFT_SHARED_MIN) {
ary_make_shared(mrb, a);
goto L_SHIFT;
}
else if (len == n) {
ARY_SET_LEN(a, 0);
}
else {
mrb_value *ptr = ARY_PTR(a);
mrb_int size = len-n;
while (size--) {
*ptr = *(ptr+n);
++ptr;
}
ARY_SET_LEN(a, len-n);
}
return val;
}
|
336824346603495353101799104649854425750
|
array.c
|
295526445825727607536544634773604768998
|
CWE-476
|
CVE-2021-4188
|
mruby is vulnerable to NULL Pointer Dereference
|
https://nvd.nist.gov/vuln/detail/CVE-2021-4188
|
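An illustrative C++ analogue of the reordering in the fixed mruby record above (this is not mruby internals): values derived from a container are read only after any call that can mutate or reallocate it, which is why `mrb_get_args` was hoisted above the `ARY_PTR`/`ARY_LEN` reads.

#include <cstdio>
#include <vector>

// Stands in for mrb_get_args: may grow the array and invalidate old pointers.
void ParseArgsMayMutate(std::vector<int>& ary) { ary.push_back(42); }

int main() {
  std::vector<int> ary{1, 2, 3};
  ParseArgsMayMutate(ary);      // mutate first, as the fix does
  const int* ptr = ary.data();  // snapshot pointer and length only afterwards
  const size_t len = ary.size();
  std::printf("len=%zu last=%d\n", len, ptr[len - 1]);
  return 0;
}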
195,039
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e7f497570abb6b4ae5af4970620cd880e4c0c904
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
| 1
|
void operator()(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int row_stride, int col_stride,
int row_dilation, int col_dilation, const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
DCHECK(data_format == FORMAT_NHWC)
<< "Grouped conv implementation only "
"supports NHWC tensor format for now.";
const int64_t in_depth = input.dim_size(3);
const int64_t patch_depth = filter.dim_size(2);
const int64_t num_groups = in_depth / patch_depth;
// Shuffle input/filter tensors to have group as a leading dimension.
std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4});
  // Compute pre-shuffle dimensions.
auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2),
num_groups, tensor.dim_size(3) / num_groups};
};
  // Compute post-shuffle dimensions.
auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {num_groups, tensor.dim_size(0), tensor.dim_size(1),
tensor.dim_size(2), tensor.dim_size(3) / num_groups};
};
auto& device = ctx->eigen_device<CPUDevice>();
absl::BlockingCounter shuffles_completed(2);
auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); };
// Shuffle input into temporary tensor.
Tensor input_shuffled(input.dtype(), TensorShape(post_shuffle(input)));
input_shuffled.tensor<T, 5>().device(device, on_shuffled) =
input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle);
// Shuffle filter into temporary tensor.
Tensor filter_shuffled(filter.dtype(), TensorShape(post_shuffle(filter)));
filter_shuffled.tensor<T, 5>().device(device, on_shuffled) =
filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle);
// Wait for the completion of input/filter shuffles.
shuffles_completed.Wait();
// Write group convolution results into temporary output tensor.
Tensor output_shuffled(output->dtype(), TensorShape(post_shuffle(*output)));
for (int64_t i = 0; i < num_groups; ++i) {
// TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor
// will lead to deadlock, SpatialConvolution has to use async Eigen
// assignment). This requires small changes to Eigen to support async
    // execution for the tensor chipping operation.
// TODO(ezhulenev): Grouped convolution should also support 1x1 filter
// optimization.
auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i);
auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i);
auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i);
if (padding == EXPLICIT) {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
static_cast<int>(explicit_paddings[2]),
static_cast<int>(explicit_paddings[3]),
static_cast<int>(explicit_paddings[4]),
static_cast<int>(explicit_paddings[5]));
} else {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
BrainPadding2EigenPadding(padding));
}
}
// Shuffle temporary output back into pre-shuffled shape.
std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4});
output->shaped<T, 5>(pre_shuffle(*output)).device(device) =
output_shuffled.tensor<T, 5>().shuffle(rev_shuffle);
}
|
257618220779157714024325768166416151732
|
conv_ops.cc
|
252300068611383622428481854806618645318
|
CWE-354
|
CVE-2021-41206
|
TensorFlow is an open source platform for machine learning. In affected versions several TensorFlow operations are missing validation for the shapes of the tensor arguments involved in the call. Depending on the API, this can result in undefined behavior and segfault or `CHECK`-fail related crashes but in some scenarios writes and reads from heap populated arrays are also possible. We have discovered these issues internally via tooling while working on improving/testing GPU op determinism. As such, we don't have reproducers and there will be multiple fixes for these issues. These fixes will be included in TensorFlow 2.7.0. We will also cherrypick these commits on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-41206
|
220,449
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e7f497570abb6b4ae5af4970620cd880e4c0c904
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
| 0
|
void operator()(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int row_stride, int col_stride,
int row_dilation, int col_dilation, const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
DCHECK(data_format == FORMAT_NHWC)
<< "Grouped conv implementation only "
"supports NHWC tensor format for now.";
const int64_t in_depth = input.dim_size(3);
const int64_t patch_depth = filter.dim_size(2);
const int64_t num_groups = in_depth / patch_depth;
// Shuffle input/filter tensors to have group as a leading dimension.
std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4});
  // Compute pre-shuffle dimensions.
auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2),
num_groups, tensor.dim_size(3) / num_groups};
};
  // Compute post-shuffle dimensions.
auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {num_groups, tensor.dim_size(0), tensor.dim_size(1),
tensor.dim_size(2), tensor.dim_size(3) / num_groups};
};
auto& device = ctx->eigen_device<CPUDevice>();
absl::BlockingCounter shuffles_completed(2);
auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); };
// Shuffle input into temporary tensor.
Tensor input_shuffled;
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(input.dtype(), TensorShape(post_shuffle(input)),
&input_shuffled));
input_shuffled.tensor<T, 5>().device(device, on_shuffled) =
input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle);
// Shuffle filter into temporary tensor.
Tensor filter_shuffled;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(filter.dtype(),
TensorShape(post_shuffle(filter)),
&filter_shuffled));
filter_shuffled.tensor<T, 5>().device(device, on_shuffled) =
filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle);
// Wait for the completion of input/filter shuffles.
shuffles_completed.Wait();
// Write group convolution results into temporary output tensor.
Tensor output_shuffled;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(output->dtype(),
TensorShape(post_shuffle(*output)),
&output_shuffled));
for (int64_t i = 0; i < num_groups; ++i) {
// TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor
// will lead to deadlock, SpatialConvolution has to use async Eigen
// assignment). This requires small changes to Eigen to support async
    // execution for the tensor chipping operation.
// TODO(ezhulenev): Grouped convolution should also support 1x1 filter
// optimization.
auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i);
auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i);
auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i);
if (padding == EXPLICIT) {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
static_cast<int>(explicit_paddings[2]),
static_cast<int>(explicit_paddings[3]),
static_cast<int>(explicit_paddings[4]),
static_cast<int>(explicit_paddings[5]));
} else {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
BrainPadding2EigenPadding(padding));
}
}
// Shuffle temporary output back into pre-shuffled shape.
std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4});
output->shaped<T, 5>(pre_shuffle(*output)).device(device) =
output_shuffled.tensor<T, 5>().shuffle(rev_shuffle);
}
|
52476148530312265483336987277784785500
|
conv_ops.cc
|
162425470101834995272420301327894414264
|
CWE-354
|
CVE-2021-41206
|
TensorFlow is an open source platform for machine learning. In affected versions several TensorFlow operations are missing validation for the shapes of the tensor arguments involved in the call. Depending on the API, this can result in undefined behavior and segfault or `CHECK`-fail related crashes but in some scenarios writes and reads from heap populated arrays are also possible. We have discovered these issues internally via tooling while working on improving/testing GPU op determinism. As such, we don't have reproducers and there will be multiple fixes for these issues. These fixes will be included in TensorFlow 2.7.0. We will also cherrypick these commits on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-41206
|
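A minimal sketch of the difference between the two Conv2D records: the vulnerable version constructs its temporaries directly and uses them even when allocation fails under memory pressure, while the fix routes every temporary through a fallible API and propagates the error. `AllocateTemp` below is illustrative, not `OpKernelContext::allocate_temp`.

#include <cstddef>
#include <cstdio>
#include <memory>
#include <new>

// Fallible allocation: reports OOM instead of letting the caller segfault.
bool AllocateTemp(std::size_t elems, std::unique_ptr<float[]>* out) {
  float* p = new (std::nothrow) float[elems];
  if (p == nullptr) return false;  // caller must check, as OP_REQUIRES_OK does
  out->reset(p);
  return true;
}

int main() {
  std::unique_ptr<float[]> buf;
  if (!AllocateTemp(1024, &buf)) {
    std::fputs("temp allocation failed\n", stderr);
    return 1;  // bail out instead of writing through a bad buffer
  }
  buf[0] = 1.0f;
  return 0;
}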
195,040
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e21af685e1828f7ca65038307df5cc06de4479e8
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
| 1
|
Status BuildXlaCompilationCache(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
XlaCompilationCache** cache) {
if (platform_info.xla_device_metadata()) {
*cache = new XlaCompilationCache(
platform_info.xla_device_metadata()->client(),
platform_info.xla_device_metadata()->jit_device_type());
return Status::OK();
}
auto platform =
se::MultiPlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.ValueOrDie());
if (!compiler_for_platform.ok()) {
// In some rare cases (usually in unit tests with very small clusters) we
// may end up transforming an XLA cluster with at least one GPU operation
// (which would normally force the cluster to be compiled using XLA:GPU)
// into an XLA cluster with no GPU operations (i.e. containing only CPU
    // operations). Such a cluster can fail compilation (in a way that
// MarkForCompilation could not have detected) if the CPU JIT is not linked
// in.
//
// So bail out of _XlaCompile in this case, and let the executor handle the
// situation for us.
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.ValueOrDie()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.ValueOrDie());
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(absl::optional<std::set<int>> gpu_ids,
ParseVisibleDeviceList(allowed_gpus));
client_options.set_allowed_devices(gpu_ids);
auto client = xla::ClientLibrary::GetOrCreateLocalClient(client_options);
if (!client.ok()) {
return client.status();
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(platform_info.device_type().type(),
®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_info.device_type().type());
}
*cache = new XlaCompilationCache(
client.ValueOrDie(), DeviceType(registration->compilation_device_name));
return Status::OK();
}
|
179065639871904945359341382009364285020
|
xla_platform_info.cc
|
171804916137745205288117058026592469555
|
CWE-476
|
CVE-2022-23595
|
Tensorflow is an Open Source Machine Learning Framework. When building an XLA compilation cache, if default settings are used, TensorFlow triggers a null pointer dereference. In the default scenario, all devices are allowed, so `flr->config_proto` is `nullptr`. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23595
|
220,463
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e21af685e1828f7ca65038307df5cc06de4479e8
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
| 0
|
Status BuildXlaCompilationCache(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
XlaCompilationCache** cache) {
if (platform_info.xla_device_metadata()) {
*cache = new XlaCompilationCache(
platform_info.xla_device_metadata()->client(),
platform_info.xla_device_metadata()->jit_device_type());
return Status::OK();
}
auto platform =
se::MultiPlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.ValueOrDie());
if (!compiler_for_platform.ok()) {
// In some rare cases (usually in unit tests with very small clusters) we
// may end up transforming an XLA cluster with at least one GPU operation
// (which would normally force the cluster to be compiled using XLA:GPU)
// into an XLA cluster with no GPU operations (i.e. containing only CPU
    // operations). Such a cluster can fail compilation (in a way that
// MarkForCompilation could not have detected) if the CPU JIT is not linked
// in.
//
// So bail out of _XlaCompile in this case, and let the executor handle the
// situation for us.
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.ValueOrDie()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.ValueOrDie());
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
if (flr->config_proto()) {
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(absl::optional<std::set<int>> gpu_ids,
ParseVisibleDeviceList(allowed_gpus));
client_options.set_allowed_devices(gpu_ids);
}
auto client = xla::ClientLibrary::GetOrCreateLocalClient(client_options);
if (!client.ok()) {
return client.status();
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(platform_info.device_type().type(),
®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_info.device_type().type());
}
*cache = new XlaCompilationCache(
client.ValueOrDie(), DeviceType(registration->compilation_device_name));
return Status::OK();
}
|
150487232572114145456611052017035566512
|
xla_platform_info.cc
|
318276067980065095571736754899104138947
|
CWE-476
|
CVE-2022-23595
|
Tensorflow is an Open Source Machine Learning Framework. When building an XLA compilation cache, if default settings are used, TensorFlow triggers a null pointer dereference. In the default scenario, all devices are allowed, so `flr->config_proto` is `nullptr`. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23595
|
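A small sketch of the guard added in the fixed record; `Config` and `AllowedDevices` are hypothetical stand-ins, not the XLA API. The optional configuration pointer is dereferenced only after a null check, and a null config falls back to the default of allowing all devices.

#include <optional>
#include <set>
#include <string>

struct Config {
  std::string visible_device_list;  // e.g. "0,2"
};

// std::nullopt means "no restriction": all devices allowed, the default
// case that previously dereferenced a null pointer.
std::optional<std::set<int>> AllowedDevices(const Config* config) {
  if (config == nullptr) return std::nullopt;  // the guard the fix adds
  std::set<int> ids;
  // (parsing of config->visible_device_list elided for brevity)
  return ids;
}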
195,055
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/2b7100d6cdff36aa21010a82269bc05a6d1cc74a
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions in different formats, and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor& indices = context->input(0);
const Tensor& values = context->input(1);
const Tensor& shape = context->input(2);
const Tensor& weights = context->input(3);
bool use_weights = weights.NumElements() > 0;
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()),
errors::InvalidArgument(
"Input indices must be a 2-dimensional tensor. Got: ",
indices.shape().DebugString()));
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == values.shape(),
errors::InvalidArgument(
"Weights and values must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; values shape: ", values.shape().DebugString()));
}
OP_REQUIRES(context, shape.NumElements() != 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
bool is_1d = shape.NumElements() == 1;
auto shape_vector = shape.flat<int64_t>();
int num_batches = is_1d ? 1 : shape_vector(0);
int num_values = values.NumElements();
for (int b = 0; b < shape_vector.size(); b++) {
OP_REQUIRES(context, shape_vector(b) >= 0,
errors::InvalidArgument(
"Elements in dense_shape must be >= 0. Instead got:",
shape.DebugString()));
}
OP_REQUIRES(context, num_values == indices.shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices.",
"Got ", num_values,
" values, indices shape: ", indices.shape().DebugString()));
const auto indices_values = indices.matrix<int64_t>();
const auto values_values = values.flat<T>();
const auto weight_values = weights.flat<W>();
auto per_batch_counts = BatchedMap<W>(num_batches);
T max_value = 0;
OP_REQUIRES(context, num_values <= indices.shape().dim_size(0),
errors::InvalidArgument(
"The first dimension of indices must be equal to or "
"greather than number of values. ( ",
indices.shape().dim_size(0), " vs. ", num_values, " )"));
OP_REQUIRES(context, indices.shape().dim_size(1) > 0,
errors::InvalidArgument("The second dimension of indices must "
"be greater than 0. Received: ",
indices.shape().dim_size(1)));
for (int idx = 0; idx < num_values; ++idx) {
int batch = is_1d ? 0 : indices_values(idx, 0);
if (batch >= num_batches) {
OP_REQUIRES(context, batch < num_batches,
errors::InvalidArgument(
"Indices value along the first dimension must be ",
"lower than the first index of the shape.", "Got ",
batch, " as batch and ", num_batches,
" as the first dimension of the shape."));
}
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[batch][value] = 1;
} else if (use_weights) {
per_batch_counts[batch][value] += weight_values(idx);
} else {
per_batch_counts[batch][value]++;
}
if (value > max_value) {
max_value = value;
}
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
|
115744370413617881150207979427400512016
|
count_ops.cc
|
290832582717285970119064032382621433475
|
CWE-787
|
CVE-2022-21740
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `SparseCountSparseOutput` is vulnerable to a heap overflow. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21740
|
220,804
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/2b7100d6cdff36aa21010a82269bc05a6d1cc74a
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions in different formats, and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
| 0
|
void Compute(OpKernelContext* context) override {
const Tensor& splits = context->input(0);
const Tensor& values = context->input(1);
const Tensor& weights = context->input(2);
bool use_weights = weights.NumElements() > 0;
bool is_1d = false;
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == values.shape(),
errors::InvalidArgument(
"Weights and values must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; values shape: ", values.shape().DebugString()));
}
const auto splits_values = splits.flat<int64_t>();
const auto values_values = values.flat<T>();
const auto weight_values = weights.flat<W>();
int num_batches = splits.NumElements() - 1;
int num_values = values.NumElements();
OP_REQUIRES(
context, num_batches > 0,
errors::InvalidArgument(
"Must provide at least 2 elements for the splits argument"));
OP_REQUIRES(context, splits_values(0) == 0,
errors::InvalidArgument("Splits must start with 0, not with ",
splits_values(0)));
OP_REQUIRES(context, splits_values(num_batches) == num_values,
errors::InvalidArgument(
"Splits must end with the number of values, got ",
splits_values(num_batches), " instead of ", num_values));
auto per_batch_counts = BatchedMap<W>(num_batches);
T max_value = 0;
int batch_idx = 0;
for (int idx = 0; idx < num_values; ++idx) {
while (idx >= splits_values(batch_idx)) {
batch_idx++;
}
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[batch_idx - 1][value] = 1;
} else if (use_weights) {
per_batch_counts[batch_idx - 1][value] += weight_values(idx);
} else {
per_batch_counts[batch_idx - 1][value]++;
}
if (value > max_value) {
max_value = value;
}
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
|
321329284400462468105618833406255634390
|
count_ops.cc
|
221778566959720819887290009238961995785
|
CWE-787
|
CVE-2022-21740
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `SparseCountSparseOutput` is vulnerable to a heap overflow. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21740
|
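A hedged sketch of the consolidated validation in the fixed record above: a `splits` vector must describe contiguous row ranges that start at 0, end at the number of values, and never decrease before it is used to derive batch indices. The non-decreasing check is implied by the fixed loop (`batch_idx` only ever increases) rather than stated explicitly there, so treat it as an assumption here.

#include <cstddef>
#include <cstdint>
#include <vector>

// True iff `splits` partitions [0, num_values) into valid batch ranges.
bool ValidSplits(const std::vector<int64_t>& splits, int64_t num_values) {
  if (splits.size() < 2) return false;            // need at least one batch
  if (splits.front() != 0) return false;          // must start at 0
  if (splits.back() != num_values) return false;  // must cover all values
  for (std::size_t i = 1; i < splits.size(); ++i)
    if (splits[i] < splits[i - 1]) return false;  // must be non-decreasing
  return true;
}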
195,056
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/8c6f391a2282684a25cbfec7687bd5d35261a209
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
| 1
|
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
const float* bias_data, int array_size,
float* array_data) {
// Note: see b/132215220: in May 2019 we thought it would be OK to replace
// this with the Eigen one-liner:
// return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
// This turned out to severely regress performance: +4ms (i.e. 8%) on
// MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
TFLITE_DCHECK_EQ((array_size % bias_size), 0);
#ifdef USE_NEON
float* array_ptr = array_data;
float* array_end_ptr = array_ptr + array_size;
const auto clamp_min_vec = vdupq_n_f32(clamp_min);
const auto clamp_max_vec = vdupq_n_f32(clamp_max);
for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
int i = 0;
for (; i <= bias_size - 16; i += 16) {
auto b0 = vld1q_f32(bias_data + i);
auto b1 = vld1q_f32(bias_data + i + 4);
auto b2 = vld1q_f32(bias_data + i + 8);
auto b3 = vld1q_f32(bias_data + i + 12);
auto a0 = vld1q_f32(array_ptr + i);
auto a1 = vld1q_f32(array_ptr + i + 4);
auto a2 = vld1q_f32(array_ptr + i + 8);
auto a3 = vld1q_f32(array_ptr + i + 12);
auto x0 = vaddq_f32(a0, b0);
auto x1 = vaddq_f32(a1, b1);
auto x2 = vaddq_f32(a2, b2);
auto x3 = vaddq_f32(a3, b3);
x0 = vmaxq_f32(clamp_min_vec, x0);
x1 = vmaxq_f32(clamp_min_vec, x1);
x2 = vmaxq_f32(clamp_min_vec, x2);
x3 = vmaxq_f32(clamp_min_vec, x3);
x0 = vminq_f32(clamp_max_vec, x0);
x1 = vminq_f32(clamp_max_vec, x1);
x2 = vminq_f32(clamp_max_vec, x2);
x3 = vminq_f32(clamp_max_vec, x3);
vst1q_f32(array_ptr + i, x0);
vst1q_f32(array_ptr + i + 4, x1);
vst1q_f32(array_ptr + i + 8, x2);
vst1q_f32(array_ptr + i + 12, x3);
}
for (; i <= bias_size - 4; i += 4) {
auto b = vld1q_f32(bias_data + i);
auto a = vld1q_f32(array_ptr + i);
auto x = vaddq_f32(a, b);
x = vmaxq_f32(clamp_min_vec, x);
x = vminq_f32(clamp_max_vec, x);
vst1q_f32(array_ptr + i, x);
}
for (; i < bias_size; i++) {
array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
clamp_min, clamp_max);
}
}
#else // not NEON
for (int array_offset = 0; array_offset < array_size;
array_offset += bias_size) {
for (int i = 0; i < bias_size; i++) {
array_data[array_offset + i] = ActivationFunctionWithMinMax(
array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
}
}
#endif
}
|
154263320578941255259441922880599149557
|
common.h
|
11373796702176609664888229687660280569
|
CWE-369
|
CVE-2022-23557
|
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would trigger a division by zero in the `BiasAndClamp` implementation. There is no check that `bias_size` is non-zero. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23557
|
220,841
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/8c6f391a2282684a25cbfec7687bd5d35261a209
|
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
| 0
|
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
const float* bias_data, int array_size,
float* array_data) {
if (bias_size == 0) return;
// Note: see b/132215220: in May 2019 we thought it would be OK to replace
// this with the Eigen one-liner:
// return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
// This turned out to severely regress performance: +4ms (i.e. 8%) on
// MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
TFLITE_DCHECK_EQ((array_size % bias_size), 0);
#ifdef USE_NEON
float* array_ptr = array_data;
float* array_end_ptr = array_ptr + array_size;
const auto clamp_min_vec = vdupq_n_f32(clamp_min);
const auto clamp_max_vec = vdupq_n_f32(clamp_max);
for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
int i = 0;
for (; i <= bias_size - 16; i += 16) {
auto b0 = vld1q_f32(bias_data + i);
auto b1 = vld1q_f32(bias_data + i + 4);
auto b2 = vld1q_f32(bias_data + i + 8);
auto b3 = vld1q_f32(bias_data + i + 12);
auto a0 = vld1q_f32(array_ptr + i);
auto a1 = vld1q_f32(array_ptr + i + 4);
auto a2 = vld1q_f32(array_ptr + i + 8);
auto a3 = vld1q_f32(array_ptr + i + 12);
auto x0 = vaddq_f32(a0, b0);
auto x1 = vaddq_f32(a1, b1);
auto x2 = vaddq_f32(a2, b2);
auto x3 = vaddq_f32(a3, b3);
x0 = vmaxq_f32(clamp_min_vec, x0);
x1 = vmaxq_f32(clamp_min_vec, x1);
x2 = vmaxq_f32(clamp_min_vec, x2);
x3 = vmaxq_f32(clamp_min_vec, x3);
x0 = vminq_f32(clamp_max_vec, x0);
x1 = vminq_f32(clamp_max_vec, x1);
x2 = vminq_f32(clamp_max_vec, x2);
x3 = vminq_f32(clamp_max_vec, x3);
vst1q_f32(array_ptr + i, x0);
vst1q_f32(array_ptr + i + 4, x1);
vst1q_f32(array_ptr + i + 8, x2);
vst1q_f32(array_ptr + i + 12, x3);
}
for (; i <= bias_size - 4; i += 4) {
auto b = vld1q_f32(bias_data + i);
auto a = vld1q_f32(array_ptr + i);
auto x = vaddq_f32(a, b);
x = vmaxq_f32(clamp_min_vec, x);
x = vminq_f32(clamp_max_vec, x);
vst1q_f32(array_ptr + i, x);
}
for (; i < bias_size; i++) {
array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
clamp_min, clamp_max);
}
}
#else // not NEON
for (int array_offset = 0; array_offset < array_size;
array_offset += bias_size) {
for (int i = 0; i < bias_size; i++) {
array_data[array_offset + i] = ActivationFunctionWithMinMax(
array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
}
}
#endif
}
|
163406073569204971648641083480315438791
|
common.h
|
206010119069068373550820723284960883967
|
CWE-369
|
CVE-2022-23557
|
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would trigger a division by zero in the `BiasAndClamp` implementation. There is no check that `bias_size` is non-zero. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23557
|
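A tiny illustration of why the one-line fix above matters: with bias_size == 0 the expression `array_size % bias_size` is a division by zero (undefined behavior), and the NEON row loop's `array_ptr += bias_size` would never advance; the early return sidesteps both. Names below are illustrative.

#include <cstdio>

// Guarded version of the shape check: returns false rather than evaluating
// a modulo-by-zero when bias_size is 0.
bool BiasShapeOk(int array_size, int bias_size) {
  if (bias_size == 0) return false;      // the added safety check
  return (array_size % bias_size) == 0;  // rows must tile the array exactly
}

int main() {
  std::printf("%d %d\n", BiasShapeOk(16, 4), BiasShapeOk(16, 0));
  return 0;
}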
195,059
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/92dba16749fae36c246bec3f9ba474d9ddeb7662
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
| 1
|
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
|
61147310262209694276783937154772465535
|
dependency_optimizer.cc
|
98916752340112642333125918775752240620
|
CWE-617
|
CVE-2022-23579
|
Tensorflow is an Open Source Machine Learning Framework. The Grappler optimizer in TensorFlow can be used to cause a denial of service by altering a `SavedModel` such that `SafeToRemoveIdentity` would trigger `CHECK` failures. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23579
|
220,909
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/92dba16749fae36c246bec3f9ba474d9ddeb7662
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
| 0
|
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
if (input == nullptr) {
VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
return false;
}
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
|
223351651745564626611627672045490996261
|
None
|
CWE-617
|
CVE-2022-23579
|
Tensorflow is an Open Source Machine Learning Framework. The Grappler optimizer in TensorFlow can be used to cause a denial of service by altering a `SavedModel` such that `SafeToRemoveIdentity` would trigger `CHECK` failures. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23579
|
|
195,063
|
gpac
|
5f2c2a16d30229b6241f02fa28e3d6b810d64858
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/5f2c2a16d30229b6241f02fa28e3d6b810d64858
|
fixed #1905
| 1
|
GF_Err mpgviddmx_process(GF_Filter *filter)
{
GF_MPGVidDmxCtx *ctx = gf_filter_get_udta(filter);
GF_FilterPacket *pck, *dst_pck;
u64 byte_offset;
s64 vosh_start = -1;
s64 vosh_end = -1;
GF_Err e;
char *data;
u8 *start;
u32 pck_size;
s32 remain;
//always reparse duration
if (!ctx->duration.num)
mpgviddmx_check_dur(filter, ctx);
pck = gf_filter_pid_get_packet(ctx->ipid);
if (!pck) {
if (gf_filter_pid_is_eos(ctx->ipid)) {
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_TRUE);
if (ctx->opid)
gf_filter_pid_set_eos(ctx->opid);
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = NULL;
return GF_EOS;
}
return GF_OK;
}
data = (char *) gf_filter_pck_get_data(pck, &pck_size);
byte_offset = gf_filter_pck_get_byte_offset(pck);
start = data;
remain = pck_size;
//input pid sets some timescale - we flushed pending data, update cts
if (!ctx->resume_from && ctx->timescale) {
u64 ts = gf_filter_pck_get_cts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->cts || !ctx->recompute_cts)
ctx->cts = ts;
}
ts = gf_filter_pck_get_dts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->dts || !ctx->recompute_cts)
ctx->dts = ts;
if (!ctx->prev_dts) ctx->prev_dts = ts;
else if (ctx->prev_dts != ts) {
u64 diff = ts;
diff -= ctx->prev_dts;
if (!ctx->cur_fps.den) ctx->cur_fps.den = (u32) diff;
else if (ctx->cur_fps.den > diff)
ctx->cur_fps.den = (u32) diff;
}
}
gf_filter_pck_get_framing(pck, &ctx->input_is_au_start, &ctx->input_is_au_end);
//this will force CTS recomputation for each frame
if (ctx->recompute_cts) ctx->input_is_au_start = GF_FALSE;
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = pck;
gf_filter_pck_ref_props(&ctx->src_pck);
}
//we stored some data to find the complete vosh, aggregate this packet with the current one
if (!ctx->resume_from && ctx->hdr_store_size) {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size) {
ctx->hdr_store_alloc = ctx->hdr_store_size + pck_size;
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data, sizeof(char)*pck_size);
if (byte_offset != GF_FILTER_NO_BO) {
if (byte_offset >= ctx->hdr_store_size)
byte_offset -= ctx->hdr_store_size;
else
byte_offset = GF_FILTER_NO_BO;
}
ctx->hdr_store_size += pck_size;
start = data = ctx->hdr_store;
remain = pck_size = ctx->hdr_store_size;
}
if (ctx->resume_from) {
if (gf_filter_pid_would_block(ctx->opid))
return GF_OK;
//resume from data copied internally
if (ctx->hdr_store_size) {
assert(ctx->resume_from <= ctx->hdr_store_size);
start = data = ctx->hdr_store + ctx->resume_from;
remain = pck_size = ctx->hdr_store_size - ctx->resume_from;
} else {
assert(remain >= (s32) ctx->resume_from);
start += ctx->resume_from;
remain -= ctx->resume_from;
}
ctx->resume_from = 0;
}
if (!ctx->bs) {
ctx->bs = gf_bs_new(start, remain, GF_BITSTREAM_READ);
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
}
if (!ctx->vparser) {
ctx->vparser = gf_m4v_parser_bs_new(ctx->bs, ctx->is_mpg12);
}
while (remain) {
Bool full_frame;
u8 *pck_data;
s32 current;
u8 sc_type, forced_sc_type=0;
Bool sc_type_forced = GF_FALSE;
Bool skip_pck = GF_FALSE;
u8 ftype;
u32 tinc;
u64 size=0;
u64 fstart;
Bool is_coded;
u32 bytes_from_store = 0;
u32 hdr_offset = 0;
Bool copy_last_bytes = GF_FALSE;
//not enough bytes to parse start code
if (remain<5) {
memcpy(ctx->hdr_store, start, remain);
ctx->bytes_in_header = remain;
break;
}
current = -1;
//we have some potential bytes of a start code in the store, copy some more bytes and check if valid start code.
//if not, dispatch these bytes as continuation of the data
if (ctx->bytes_in_header) {
memcpy(ctx->hdr_store + ctx->bytes_in_header, start, 8 - ctx->bytes_in_header);
current = mpgviddmx_next_start_code(ctx->hdr_store, 8);
//no start code in stored buffer
if ((current<0) || (current >= (s32) ctx->bytes_in_header) ) {
if (ctx->opid) {
dst_pck = gf_filter_pck_new_alloc(ctx->opid, ctx->bytes_in_header, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
memcpy(pck_data, ctx->hdr_store, ctx->bytes_in_header);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - ctx->bytes_in_header);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
if (current<0) current = -1;
else current -= ctx->bytes_in_header;
ctx->bytes_in_header = 0;
} else {
//we have a valid start code, check which byte in our store or in the packet payload is the start code type
//and remember its location to reinit the parser from there
hdr_offset = 4 - ctx->bytes_in_header + current;
//bytes still to dispatch
bytes_from_store = ctx->bytes_in_header;
ctx->bytes_in_header = 0;
if (!hdr_offset) {
forced_sc_type = ctx->hdr_store[current+3];
} else {
forced_sc_type = start[hdr_offset-1];
}
sc_type_forced = GF_TRUE;
}
}
//no start code in store, look for start code in packet
if (current == -1) {
//locate next start code
current = mpgviddmx_next_start_code(start, remain);
//no start code, dispatch the block
if (current<0) {
u8 b3, b2, b1;
if (! ctx->frame_started) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[MPGVid] no start code in block and no frame started, discarding data\n" ));
break;
}
size = remain;
b3 = start[remain-3];
b2 = start[remain-2];
b1 = start[remain-1];
//we may have a start code at the end of the packet, store it and don't dispatch the last 3 bytes!
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
memcpy(pck_data, start, (size_t) size);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
}
assert(current>=0);
//if we are in the middle of parsing the vosh, skip over bytes remaining from previous obj not parsed
if ((vosh_start>=0) && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//also skip if no output pid
if (!ctx->opid && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//dispatch remaining bytes
if (current>0) {
//flush remaining
dst_pck = gf_filter_pck_new_alloc(ctx->opid, current, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_TRUE);
//bytes were partly in store, partly in packet
if (bytes_from_store) {
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
assert(bytes_from_store>=(u32) current);
bytes_from_store -= current;
memcpy(pck_data, ctx->hdr_store, current);
} else {
//bytes were only in packet
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
memcpy(pck_data, start, current);
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
gf_filter_pck_set_carousel_version(dst_pck, 1);
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
//parse headers
//we have a start code loaded, eg the data packet does not have a full start code at the beginning
if (sc_type_forced) {
gf_bs_reassign_buffer(ctx->bs, start + hdr_offset, remain - hdr_offset);
sc_type = forced_sc_type;
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
gf_bs_read_int(ctx->bs, 24);
sc_type = gf_bs_read_int(ctx->bs, 8);
}
if (ctx->is_mpg12) {
switch (sc_type) {
case M2V_SEQ_START_CODE:
case M2V_EXT_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
mpgviddmx_check_pid(filter, ctx, 0, NULL);
}
break;
case M2V_PIC_START_CODE:
break;
default:
break;
}
} else {
u8 PL;
switch (sc_type) {
case M4V_VOS_START_CODE:
ctx->dsi.VideoPL = (u8) gf_bs_read_u8(ctx->bs);
vosh_start = start - (u8 *)data;
skip_pck = GF_TRUE;
assert(remain>=5);
start += 5;
remain -= 5;
break;
case M4V_VOL_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
PL = ctx->dsi.VideoPL;
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
ctx->dsi.VideoPL = PL;
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - (u32) vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
u32 obj_size = (u32) gf_m4v_get_object_start(ctx->vparser);
if (vosh_start<0) vosh_start = 0;
vosh_end = start - (u8 *)data + obj_size;
vosh_end -= vosh_start;
mpgviddmx_check_pid(filter, ctx,(u32) vosh_end, data+vosh_start);
skip_pck = GF_TRUE;
assert(remain>=(s32) obj_size);
start += obj_size;
remain -= obj_size;
}
break;
case M4V_VOP_START_CODE:
case M4V_GOV_START_CODE:
break;
case M4V_VO_START_CODE:
case M4V_VISOBJ_START_CODE:
default:
if (vosh_start>=0) {
skip_pck = GF_TRUE;
assert(remain>=4);
start += 4;
remain -= 4;
}
break;
}
}
if (skip_pck) {
continue;
}
if (!ctx->opid) {
assert(remain>=4);
start += 4;
remain -= 4;
continue;
}
if (!ctx->is_playing) {
ctx->resume_from = (u32) ((char *)start - (char *)data);
return GF_OK;
}
//at this point, we no longer reaggregate packets
ctx->hdr_store_size = 0;
if (ctx->in_seek) {
u64 nb_frames_at_seek = (u64) (ctx->start_range * ctx->cur_fps.num);
if (ctx->cts + ctx->cur_fps.den >= nb_frames_at_seek) {
//u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek;
ctx->in_seek = GF_FALSE;
}
}
//may happen that after all our checks, only 4 bytes are left, continue to store these 4 bytes
if (remain<5)
continue;
//good to go
gf_m4v_parser_reset(ctx->vparser, sc_type_forced ? forced_sc_type + 1 : 0);
size = 0;
e = gf_m4v_parse_frame(ctx->vparser, &ctx->dsi, &ftype, &tinc, &size, &fstart, &is_coded);
//true if we strip VO and VISOBJ
//assert(!fstart);
//we skipped bytes already in store + end of start code present in packet, so the size of the first object
//needs adjustment
if (bytes_from_store) {
size += bytes_from_store + hdr_offset;
}
if ((e == GF_EOS) && !ctx->input_is_au_end) {
u8 b3 = start[remain-3];
u8 b2 = start[remain-2];
u8 b1 = start[remain-1];
//we may have a start code at the end of the packet, store it and don't dispatch the last 3 bytes!
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
full_frame = GF_FALSE;
} else {
full_frame = GF_TRUE;
}
if (!is_coded) {
/*if prev is B and we're parsing a packed bitstream discard n-vop*/
if (ctx->forced_packed && ctx->b_frames) {
ctx->is_packed = GF_TRUE;
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to import at variable frame rate, skip*/
if (ctx->vfr) {
ctx->is_vfr = GF_TRUE;
mpgviddmx_update_time(ctx);
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to keep non coded frame (constant frame rate), add*/
}
if (ftype==2) {
//count number of B-frames since last ref
ctx->b_frames++;
ctx->nb_b++;
} else {
//flush all pending packets
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_FALSE);
//remember the CTS of the last ref
ctx->last_ref_cts = ctx->cts;
if (ctx->max_b < ctx->b_frames) ctx->max_b = ctx->b_frames;
ctx->b_frames = 0;
if (ftype)
ctx->nb_p++;
else
ctx->nb_i++;
}
ctx->nb_frames++;
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
//bytes come from both our store and the data packet
if (bytes_from_store) {
memcpy(pck_data, ctx->hdr_store+current, bytes_from_store);
assert(size >= bytes_from_store);
size -= bytes_from_store;
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
memcpy(pck_data + bytes_from_store, start, (size_t) size);
} else {
//bytes only come from the data packet
memcpy(pck_data, start, (size_t) size);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset + start - (u8 *) data);
}
}
assert(pck_data[0] == 0);
assert(pck_data[1] == 0);
assert(pck_data[2] == 0x01);
gf_filter_pck_set_framing(dst_pck, GF_TRUE, (full_frame || ctx->input_is_au_end) ? GF_TRUE : GF_FALSE);
gf_filter_pck_set_cts(dst_pck, ctx->cts);
gf_filter_pck_set_dts(dst_pck, ctx->dts);
if (ctx->input_is_au_start) {
ctx->input_is_au_start = GF_FALSE;
} else {
//we use the carousel flag temporarily to indicate the cts must be recomputed
gf_filter_pck_set_carousel_version(dst_pck, 1);
}
gf_filter_pck_set_sap(dst_pck, ftype ? GF_FILTER_SAP_NONE : GF_FILTER_SAP_1);
gf_filter_pck_set_duration(dst_pck, ctx->cur_fps.den);
if (ctx->in_seek) gf_filter_pck_set_seek_flag(dst_pck, GF_TRUE);
ctx->frame_started = GF_TRUE;
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
mpgviddmx_update_time(ctx);
if (!full_frame) {
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
assert(remain>=size);
start += size;
remain -= (s32) size;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
}
|
49630978088913986571244550780083545600
|
reframe_mpgvid.c
|
148306570807841160156662867455353144265
|
CWE-476
|
CVE-2021-40575
|
The binary MP4Box in Gpac 1.0.1 has a null pointer dereference vulnerability in the mpgviddmx_process function in reframe_mpgvid.c, which allows attackers to cause a denial of service. This vulnerability is possibly due to an incomplete fix for CVE-2021-40566.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40575
|
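The fixed revision of mpgviddmx_process in the next record re-checks remain < 5 after the dispatch branches and before the 4-byte start-code read, stashing any leftover bytes for the next invocation instead of reading past the buffer. A simplified sketch of that re-check, with a reduced context struct rather than the GPAC types:

#include <cstdint>
#include <cstring>

struct ParseCtx { uint8_t hdr_store[8]; int bytes_in_header; };  // stand-in

// Assumes 0 <= remain; the caller breaks out of its parse loop on false.
bool EnoughForStartCode(ParseCtx *ctx, const uint8_t *start, int remain) {
  if (remain < 5) {
    memcpy(ctx->hdr_store, start, (size_t) remain);  // keep the tail for later
    ctx->bytes_in_header = remain;
    return false;  // not enough bytes for start code + type
  }
  return true;
}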
220,924
|
gpac
|
5f2c2a16d30229b6241f02fa28e3d6b810d64858
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/5f2c2a16d30229b6241f02fa28e3d6b810d64858
|
fixed #1905
| 0
|
GF_Err mpgviddmx_process(GF_Filter *filter)
{
GF_MPGVidDmxCtx *ctx = gf_filter_get_udta(filter);
GF_FilterPacket *pck, *dst_pck;
u64 byte_offset;
s64 vosh_start = -1;
s64 vosh_end = -1;
GF_Err e;
char *data;
u8 *start;
u32 pck_size;
s32 remain;
//always reparse duration
if (!ctx->duration.num)
mpgviddmx_check_dur(filter, ctx);
pck = gf_filter_pid_get_packet(ctx->ipid);
if (!pck) {
if (gf_filter_pid_is_eos(ctx->ipid)) {
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_TRUE);
if (ctx->opid)
gf_filter_pid_set_eos(ctx->opid);
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = NULL;
return GF_EOS;
}
return GF_OK;
}
data = (char *) gf_filter_pck_get_data(pck, &pck_size);
byte_offset = gf_filter_pck_get_byte_offset(pck);
start = data;
remain = pck_size;
//input pid sets some timescale - we flushed pending data, update cts
if (!ctx->resume_from && ctx->timescale) {
u64 ts = gf_filter_pck_get_cts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->cts || !ctx->recompute_cts)
ctx->cts = ts;
}
ts = gf_filter_pck_get_dts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->dts || !ctx->recompute_cts)
ctx->dts = ts;
if (!ctx->prev_dts) ctx->prev_dts = ts;
else if (ctx->prev_dts != ts) {
u64 diff = ts;
diff -= ctx->prev_dts;
if (!ctx->cur_fps.den) ctx->cur_fps.den = (u32) diff;
else if (ctx->cur_fps.den > diff)
ctx->cur_fps.den = (u32) diff;
}
}
gf_filter_pck_get_framing(pck, &ctx->input_is_au_start, &ctx->input_is_au_end);
//this will force CTS recomputation for each frame
if (ctx->recompute_cts) ctx->input_is_au_start = GF_FALSE;
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = pck;
gf_filter_pck_ref_props(&ctx->src_pck);
}
//we stored some data to find the complete vosh, aggregate this packet with the current one
if (!ctx->resume_from && ctx->hdr_store_size) {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size) {
ctx->hdr_store_alloc = ctx->hdr_store_size + pck_size;
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data, sizeof(char)*pck_size);
if (byte_offset != GF_FILTER_NO_BO) {
if (byte_offset >= ctx->hdr_store_size)
byte_offset -= ctx->hdr_store_size;
else
byte_offset = GF_FILTER_NO_BO;
}
ctx->hdr_store_size += pck_size;
start = data = ctx->hdr_store;
remain = pck_size = ctx->hdr_store_size;
}
if (ctx->resume_from) {
if (gf_filter_pid_would_block(ctx->opid))
return GF_OK;
//resume from data copied internally
if (ctx->hdr_store_size) {
assert(ctx->resume_from <= ctx->hdr_store_size);
start = data = ctx->hdr_store + ctx->resume_from;
remain = pck_size = ctx->hdr_store_size - ctx->resume_from;
} else {
assert(remain >= (s32) ctx->resume_from);
start += ctx->resume_from;
remain -= ctx->resume_from;
}
ctx->resume_from = 0;
}
if (!ctx->bs) {
ctx->bs = gf_bs_new(start, remain, GF_BITSTREAM_READ);
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
}
if (!ctx->vparser) {
ctx->vparser = gf_m4v_parser_bs_new(ctx->bs, ctx->is_mpg12);
}
while (remain) {
Bool full_frame;
u8 *pck_data;
s32 current;
u8 sc_type, forced_sc_type=0;
Bool sc_type_forced = GF_FALSE;
Bool skip_pck = GF_FALSE;
u8 ftype;
u32 tinc;
u64 size=0;
u64 fstart;
Bool is_coded;
u32 bytes_from_store = 0;
u32 hdr_offset = 0;
Bool copy_last_bytes = GF_FALSE;
//not enough bytes to parse start code
if (remain<5) {
memcpy(ctx->hdr_store, start, remain);
ctx->bytes_in_header = remain;
break;
}
current = -1;
//we have some potential bytes of a start code in the store, copy some more bytes and check if valid start code.
//if not, dispatch these bytes as continuation of the data
if (ctx->bytes_in_header) {
memcpy(ctx->hdr_store + ctx->bytes_in_header, start, 8 - ctx->bytes_in_header);
current = mpgviddmx_next_start_code(ctx->hdr_store, 8);
//no start code in stored buffer
if ((current<0) || (current >= (s32) ctx->bytes_in_header) ) {
if (ctx->opid) {
dst_pck = gf_filter_pck_new_alloc(ctx->opid, ctx->bytes_in_header, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
memcpy(pck_data, ctx->hdr_store, ctx->bytes_in_header);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - ctx->bytes_in_header);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
if (current<0) current = -1;
else current -= ctx->bytes_in_header;
ctx->bytes_in_header = 0;
} else {
//we have a valid start code, check which byte in our store or in the packet payload is the start code type
//and remember its location to reinit the parser from there
hdr_offset = 4 - ctx->bytes_in_header + current;
//bytes still to dispatch
bytes_from_store = ctx->bytes_in_header;
ctx->bytes_in_header = 0;
if (!hdr_offset) {
forced_sc_type = ctx->hdr_store[current+3];
} else {
forced_sc_type = start[hdr_offset-1];
}
sc_type_forced = GF_TRUE;
}
}
//no start code in store, look for start code in packet
if (current == -1) {
//locate next start code
current = mpgviddmx_next_start_code(start, remain);
//no start code, dispatch the block
if (current<0) {
u8 b3, b2, b1;
if (! ctx->frame_started) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[MPGVid] no start code in block and no frame started, discarding data\n" ));
break;
}
size = remain;
b3 = start[remain-3];
b2 = start[remain-2];
b1 = start[remain-1];
//we may have a start code at the end of the packet, store it and don't dispatch the last 3 bytes!
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
memcpy(pck_data, start, (size_t) size);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
}
assert(current>=0);
//if we are in the middle of parsing the vosh, skip over bytes remaining from previous obj not parsed
if ((vosh_start>=0) && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//also skip if no output pid
if (!ctx->opid && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//dispatch remaining bytes
if (current>0) {
//flush remaining
dst_pck = gf_filter_pck_new_alloc(ctx->opid, current, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_TRUE);
//bytes were partly in store, partly in packet
if (bytes_from_store) {
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
assert(bytes_from_store>=(u32) current);
bytes_from_store -= current;
memcpy(pck_data, ctx->hdr_store, current);
} else {
//bytes were only in packet
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
memcpy(pck_data, start, current);
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
gf_filter_pck_set_carousel_version(dst_pck, 1);
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
//not enough bytes to parse start code
if (remain<5) {
memcpy(ctx->hdr_store, start, remain);
ctx->bytes_in_header = remain;
break;
}
//parse headers
//we have a start code loaded, eg the data packet does not have a full start code at the beginning
if (sc_type_forced) {
gf_bs_reassign_buffer(ctx->bs, start + hdr_offset, remain - hdr_offset);
sc_type = forced_sc_type;
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
gf_bs_read_int(ctx->bs, 24);
sc_type = gf_bs_read_int(ctx->bs, 8);
}
if (ctx->is_mpg12) {
switch (sc_type) {
case M2V_SEQ_START_CODE:
case M2V_EXT_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
mpgviddmx_check_pid(filter, ctx, 0, NULL);
}
break;
case M2V_PIC_START_CODE:
break;
default:
break;
}
} else {
u8 PL;
switch (sc_type) {
case M4V_VOS_START_CODE:
ctx->dsi.VideoPL = (u8) gf_bs_read_u8(ctx->bs);
vosh_start = start - (u8 *)data;
skip_pck = GF_TRUE;
assert(remain>=5);
start += 5;
remain -= 5;
break;
case M4V_VOL_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
PL = ctx->dsi.VideoPL;
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
ctx->dsi.VideoPL = PL;
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - (u32) vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
u32 obj_size = (u32) gf_m4v_get_object_start(ctx->vparser);
if (vosh_start<0) vosh_start = 0;
vosh_end = start - (u8 *)data + obj_size;
vosh_end -= vosh_start;
mpgviddmx_check_pid(filter, ctx,(u32) vosh_end, data+vosh_start);
skip_pck = GF_TRUE;
assert(remain>=(s32) obj_size);
start += obj_size;
remain -= obj_size;
}
break;
case M4V_VOP_START_CODE:
case M4V_GOV_START_CODE:
break;
case M4V_VO_START_CODE:
case M4V_VISOBJ_START_CODE:
default:
if (vosh_start>=0) {
skip_pck = GF_TRUE;
assert(remain>=4);
start += 4;
remain -= 4;
}
break;
}
}
if (skip_pck) {
continue;
}
if (!ctx->opid) {
assert(remain>=4);
start += 4;
remain -= 4;
continue;
}
if (!ctx->is_playing) {
ctx->resume_from = (u32) ((char *)start - (char *)data);
return GF_OK;
}
//at this point, we no longer reaggregate packets
ctx->hdr_store_size = 0;
if (ctx->in_seek) {
u64 nb_frames_at_seek = (u64) (ctx->start_range * ctx->cur_fps.num);
if (ctx->cts + ctx->cur_fps.den >= nb_frames_at_seek) {
//u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek;
ctx->in_seek = GF_FALSE;
}
}
//may happen that after all our checks, only 4 bytes are left, continue to store these 4 bytes
if (remain<5)
continue;
//good to go
gf_m4v_parser_reset(ctx->vparser, sc_type_forced ? forced_sc_type + 1 : 0);
size = 0;
e = gf_m4v_parse_frame(ctx->vparser, &ctx->dsi, &ftype, &tinc, &size, &fstart, &is_coded);
//true if we strip VO and VISOBJ
//assert(!fstart);
//we skipped bytes already in store + end of start code present in packet, so the size of the first object
//needs adjustment
if (bytes_from_store) {
size += bytes_from_store + hdr_offset;
}
if ((e == GF_EOS) && !ctx->input_is_au_end) {
u8 b3 = start[remain-3];
u8 b2 = start[remain-2];
u8 b1 = start[remain-1];
//we may have a start code at the end of the packet, store it and don't dispatch the last 3 bytes!
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
full_frame = GF_FALSE;
} else {
full_frame = GF_TRUE;
}
if (!is_coded) {
/*if prev is B and we're parsing a packed bitstream discard n-vop*/
if (ctx->forced_packed && ctx->b_frames) {
ctx->is_packed = GF_TRUE;
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to import at variable frame rate, skip*/
if (ctx->vfr) {
ctx->is_vfr = GF_TRUE;
mpgviddmx_update_time(ctx);
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to keep non coded frame (constant frame rate), add*/
}
if (ftype==2) {
//count number of B-frames since last ref
ctx->b_frames++;
ctx->nb_b++;
} else {
//flush all pending packets
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_FALSE);
//remember the CTS of the last ref
ctx->last_ref_cts = ctx->cts;
if (ctx->max_b < ctx->b_frames) ctx->max_b = ctx->b_frames;
ctx->b_frames = 0;
if (ftype)
ctx->nb_p++;
else
ctx->nb_i++;
}
ctx->nb_frames++;
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
//bytes come from both our store and the data packet
if (bytes_from_store) {
memcpy(pck_data, ctx->hdr_store+current, bytes_from_store);
assert(size >= bytes_from_store);
size -= bytes_from_store;
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
memcpy(pck_data + bytes_from_store, start, (size_t) size);
} else {
//bytes only come from the data packet
memcpy(pck_data, start, (size_t) size);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset + start - (u8 *) data);
}
}
assert(pck_data[0] == 0);
assert(pck_data[1] == 0);
assert(pck_data[2] == 0x01);
gf_filter_pck_set_framing(dst_pck, GF_TRUE, (full_frame || ctx->input_is_au_end) ? GF_TRUE : GF_FALSE);
gf_filter_pck_set_cts(dst_pck, ctx->cts);
gf_filter_pck_set_dts(dst_pck, ctx->dts);
if (ctx->input_is_au_start) {
ctx->input_is_au_start = GF_FALSE;
} else {
//we use the carousel flag temporarily to indicate the cts must be recomputed
gf_filter_pck_set_carousel_version(dst_pck, 1);
}
gf_filter_pck_set_sap(dst_pck, ftype ? GF_FILTER_SAP_NONE : GF_FILTER_SAP_1);
gf_filter_pck_set_duration(dst_pck, ctx->cur_fps.den);
if (ctx->in_seek) gf_filter_pck_set_seek_flag(dst_pck, GF_TRUE);
ctx->frame_started = GF_TRUE;
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
mpgviddmx_update_time(ctx);
if (!full_frame) {
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
assert(remain>=size);
start += size;
remain -= (s32) size;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
}
|
313423902312193703310274340114212883369
|
None
|
CWE-476
|
CVE-2021-40575
|
The binary MP4Box in Gpac 1.0.1 has a null pointer dereference vulnerability in the mpgviddmx_process function in reframe_mpgvid.c, which allows attackers to cause a denial of service. This vulnerability is possibly due to an incomplete fix for CVE-2021-40566.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40575
|
|
195,069
|
gpac
|
f1ae01d745200a258cdf62622f71754c37cb6c30
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/f1ae01d745200a258cdf62622f71754c37cb6c30
|
fixed #1900
| 1
|
static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si)
{
s32 pps_id;
/*s->current_picture.reference= h->nal_ref_idc != 0;*/
gf_bs_read_ue_log(bs, "first_mb_in_slice");
si->slice_type = gf_bs_read_ue_log(bs, "slice_type");
if (si->slice_type > 9) return -1;
pps_id = gf_bs_read_ue_log(bs, "pps_id");
if (pps_id > 255)
return -1;
si->pps = &avc->pps[pps_id];
si->pps->id = pps_id;
if (!si->pps->slice_group_count)
return -2;
si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT];
if (!si->sps->log2_max_frame_num)
return -2;
si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num");
si->field_pic_flag = 0;
if (si->sps->frame_mbs_only_flag) {
/*s->picture_structure= PICT_FRAME;*/
}
else {
si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag");
if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag");
}
if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag)
si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id");
if (si->sps->poc_type == 0) {
si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb");
if (si->pps->pic_order_present && !si->field_pic_flag) {
si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom");
}
}
else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) {
si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0");
if ((si->pps->pic_order_present == 1) && !si->field_pic_flag)
si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1");
}
if (si->pps->redundant_pic_cnt_present) {
si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt");
}
return 0;
}
|
32918828304584753556059241288811637938
|
av_parsers.c
|
168517587328341017594269375399465893964
|
CWE-120
|
CVE-2021-40568
|
A buffer overflow vulnerability exists in Gpac through 1.0.1 via a malformed MP4 file in the svc_parse_slice function in av_parsers.c, which allows attackers to cause a denial of service, or even achieve code execution and escalation of privileges.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40568
|
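The fix in the next record tightens the pps_id test to (pps_id<0) || (pps_id > 255): the exp-Golomb reader returns a signed value, and a bare upper-bound check lets a negative id index before the start of avc->pps. A minimal sketch of the corrected bounds check, with a stand-in PPS table rather than the GPAC AVC types:

#include <array>
#include <cstddef>
#include <cstdint>

struct PPS { int id; };  // illustrative, not the real AVC_PPS

PPS *LookupPps(std::array<PPS, 256> &pps, int32_t pps_id) {
  if (pps_id < 0 || pps_id > 255)
    return nullptr;  // `pps_id > 255` alone misses negative ids
  return &pps[(std::size_t) pps_id];
}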
221,079
|
gpac
|
f1ae01d745200a258cdf62622f71754c37cb6c30
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/f1ae01d745200a258cdf62622f71754c37cb6c30
|
fixed #1900
| 0
|
static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si)
{
s32 pps_id;
/*s->current_picture.reference= h->nal_ref_idc != 0;*/
gf_bs_read_ue_log(bs, "first_mb_in_slice");
si->slice_type = gf_bs_read_ue_log(bs, "slice_type");
if (si->slice_type > 9) return -1;
pps_id = gf_bs_read_ue_log(bs, "pps_id");
if ((pps_id<0) || (pps_id > 255))
return -1;
si->pps = &avc->pps[pps_id];
si->pps->id = pps_id;
if (!si->pps->slice_group_count)
return -2;
si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT];
if (!si->sps->log2_max_frame_num)
return -2;
si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num");
si->field_pic_flag = 0;
if (si->sps->frame_mbs_only_flag) {
/*s->picture_structure= PICT_FRAME;*/
}
else {
si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag");
if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag");
}
if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag)
si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id");
if (si->sps->poc_type == 0) {
si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb");
if (si->pps->pic_order_present && !si->field_pic_flag) {
si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom");
}
}
else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) {
si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0");
if ((si->pps->pic_order_present == 1) && !si->field_pic_flag)
si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1");
}
if (si->pps->redundant_pic_cnt_present) {
si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt");
}
return 0;
}
|
185903137648833315964702797768717766367
|
av_parsers.c
|
336095072032702615903888752582747164805
|
CWE-120
|
CVE-2021-40568
|
A buffer overflow vulnerability exists in Gpac through 1.0.1 via a malformed MP4 file in the svc_parse_slice function in av_parsers.c, which allows attackers to cause a denial of service, or even achieve code execution and escalation of privileges.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40568
|
195,073
|
tensorflow
|
e746adbfcfee15e9cfdb391ff746c765b99bdf9b
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e746adbfcfee15e9cfdb391ff746c765b99bdf9b
|
Prevent use after free in `DecodePng` kernel.
We are cleaning up the memory in `decode` and then we are using an `OP_REQUIRES` to check an invariant on the `decode` data.
PiperOrigin-RevId: 409299145
Change-Id: I4eb93aaca52483eb202e89b78df07fbb2f6cb254
| 1
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
png::CommonFreeDecode(&decode);
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
Status status;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
status = context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output);
} else {
status = context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output);
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (!status.ok()) png::CommonFreeDecode(&decode);
OP_REQUIRES_OK(context, status);
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
|
20785520030401878119367159260444796492
|
decode_image_op.cc
|
250237771010213788823348212493793467085
|
CWE-416
|
CVE-2022-23584
|
Tensorflow is an Open Source Machine Learning Framework. A malicious user can cause a use after free behavior when decoding PNG images. After `png::CommonFreeDecode(&decode)` gets called, the values of `decode.width` and `decode.height` are in an unspecified state. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23584
|
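In the flagged revision above, png::CommonFreeDecode(&decode) runs before the OP_REQUIRES error message reads decode.width and decode.height, which is exactly the use-after-free the CVE describes. One way to express the safe ordering, sketched with simplified stand-in types rather than the TensorFlow PNG context:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Decode { int64_t width, height; uint8_t *rows; };  // stand-in

void FreeDecode(Decode *d) { free(d->rows); d->rows = nullptr; d->width = d->height = 0; }

bool ValidateSize(Decode *d) {
  const int64_t w = d->width, h = d->height;  // capture before any free
  if (w <= 0 || h <= 0 || w >= (1LL << 27) || h >= (1LL << 27)) {
    FreeDecode(d);
    fprintf(stderr, "PNG size too large for int: %lld by %lld\n",
            (long long) w, (long long) h);  // reads the saved copies only
    return false;
  }
  return true;
}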
221,123
|
tensorflow
|
e746adbfcfee15e9cfdb391ff746c765b99bdf9b
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e746adbfcfee15e9cfdb391ff746c765b99bdf9b
|
Prevent use after free in `DecodePng` kernel.
We are cleaning up the memory in `decode` and then we are using an `OP_REQUIRES` to check an invariant on the `decode` data.
PiperOrigin-RevId: 409299145
Change-Id: I4eb93aaca52483eb202e89b78df07fbb2f6cb254
| 0
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
Status status;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
status = context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output);
} else {
status = context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output);
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (!status.ok()) png::CommonFreeDecode(&decode);
OP_REQUIRES_OK(context, status);
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
|
197194550974667972193999280383190029027
|
decode_image_op.cc
|
250371192243587827698671199962137663449
|
CWE-416
|
CVE-2022-23584
|
Tensorflow is an Open Source Machine Learning Framework. A malicious user can cause a use after free behavior when decoding PNG images. After `png::CommonFreeDecode(&decode)` gets called, the values of `decode.width` and `decode.height` are in an unspecified state. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23584
|
195,074
|
gpac
|
a69b567b8c95c72f9560c873c5ab348be058f340
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/a69b567b8c95c72f9560c873c5ab348be058f340
|
fixed #1895
| 1
|
GF_AV1Config *gf_odf_av1_cfg_read_bs_size(GF_BitStream *bs, u32 size)
{
#ifndef GPAC_DISABLE_AV_PARSERS
AV1State state;
u8 reserved;
GF_AV1Config *cfg;
if (!size) size = (u32) gf_bs_available(bs);
if (!size) return NULL;
cfg = gf_odf_av1_cfg_new();
gf_av1_init_state(&state);
state.config = cfg;
cfg->marker = gf_bs_read_int(bs, 1);
cfg->version = gf_bs_read_int(bs, 7);
cfg->seq_profile = gf_bs_read_int(bs, 3);
cfg->seq_level_idx_0 = gf_bs_read_int(bs, 5);
cfg->seq_tier_0 = gf_bs_read_int(bs, 1);
cfg->high_bitdepth = gf_bs_read_int(bs, 1);
cfg->twelve_bit = gf_bs_read_int(bs, 1);
cfg->monochrome = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_x = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_y = gf_bs_read_int(bs, 1);
cfg->chroma_sample_position = gf_bs_read_int(bs, 2);
reserved = gf_bs_read_int(bs, 3);
if (reserved != 0 || cfg->marker != 1 || cfg->version != 1) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] wrong avcC reserved %d / marker %d / version %d expecting 0 1 1\n", reserved, cfg->marker, cfg->version));
gf_odf_av1_cfg_del(cfg);
return NULL;
}
cfg->initial_presentation_delay_present = gf_bs_read_int(bs, 1);
if (cfg->initial_presentation_delay_present) {
cfg->initial_presentation_delay_minus_one = gf_bs_read_int(bs, 4);
} else {
/*reserved = */gf_bs_read_int(bs, 4);
cfg->initial_presentation_delay_minus_one = 0;
}
size -= 4;
while (size) {
u64 pos, obu_size;
ObuType obu_type;
GF_AV1_OBUArrayEntry *a;
pos = gf_bs_get_position(bs);
obu_size = 0;
if (gf_av1_parse_obu(bs, &obu_type, &obu_size, NULL, &state) != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] could not parse AV1 OBU at position "LLU". Leaving parsing.\n", pos));
break;
}
assert(obu_size == gf_bs_get_position(bs) - pos);
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] parsed AV1 OBU type=%u size="LLU" at position "LLU".\n", obu_type, obu_size, pos));
if (!av1_is_obu_header(obu_type)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] AV1 unexpected OBU type=%u size="LLU" found at position "LLU". Forwarding.\n", pos));
}
GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry);
if (!a) break;
a->obu = gf_malloc((size_t)obu_size);
if (!a->obu) {
gf_free(a);
break;
}
gf_bs_seek(bs, pos);
gf_bs_read_data(bs, (char *) a->obu, (u32)obu_size);
a->obu_length = obu_size;
a->obu_type = obu_type;
gf_list_add(cfg->obu_array, a);
if (size<obu_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] AV1 config misses %d bytes to fit the entire OBU\n", obu_size - size));
break;
}
size -= (u32) obu_size;
}
gf_av1_reset_state(& state, GF_TRUE);
return cfg;
#else
return NULL;
#endif
}
|
270972574846681061752900592460657064315
|
descriptors.c
|
100253523943266503998746709370742625478
|
CWE-415
|
CVE-2021-40571
|
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the ilst_box_read function in box_code_apple.c, which allows attackers to cause a denial of service, or even achieve code execution and escalation of privileges.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40571
|
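Note that the CVE text for this record describes a double free in ilst_box_read, while the captured function is the AV1 config reader whose fix adds a gf_bs_align call in the next record; the two are only loosely coupled here. As a generic illustration of the double-free defense the CWE names, unrelated to any specific GPAC API:

#include <cstdlib>

struct Box { unsigned char *data; };  // hypothetical holder

void BoxReset(Box *b) {
  free(b->data);
  b->data = nullptr;  // a second BoxReset() is now a harmless free(NULL)
}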
221,160
|
gpac
|
a69b567b8c95c72f9560c873c5ab348be058f340
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/a69b567b8c95c72f9560c873c5ab348be058f340
|
fixed #1895
| 0
|
GF_AV1Config *gf_odf_av1_cfg_read_bs_size(GF_BitStream *bs, u32 size)
{
#ifndef GPAC_DISABLE_AV_PARSERS
AV1State state;
u8 reserved;
GF_AV1Config *cfg;
if (!size) size = (u32) gf_bs_available(bs);
if (!size) return NULL;
cfg = gf_odf_av1_cfg_new();
gf_av1_init_state(&state);
state.config = cfg;
cfg->marker = gf_bs_read_int(bs, 1);
cfg->version = gf_bs_read_int(bs, 7);
cfg->seq_profile = gf_bs_read_int(bs, 3);
cfg->seq_level_idx_0 = gf_bs_read_int(bs, 5);
cfg->seq_tier_0 = gf_bs_read_int(bs, 1);
cfg->high_bitdepth = gf_bs_read_int(bs, 1);
cfg->twelve_bit = gf_bs_read_int(bs, 1);
cfg->monochrome = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_x = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_y = gf_bs_read_int(bs, 1);
cfg->chroma_sample_position = gf_bs_read_int(bs, 2);
reserved = gf_bs_read_int(bs, 3);
if (reserved != 0 || cfg->marker != 1 || cfg->version != 1) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] wrong avcC reserved %d / marker %d / version %d expecting 0 1 1\n", reserved, cfg->marker, cfg->version));
gf_odf_av1_cfg_del(cfg);
return NULL;
}
cfg->initial_presentation_delay_present = gf_bs_read_int(bs, 1);
if (cfg->initial_presentation_delay_present) {
cfg->initial_presentation_delay_minus_one = gf_bs_read_int(bs, 4);
} else {
/*reserved = */gf_bs_read_int(bs, 4);
cfg->initial_presentation_delay_minus_one = 0;
}
size -= 4;
while (size) {
u64 pos, obu_size;
ObuType obu_type;
GF_AV1_OBUArrayEntry *a;
pos = gf_bs_get_position(bs);
obu_size = 0;
if (gf_av1_parse_obu(bs, &obu_type, &obu_size, NULL, &state) != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] could not parse AV1 OBU at position "LLU". Leaving parsing.\n", pos));
break;
}
assert(obu_size == gf_bs_get_position(bs) - pos);
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] parsed AV1 OBU type=%u size="LLU" at position "LLU".\n", obu_type, obu_size, pos));
if (!av1_is_obu_header(obu_type)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] AV1 unexpected OBU type=%u size="LLU" found at position "LLU". Forwarding.\n", pos));
}
GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry);
if (!a) break;
a->obu = gf_malloc((size_t)obu_size);
if (!a->obu) {
gf_free(a);
break;
}
gf_bs_seek(bs, pos);
gf_bs_read_data(bs, (char *) a->obu, (u32)obu_size);
a->obu_length = obu_size;
a->obu_type = obu_type;
gf_list_add(cfg->obu_array, a);
if (size<obu_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] AV1 config misses %d bytes to fit the entire OBU\n", obu_size - size));
break;
}
size -= (u32) obu_size;
}
gf_av1_reset_state(& state, GF_TRUE);
gf_bs_align(bs);
return cfg;
#else
return NULL;
#endif
}
|
161782515383812350901831460771265303089
|
descriptors.c
|
86476492964393375980272696403064975409
|
CWE-415
|
CVE-2021-40571
|
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the ilst_box_read function in box_code_apple.c, which allows attackers to cause a denial of service, or even achieve code execution and escalation of privileges.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40571
|
195,082
|
linux
|
c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
|
https://github.com/torvalds/linux
|
https://github.com/torvalds/linux/commit/c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
|
KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable
Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor),
then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only
possible by making L0 intercept these instructions.
Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted,
and thus read/write portions of the host physical memory.
Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature")
Suggested-by: Paolo Bonzini <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
| 1
|
void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h, *g;
unsigned int i;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
if (!is_guest_mode(&svm->vcpu))
return;
c = &svm->vmcb->control;
h = &svm->vmcb01.ptr->control;
g = &svm->nested.ctl;
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] = h->intercepts[i];
if (g->int_ctl & V_INTR_MASKING_MASK) {
/* We only want the cr8 intercept bits of L1 */
vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
/*
* Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
* affect any interrupt we may want to inject; therefore,
* interrupt window vmexits are irrelevant to L0.
*/
vmcb_clr_intercept(c, INTERCEPT_VINTR);
}
/* We don't want to see VMMCALLs from a nested guest */
vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
/* If SMI is not intercepted, ignore guest SMI intercept as well */
if (!intercept_smi)
vmcb_clr_intercept(c, INTERCEPT_SMI);
}
|
308018010909685377463219146239861290533
|
None
|
CWE-862
|
CVE-2021-3656
|
A flaw was found in the KVM's AMD code for supporting SVM nested virtualization. The flaw occurs when processing the VMCB (virtual machine control block) provided by the L1 guest to spawn/handle a nested guest (L2). Due to improper validation of the "virt_ext" field, this issue could allow a malicious L1 to disable both VMLOAD/VMSAVE intercepts and VLS (Virtual VMLOAD/VMSAVE) for the L2 guest. As a result, the L2 guest would be allowed to read/write physical pages of the host, resulting in a crash of the entire system, leak of sensitive data or potential guest-to-host escape.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-3656
|
|
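The patched recalc_intercepts in the next record appends vmcb_set_intercept calls for VMLOAD and VMSAVE, so L0 re-asserts those intercepts even when L1 cleared them and Virtual VMLOAD/VMSAVE is unavailable. The idea reduces to a bitmask merge that forces two bits on; the bit positions below are illustrative, not the real VMCB layout:

#include <cstdint>

constexpr uint32_t INTERCEPT_VMLOAD = 1u << 0;  // illustrative positions
constexpr uint32_t INTERCEPT_VMSAVE = 1u << 1;

uint32_t MergeIntercepts(uint32_t host_bits, uint32_t guest_bits) {
  uint32_t effective = host_bits | guest_bits;
  effective |= INTERCEPT_VMLOAD | INTERCEPT_VMSAVE;  // always forced by L0
  return effective;
}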
221,413
|
linux
|
c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
|
https://github.com/torvalds/linux
|
https://github.com/torvalds/linux/commit/c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
|
KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable
Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor),
then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only
possible by making L0 intercept these instructions.
Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted,
and thus read/write portions of the host physical memory.
Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature")
Suggested-by: Paolo Bonzini <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
| 0
|
void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h, *g;
unsigned int i;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
if (!is_guest_mode(&svm->vcpu))
return;
c = &svm->vmcb->control;
h = &svm->vmcb01.ptr->control;
g = &svm->nested.ctl;
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] = h->intercepts[i];
if (g->int_ctl & V_INTR_MASKING_MASK) {
/* We only want the cr8 intercept bits of L1 */
vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
/*
* Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
* affect any interrupt we may want to inject; therefore,
* interrupt window vmexits are irrelevant to L0.
*/
vmcb_clr_intercept(c, INTERCEPT_VINTR);
}
/* We don't want to see VMMCALLs from a nested guest */
vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
/* If SMI is not intercepted, ignore guest SMI intercept as well */
if (!intercept_smi)
vmcb_clr_intercept(c, INTERCEPT_SMI);
vmcb_set_intercept(c, INTERCEPT_VMLOAD);
vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}
|
61346983903340748153155985789359366401
|
None
|
CWE-862
|
CVE-2021-3656
|
A flaw was found in the KVM's AMD code for supporting SVM nested virtualization. The flaw occurs when processing the VMCB (virtual machine control block) provided by the L1 guest to spawn/handle a nested guest (L2). Due to improper validation of the "virt_ext" field, this issue could allow a malicious L1 to disable both VMLOAD/VMSAVE intercepts and VLS (Virtual VMLOAD/VMSAVE) for the L2 guest. As a result, the L2 guest would be allowed to read/write physical pages of the host, resulting in a crash of the entire system, leak of sensitive data or potential guest-to-host escape.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-3656
|
|
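The essence of the fix above is that the merged intercept bitmap must have the VMLOAD/VMSAVE bits forced on whenever a nested guest runs, regardless of what L1 requested. Below is a minimal userspace C++ model of that merge step, not kernel code; the bit positions, InterceptMap type, and merge_intercepts() helper are hypothetical stand-ins for the vmcb_control_area intercept words.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical bit positions for illustration only.
constexpr unsigned INTERCEPT_VMLOAD_BIT = 0;
constexpr unsigned INTERCEPT_VMSAVE_BIT = 1;

using InterceptMap = std::array<uint32_t, 4>;  // stands in for c->intercepts[]

void set_bit(InterceptMap &m, unsigned bit) { m[bit / 32] |= 1u << (bit % 32); }
bool test_bit(const InterceptMap &m, unsigned bit) { return (m[bit / 32] >> (bit % 32)) & 1u; }

// Mirrors the fixed recalc_intercepts(): OR together host (L0) and guest (L1)
// intercepts, then force VMLOAD/VMSAVE so L2 can never execute them natively,
// even if L1 cleared both bits.
InterceptMap merge_intercepts(const InterceptMap &host, const InterceptMap &guest) {
    InterceptMap merged{};
    for (std::size_t i = 0; i < merged.size(); ++i)
        merged[i] = host[i] | guest[i];
    set_bit(merged, INTERCEPT_VMLOAD_BIT);
    set_bit(merged, INTERCEPT_VMSAVE_BIT);
    return merged;
}

int main() {
    InterceptMap host{}, guest{};  // a malicious L1 cleared both intercepts
    InterceptMap merged = merge_intercepts(host, guest);
    std::printf("VMLOAD intercepted: %d\n", test_bit(merged, INTERCEPT_VMLOAD_BIT));
    std::printf("VMSAVE intercepted: %d\n", test_bit(merged, INTERCEPT_VMSAVE_BIT));
}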
195,083
|
tensorflow
|
5b491cd5e41ad63735161cec9c2a568172c8b6a3
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/5b491cd5e41ad63735161cec9c2a568172c8b6a3
|
Validate `proto.dtype()` before calling `set_dtype()`.
This prevents a `DCHECK`-fail when the proto contains an invalid dtype for a tensor shape with 0 elements or for an incomplete tensor shape.
PiperOrigin-RevId: 408369083
Change-Id: Ia21a3e3d62a90d642a4561f08f3b543e5ad00c46
| 1
|
bool Tensor::FromProto(Allocator* a, const TensorProto& proto) {
CHECK_NOTNULL(a);
TensorBuffer* p = nullptr;
if (!TensorShape::IsValid(proto.tensor_shape())) return false;
if (proto.dtype() == DT_INVALID) return false;
TensorShape shape(proto.tensor_shape());
const int64_t N = shape.num_elements();
if (N > 0 && proto.dtype()) {
bool dtype_error = false;
if (!proto.tensor_content().empty()) {
const auto& content = proto.tensor_content();
CASES_WITH_DEFAULT(proto.dtype(), p = Helper<T>::Decode(a, content, N),
dtype_error = true, dtype_error = true);
} else {
CASES_WITH_DEFAULT(proto.dtype(), p = FromProtoField<T>(a, proto, N),
dtype_error = true, dtype_error = true);
}
if (dtype_error || p == nullptr) return false;
}
shape_ = shape;
set_dtype(proto.dtype());
UnrefIfNonNull(buf_);
buf_ = p;
// TODO(misard) add tracking of which kernels and steps are calling
// FromProto.
if (MemoryLoggingEnabled() && buf_ != nullptr && buf_->data() != nullptr) {
LogMemory::RecordTensorAllocation("Unknown (from Proto)",
LogMemory::UNKNOWN_STEP_ID, *this);
}
return true;
}
|
112719252128622113589892906952570683457
|
tensor.cc
|
289613009517546867193769314060658742037
|
CWE-617
|
CVE-2022-23571
|
Tensorflow is an Open Source Machine Learning Framework. When decoding a tensor from protobuf, a TensorFlow process can encounter cases where a `CHECK` assertion is invalidated based on user-controlled arguments, if the tensors have an invalid `dtype` and 0 elements or an invalid shape. This allows attackers to cause a denial of service in TensorFlow processes. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23571
|
221,428
|
tensorflow
|
5b491cd5e41ad63735161cec9c2a568172c8b6a3
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/5b491cd5e41ad63735161cec9c2a568172c8b6a3
|
Validate `proto.dtype()` before calling `set_dtype()`.
This prevents a `DCHECK`-fail when the proto contains an invalid dtype for a tensor shape with 0 elements or for an incomplete tensor shape.
PiperOrigin-RevId: 408369083
Change-Id: Ia21a3e3d62a90d642a4561f08f3b543e5ad00c46
| 0
|
bool Tensor::FromProto(Allocator* a, const TensorProto& proto) {
CHECK_NOTNULL(a);
TensorBuffer* p = nullptr;
if (!TensorShape::IsValid(proto.tensor_shape())) return false;
if (proto.dtype() == DT_INVALID) return false;
TensorShape shape(proto.tensor_shape());
const int64_t N = shape.num_elements();
if (N > 0 && proto.dtype()) {
bool dtype_error = false;
if (!proto.tensor_content().empty()) {
const auto& content = proto.tensor_content();
CASES_WITH_DEFAULT(proto.dtype(), p = Helper<T>::Decode(a, content, N),
dtype_error = true, dtype_error = true);
} else {
CASES_WITH_DEFAULT(proto.dtype(), p = FromProtoField<T>(a, proto, N),
dtype_error = true, dtype_error = true);
}
if (dtype_error || p == nullptr) return false;
} else {
// Handle the case of empty tensors (N = 0) or tensors with incomplete shape
// (N = -1). All other values of `shape.num_elements()` should be invalid by
// construction.
// Here, we just need to validate that the `proto.dtype()` value is valid.
bool dtype_error = false;
CASES_WITH_DEFAULT(proto.dtype(), break, dtype_error = true,
dtype_error = true);
if (dtype_error) return false;
}
shape_ = shape;
set_dtype(proto.dtype());
UnrefIfNonNull(buf_);
buf_ = p;
// TODO(misard) add tracking of which kernels and steps are calling
// FromProto.
if (MemoryLoggingEnabled() && buf_ != nullptr && buf_->data() != nullptr) {
LogMemory::RecordTensorAllocation("Unknown (from Proto)",
LogMemory::UNKNOWN_STEP_ID, *this);
}
return true;
}
|
12020279702191708342972381802829194549
|
tensor.cc
|
303884711858139014412460575672580480868
|
CWE-617
|
CVE-2022-23571
|
Tensorflow is an Open Source Machine Learning Framework. When decoding a tensor from protobuf, a TensorFlow process can encounter cases where a `CHECK` assertion is invalidated based on user-controlled arguments, if the tensors have an invalid `dtype` and 0 elements or an invalid shape. This allows attackers to cause a denial of service in TensorFlow processes. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23571
|
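The fix adds a validation branch for the empty/incomplete-shape path so that an invalid dtype is rejected before set_dtype() is reached. A minimal C++ sketch of that control flow, assuming a stand-in DataType enum and an IsValidDType() helper (both hypothetical; the real code dispatches through CASES_WITH_DEFAULT):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for TensorFlow's DataType enum.
enum DataType : int { DT_INVALID = 0, DT_FLOAT = 1, DT_INT32 = 3 };

bool IsValidDType(int dtype) {
    switch (dtype) {
        case DT_FLOAT:
        case DT_INT32:
            return true;    // the real code enumerates every supported dtype
        default:
            return false;   // mirrors the dtype_error branch in the fix
    }
}

// Mirrors the fixed control flow: even when no buffer is decoded because
// num_elements is 0 (or -1 for an incomplete shape), reject an invalid
// dtype before it can reach set_dtype() and trip a DCHECK.
bool FromProtoLike(std::int64_t num_elements, int dtype) {
    if (dtype == DT_INVALID) return false;
    if (num_elements > 0) {
        // ... decode the tensor buffer, failing on dtype errors ...
    } else if (!IsValidDType(dtype)) {
        return false;       // the branch added by the fix
    }
    // ... commit shape and dtype ...
    return true;
}

int main() {
    std::printf("%d\n", FromProtoLike(0, 29));       // bogus dtype -> 0 (rejected)
    std::printf("%d\n", FromProtoLike(0, DT_FLOAT)); // empty tensor, valid dtype -> 1
}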
195,091
|
tensorflow
|
35f0fabb4c178253a964d7aabdbb15c6a398b69a
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/35f0fabb4c178253a964d7aabdbb15c6a398b69a
|
Avoid Segfault for scalar shapes.
Calling tensor::FromElementsOp with an empty vector of elements and no type
causes a segfault. We need to let the FromElementsOp know which scalar type it
should have.
Also add back the DynamicBroadcastInDimOp canonicalization patterns, which
previously prevented this bug from happening.
Add a regression test that demonstrates the bug.
PiperOrigin-RevId: 417561444
Change-Id: I6d1d6cfb71aabbad6102422625a00bbe253ac95a
| 1
|
llvm::Optional<Value> simplifyBroadcast(ShapeComponentAnalysis& analysis,
ValueRange shapes, Location loc,
OpBuilder* builder) {
// First find the input shape with the largest rank.
SmallVector<ArrayRef<ShapeComponentAnalysis::SymbolicExpr>> shapes_found;
size_t maxRank = 0;
for (const auto &shape : llvm::enumerate(shapes)) {
auto found_shape = analysis.GetValueInfo(shape.value());
if (!found_shape) return {};
shapes_found.push_back(*found_shape);
maxRank = std::max(maxRank, found_shape->size());
}
SmallVector<const ShapeComponentAnalysis::SymbolicExpr*> joined_dimensions(
maxRank);
SmallVector<std::pair<Value, int64_t>> shape_and_rank_for_dim(maxRank);
for (const auto &shape : llvm::enumerate(shapes_found)) {
for (const auto &dim : llvm::enumerate(llvm::reverse(shape.value()))) {
// 1 dimensions don't contribute to the final result.
if (dim.value().isConstant(1)) continue;
// If it's not a 1 dimension it will be present in the result. Remember
// where it came from.
auto index = maxRank - dim.index() - 1;
if (!joined_dimensions[index]) {
joined_dimensions[index] = &dim.value();
shape_and_rank_for_dim[index] =
std::make_pair(shapes[shape.index()], shape.value().size());
continue;
}
// Bail if the dimensions are neither equal nor 1.
if (*joined_dimensions[index] != dim.value()) return {};
}
}
// If the output is the same as one of the inputs just return that.
if (llvm::is_splat(shape_and_rank_for_dim) &&
shape_and_rank_for_dim[0].first) {
return shape_and_rank_for_dim[0].first;
}
// Otherwise rematerialize the shape from the pieces we have.
SmallVector<Value> elements;
for (int i = 0; i != maxRank; ++i) {
// 1 dimensions are filtered above, recreate the constant.
if (!shape_and_rank_for_dim[i].first) {
auto one = builder->getIntegerAttr(
shapes[0].getType().cast<RankedTensorType>().getElementType(), 1);
elements.push_back(builder->create<ConstantOp>(loc, one));
continue;
}
// Extract from one of the shapes, accounting for the reverse indexing
// performed by broadcast.
Value index = builder->create<ConstantIndexOp>(
loc, i - maxRank + shape_and_rank_for_dim[i].second);
elements.push_back(builder->create<tensor::ExtractOp>(
loc, shape_and_rank_for_dim[i].first, index));
}
return Value(builder->create<tensor::FromElementsOp>(loc, elements));
}
|
84683486121098934971147990908524528886
|
tf_cpurt_symbolic_shape_optimization.cc
|
183860206963562900623001205261417288221
|
CWE-754
|
CVE-2022-23593
|
Tensorflow is an Open Source Machine Learning Framework. The `simplifyBroadcast` function in the MLIR-TFRT infrastructure in TensorFlow is vulnerable to a segfault (hence, denial of service) if called with scalar shapes. If all shapes are scalar, then `maxRank` is 0, so we build an empty `SmallVector`. The fix will be included in TensorFlow 2.8.0. This is the only affected version.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23593
|
221,631
|
tensorflow
|
35f0fabb4c178253a964d7aabdbb15c6a398b69a
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/35f0fabb4c178253a964d7aabdbb15c6a398b69a
|
Avoid Segfault for scalar shapes.
Calling tensor::FromElementsOp with an empty vector of elements and no type
causes a segfault. We need to let the FromElementsOp know which scalar type it
should have.
Also add back the DynamicBroadcastInDimOp canonicalization patterns, which
previously prevented this bug from happening.
Add a regression test that demonstrates the bug.
PiperOrigin-RevId: 417561444
Change-Id: I6d1d6cfb71aabbad6102422625a00bbe253ac95a
| 0
|
llvm::Optional<Value> simplifyBroadcast(ShapeComponentAnalysis& analysis,
ValueRange shapes, Location loc,
OpBuilder* builder) {
// First find the input shape with the largest rank.
SmallVector<ArrayRef<ShapeComponentAnalysis::SymbolicExpr>> shapes_found;
size_t maxRank = 0;
for (const auto &shape : llvm::enumerate(shapes)) {
auto found_shape = analysis.GetValueInfo(shape.value());
if (!found_shape) return {};
shapes_found.push_back(*found_shape);
maxRank = std::max(maxRank, found_shape->size());
}
if (maxRank == 0) {
return Value(builder->create<tensor::FromElementsOp>(
loc, shapes[0].getType(), SmallVector<Value>()));
}
SmallVector<const ShapeComponentAnalysis::SymbolicExpr*> joined_dimensions(
maxRank);
SmallVector<std::pair<Value, int64_t>> shape_and_rank_for_dim(maxRank);
for (const auto &shape : llvm::enumerate(shapes_found)) {
for (const auto &dim : llvm::enumerate(llvm::reverse(shape.value()))) {
// 1 dimensions don't contribute to the final result.
if (dim.value().isConstant(1)) continue;
// If it's not a 1 dimension it will be present in the result. Remember
// where it came from.
auto index = maxRank - dim.index() - 1;
if (!joined_dimensions[index]) {
joined_dimensions[index] = &dim.value();
shape_and_rank_for_dim[index] =
std::make_pair(shapes[shape.index()], shape.value().size());
continue;
}
// Bail if the dimensions are neither equal nor 1.
if (*joined_dimensions[index] != dim.value()) return {};
}
}
// If the output is the same as one of the inputs just return that.
if (llvm::is_splat(shape_and_rank_for_dim) &&
shape_and_rank_for_dim[0].first) {
return shape_and_rank_for_dim[0].first;
}
// Otherwise rematerialize the shape from the pieces we have.
SmallVector<Value> elements;
for (int i = 0; i != maxRank; ++i) {
// 1 dimensions are filtered above, recreate the constant.
if (!shape_and_rank_for_dim[i].first) {
auto one = builder->getIntegerAttr(
shapes[0].getType().cast<RankedTensorType>().getElementType(), 1);
elements.push_back(builder->create<ConstantOp>(loc, one));
continue;
}
// Extract from one of the shapes, accounting for the reverse indexing
// performed by broadcast.
Value index = builder->create<ConstantIndexOp>(
loc, i - maxRank + shape_and_rank_for_dim[i].second);
elements.push_back(builder->create<tensor::ExtractOp>(
loc, shape_and_rank_for_dim[i].first, index));
}
return Value(builder->create<tensor::FromElementsOp>(loc, elements));
}
|
131837408517580503230068988683463768929
|
tf_cpurt_symbolic_shape_optimization.cc
|
61127670286277963749447708912499557476
|
CWE-754
|
CVE-2022-23593
|
Tensorflow is an Open Source Machine Learning Framework. The `simplifyBroadcast` function in the MLIR-TFRT infrastructure in TensorFlow is vulnerable to a segfault (hence, denial of service) if called with scalar shapes. If all shapes are scalar, then `maxRank` is 0, so we build an empty `SmallVector`. The fix will be included in TensorFlow 2.8.0. This is the only affected version.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23593
|
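The crux of the fix is the early maxRank == 0 branch, which handles the all-scalar case explicitly (and supplies the result type) instead of handing an empty element vector to tensor::FromElementsOp. A minimal C++ model of that guard, with ShapeTensor and fromElements() as hypothetical stand-ins for the MLIR types:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the shape value built by the pass.
struct ShapeTensor { std::vector<long> dims; };

ShapeTensor fromElements(std::vector<long> elements) {
    // Stands in for tensor::FromElementsOp; in the real fix the rank-0 call
    // passes the result type explicitly so the builder never has to infer it
    // from an empty element list.
    return ShapeTensor{std::move(elements)};
}

ShapeTensor simplifyBroadcastLike(const std::vector<std::vector<long>> &shapes) {
    std::size_t maxRank = 0;
    for (const auto &s : shapes) maxRank = std::max(maxRank, s.size());
    if (maxRank == 0)               // the guard added by the fix
        return fromElements({});
    std::vector<long> elements(maxRank, 1);
    // ... join dimensions across the input shapes as the real pass does ...
    return fromElements(elements);
}

int main() {
    auto scalar = simplifyBroadcastLike({{}, {}});   // all-scalar input
    std::printf("rank=%zu\n", scalar.dims.size());   // prints rank=0, no crash
}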
195,092
|
hermes
|
55e1b2343f4deb1a1b5726cfe1e23b2068217ff2
|
https://github.com/facebook/hermes
|
https://github.com/facebook/hermes/commit/55e1b2343f4deb1a1b5726cfe1e23b2068217ff2
|
Handle typeof applied to empty in InstSimplify
Summary:
Do not simplify `typeof` if it is applied to an invalid type. This
handles a case like the one in the added test, where `typeof` is called
on a literal empty in unreachable code.
Reviewed By: kodafb
Differential Revision: D31000173
fbshipit-source-id: 2d7f69cbcc9c1bb0a916585c07171089444c85dc
| 1
|
Literal *hermes::evalUnaryOperator(
UnaryOperatorInst::OpKind kind,
IRBuilder &builder,
Literal *operand) {
switch (kind) {
case UnaryOperatorInst::OpKind::MinusKind:
// Negate constant integers.
switch (operand->getKind()) {
case ValueKind::LiteralNumberKind:
if (auto *literalNum = llvh::dyn_cast<LiteralNumber>(operand)) {
auto V = -literalNum->getValue();
return builder.getLiteralNumber(V);
}
break;
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralNaN();
case ValueKind::LiteralBoolKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralNumber(-1);
} else { // evalIsFalse(operand)
return builder.getLiteralNegativeZero();
}
case ValueKind::LiteralNullKind:
return builder.getLiteralNegativeZero();
default:
break;
}
break;
case UnaryOperatorInst::OpKind::TypeofKind:
switch (operand->getKind()) {
case ValueKind::GlobalObjectKind:
case ValueKind::LiteralNullKind:
return builder.getLiteralString("object");
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralString("undefined");
case ValueKind::LiteralBoolKind:
return builder.getLiteralString("boolean");
case ValueKind::LiteralNumberKind:
return builder.getLiteralString("number");
case ValueKind::LiteralStringKind:
return builder.getLiteralString("string");
default:
llvm_unreachable("Invalid literal kind.");
}
break;
case UnaryOperatorInst::OpKind::BangKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralBool(false);
}
if (evalIsFalse(builder, operand)) {
return builder.getLiteralBool(true);
}
break;
case UnaryOperatorInst::OpKind::VoidKind:
return builder.getLiteralUndefined();
default:
break;
}
return nullptr;
}
|
318397569222892175642900890058916302083
|
IREval.cpp
|
25640608993938735880507555687030796129
|
CWE-843
|
CVE-2021-24045
|
A type confusion vulnerability could be triggered when resolving the "typeof" unary operator in Facebook Hermes prior to v0.10.0. Note that this is only exploitable if the application using Hermes permits evaluation of untrusted JavaScript. Hence, most React Native applications are not affected.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-24045
|
221,651
|
hermes
|
55e1b2343f4deb1a1b5726cfe1e23b2068217ff2
|
https://github.com/facebook/hermes
|
https://github.com/facebook/hermes/commit/55e1b2343f4deb1a1b5726cfe1e23b2068217ff2
|
Handle typeof applied to empty in InstSimplify
Summary:
Do not simplify `typeof` if it is applied to an invalid type. This
handles a case like the one in the added test, where `typeof` is called
on a literal empty in unreachable code.
Reviewed By: kodafb
Differential Revision: D31000173
fbshipit-source-id: 2d7f69cbcc9c1bb0a916585c07171089444c85dc
| 0
|
Literal *hermes::evalUnaryOperator(
UnaryOperatorInst::OpKind kind,
IRBuilder &builder,
Literal *operand) {
switch (kind) {
case UnaryOperatorInst::OpKind::MinusKind:
// Negate constant integers.
switch (operand->getKind()) {
case ValueKind::LiteralNumberKind:
if (auto *literalNum = llvh::dyn_cast<LiteralNumber>(operand)) {
auto V = -literalNum->getValue();
return builder.getLiteralNumber(V);
}
break;
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralNaN();
case ValueKind::LiteralBoolKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralNumber(-1);
} else { // evalIsFalse(operand)
return builder.getLiteralNegativeZero();
}
case ValueKind::LiteralNullKind:
return builder.getLiteralNegativeZero();
default:
break;
}
break;
case UnaryOperatorInst::OpKind::TypeofKind:
switch (operand->getKind()) {
case ValueKind::GlobalObjectKind:
case ValueKind::LiteralNullKind:
return builder.getLiteralString("object");
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralString("undefined");
case ValueKind::LiteralBoolKind:
return builder.getLiteralString("boolean");
case ValueKind::LiteralNumberKind:
return builder.getLiteralString("number");
case ValueKind::LiteralStringKind:
return builder.getLiteralString("string");
default:
break;
}
break;
case UnaryOperatorInst::OpKind::BangKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralBool(false);
}
if (evalIsFalse(builder, operand)) {
return builder.getLiteralBool(true);
}
break;
case UnaryOperatorInst::OpKind::VoidKind:
return builder.getLiteralUndefined();
default:
break;
}
return nullptr;
}
|
123137692006195986808828639392289766950
|
IREval.cpp
|
42961914778696217563735135747458181955
|
CWE-843
|
CVE-2021-24045
|
A type confusion vulnerability could be triggered when resolving the "typeof" unary operator in Facebook Hermes prior to v0.10.0. Note that this is only exploitable if the application using Hermes permits evaluation of untrusted JavaScript. Hence, most React Native applications are not affected.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-24045
|
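The vulnerable version ended the typeof switch with llvm_unreachable("Invalid literal kind."), which is undefined behavior in release builds once a literal empty (from unreachable code) actually reaches it; the fix falls through to a plain default break so the instruction is left unsimplified. A minimal C++ model of that pattern, with LiteralKind and typeofLiteral() as hypothetical stand-ins:

#include <cstdio>

enum class LiteralKind { Null, Undefined, Bool, Number, String, Empty };

// Returns the simplified typeof string, or nullptr to leave the
// instruction alone (the fixed behavior for unexpected kinds).
const char *typeofLiteral(LiteralKind kind) {
    switch (kind) {
        case LiteralKind::Null:      return "object";
        case LiteralKind::Undefined: return "undefined";
        case LiteralKind::Bool:      return "boolean";
        case LiteralKind::Number:    return "number";
        case LiteralKind::String:    return "string";
        default:                     return nullptr;  // the fix: don't simplify
    }
}

int main() {
    const char *t = typeofLiteral(LiteralKind::Empty);
    std::printf("%s\n", t ? t : "(not simplified)");
}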
195,095
|
e2guardian
|
eae46a7e2a57103aadca903c4a24cca94dc502a2
|
https://github.com/e2guardian/e2guardian
|
https://github.com/e2guardian/e2guardian/commit/eae46a7e2a57103aadca903c4a24cca94dc502a2
|
Fix bug #707 cert hostnames not being checked
- only happened when openssl v1.1 is used
| 1
|
int Socket::startSslClient(const std::string &certificate_path, String hostname)
{
if (isssl) {
stopSsl();
}
ERR_clear_error();
#if OPENSSL_VERSION_NUMBER < 0x10100000L
ctx = SSL_CTX_new(SSLv23_client_method());
#else
ctx = SSL_CTX_new(TLS_client_method());
#endif
if (ctx == NULL) {
#ifdef NETDEBUG
std::cout << thread_id << "Error ssl context is null (check that openssl has been inited)" << std::endl;
#endif
log_ssl_errors("Error ssl context is null for %s", hostname.c_str());
return -1;
}
//set the timeout for the ssl session
if (SSL_CTX_set_timeout(ctx, 130l) < 1) {
SSL_CTX_free(ctx);
ctx = NULL;
return -1;
}
//load certs
ERR_clear_error();
if (certificate_path.length()) {
if (!SSL_CTX_load_verify_locations(ctx, NULL, certificate_path.c_str())) {
#ifdef NETDEBUG
std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
log_ssl_errors("couldnt load certificates from %s", certificate_path.c_str());
//tidy up
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
} else if (!SSL_CTX_set_default_verify_paths(ctx)) //use default if no certPpath given
{
#ifdef NETDEBUG
std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
log_ssl_errors("couldnt load default certificates for %s", hostname.c_str());
//tidy up
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
// add validation params
ERR_clear_error();
X509_VERIFY_PARAM *x509_param = X509_VERIFY_PARAM_new();
if (!x509_param) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
//X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
ERR_clear_error();
if (!X509_VERIFY_PARAM_set_flags(x509_param, X509_V_FLAG_TRUSTED_FIRST)) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
ERR_clear_error();
if (!SSL_CTX_set1_param(ctx, x509_param)) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
X509_VERIFY_PARAM_free(x509_param); // try not freeing this as SSL_CTX_free seems to be ring to free it
//hand socket over to ssl lib
ERR_clear_error();
ssl = SSL_new(ctx);
SSL_set_options(ssl, SSL_OP_ALL);
SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY);
SSL_set_connect_state(ssl);
//fcntl(this->getFD() ,F_SETFL, O_NONBLOCK); // blocking mode used currently
SSL_set_fd(ssl, this->getFD());
SSL_set_tlsext_host_name(ssl, hostname.c_str());
//make io non blocking as select wont tell us if we can do a read without blocking
//BIO_set_nbio(SSL_get_rbio(ssl),1l); // blocking mode used currently
//BIO_set_nbio(SSL_get_wbio(ssl),1l); // blocking mode used currently
ERR_clear_error();
int rc = SSL_connect(ssl);
if (rc < 0) {
log_ssl_errors("ssl_connect failed to %s", hostname.c_str());
#ifdef NETDEBUG
std::cout << thread_id << "ssl_connect failed with error " << SSL_get_error(ssl, rc) << std::endl;
#endif
// tidy up
SSL_free(ssl);
ssl = NULL;
SSL_CTX_free(ctx);
ctx = NULL;
return -3;
}
//should be safer to do this last as nothing will ever try to use a ssl socket that isnt fully setup
isssl = true;
issslserver = false;
return 0;
}
|
285364534121786496260977042144971081331
|
Socket.cpp
|
283084154597152068392957992825637904487
|
CWE-295
|
CVE-2021-44273
|
e2guardian v5.4.x <= v5.4.3r is affected by missing SSL certificate validation in the SSL MITM engine. In standalone mode (i.e., acting as a proxy or a transparent proxy), with SSL MITM enabled, e2guardian, if built with OpenSSL v1.1.x, did not validate hostnames in certificates of the web servers that it connected to, and thus was itself vulnerable to MITM attacks.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-44273
|
221,678
|
e2guardian
|
eae46a7e2a57103aadca903c4a24cca94dc502a2
|
https://github.com/e2guardian/e2guardian
|
https://github.com/e2guardian/e2guardian/commit/eae46a7e2a57103aadca903c4a24cca94dc502a2
|
Fix bug #707 cert hostnames not being checked
- only happened when openssl v1.1 is used
| 0
|
int Socket::startSslClient(const std::string &certificate_path, String hostname)
{
if (isssl) {
stopSsl();
}
ERR_clear_error();
#if OPENSSL_VERSION_NUMBER < 0x10100000L
ctx = SSL_CTX_new(SSLv23_client_method());
#else
ctx = SSL_CTX_new(TLS_client_method());
#endif
if (ctx == NULL) {
#ifdef NETDEBUG
std::cout << thread_id << "Error ssl context is null (check that openssl has been inited)" << std::endl;
#endif
log_ssl_errors("Error ssl context is null for %s", hostname.c_str());
return -1;
}
//set the timeout for the ssl session
if (SSL_CTX_set_timeout(ctx, 130l) < 1) {
SSL_CTX_free(ctx);
ctx = NULL;
return -1;
}
//load certs
ERR_clear_error();
if (certificate_path.length()) {
if (!SSL_CTX_load_verify_locations(ctx, NULL, certificate_path.c_str())) {
#ifdef NETDEBUG
std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
log_ssl_errors("couldnt load certificates from %s", certificate_path.c_str());
//tidy up
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
} else if (!SSL_CTX_set_default_verify_paths(ctx)) //use default if no certPpath given
{
#ifdef NETDEBUG
std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
log_ssl_errors("couldnt load default certificates for %s", hostname.c_str());
//tidy up
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
// add validation params
ERR_clear_error();
X509_VERIFY_PARAM *x509_param = X509_VERIFY_PARAM_new();
if (!x509_param) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
//X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
ERR_clear_error();
if (!X509_VERIFY_PARAM_set_flags(x509_param, X509_V_FLAG_TRUSTED_FIRST)) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
ERR_clear_error();
if (!SSL_CTX_set1_param(ctx, x509_param)) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
X509_VERIFY_PARAM_free(x509_param); // try not freeing this as SSL_CTX_free seems to be ring to free it
//hand socket over to ssl lib
ERR_clear_error();
ssl = SSL_new(ctx);
SSL_set_options(ssl, SSL_OP_ALL);
SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY);
SSL_set_connect_state(ssl);
//fcntl(this->getFD() ,F_SETFL, O_NONBLOCK); // blocking mode used currently
SSL_set_fd(ssl, this->getFD());
SSL_set_tlsext_host_name(ssl, hostname.c_str());
#if OPENSSL_VERSION_NUMBER < 0x10100000L
#else
X509_VERIFY_PARAM_set1_host(SSL_get0_param(ssl),hostname.c_str(),0);
#endif
//make io non blocking as select wont tell us if we can do a read without blocking
//BIO_set_nbio(SSL_get_rbio(ssl),1l); // blocking mode used currently
//BIO_set_nbio(SSL_get_wbio(ssl),1l); // blocking mode used currently
ERR_clear_error();
int rc = SSL_connect(ssl);
if (rc < 0) {
log_ssl_errors("ssl_connect failed to %s", hostname.c_str());
#ifdef NETDEBUG
std::cout << thread_id << "ssl_connect failed with error " << SSL_get_error(ssl, rc) << std::endl;
#endif
// tidy up
SSL_free(ssl);
ssl = NULL;
SSL_CTX_free(ctx);
ctx = NULL;
return -3;
}
//should be safer to do this last as nothing will ever try to use a ssl socket that isnt fully setup
isssl = true;
issslserver = false;
return 0;
}
|
137664252347139284732164058213112105028
|
Socket.cpp
|
316707147744091371767800459911917453562
|
CWE-295
|
CVE-2021-44273
|
e2guardian v5.4.x <= v5.4.3r is affected by missing SSL certificate validation in the SSL MITM engine. In standalone mode (i.e., acting as a proxy or a transparent proxy), with SSL MITM enabled, e2guardian, if built with OpenSSL v1.1.x, did not validate hostnames in certificates of the web servers that it connected to, and thus was itself vulnerable to MITM attacks.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-44273
|
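With OpenSSL 1.1+, hostname checking is opt-in on the client side: the expected peer name must be pinned on the connection's verification parameters, which is exactly what the patch does with X509_VERIFY_PARAM_set1_host(). A minimal sketch of that setup follows (error handling elided; `fd` is an already-connected socket; the SSL_VERIFY_PEER call goes beyond this commit but is the usual companion so a mismatch actually fails the handshake):

#include <openssl/ssl.h>
#include <openssl/x509_vfy.h>

SSL *open_verified_client(SSL_CTX *ctx, int fd, const char *hostname) {
    SSL *ssl = SSL_new(ctx);
    SSL_set_fd(ssl, fd);
    SSL_set_tlsext_host_name(ssl, hostname);    // SNI for virtual hosting
    // The crux of the fix: pin the expected peer name so certificate chain
    // verification also checks the subjectAltName/CN entries.
    X509_VERIFY_PARAM_set1_host(SSL_get0_param(ssl), hostname, 0);
    SSL_set_verify(ssl, SSL_VERIFY_PEER, NULL); // fail the handshake on mismatch
    if (SSL_connect(ssl) != 1) {
        SSL_free(ssl);
        return NULL;
    }
    return ssl;
}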
195,220
|
tmate-ssh-server
|
1c020d1f5ca462f5b150b46a027aaa1bbe3c9596
|
https://github.com/tmate-io/tmate-ssh-server
|
https://github.com/tmate-io/tmate-ssh-server/commit/1c020d1f5ca462f5b150b46a027aaa1bbe3c9596
|
Harden /tmp/tmate directory
Suggested by Matthias Gerstner
| 1
|
int main(int argc, char **argv, char **envp)
{
int opt;
while ((opt = getopt(argc, argv, "b:h:k:p:q:w:z:xv")) != -1) {
switch (opt) {
case 'b':
tmate_settings->bind_addr = xstrdup(optarg);
break;
case 'h':
tmate_settings->tmate_host = xstrdup(optarg);
break;
case 'k':
tmate_settings->keys_dir = xstrdup(optarg);
break;
case 'p':
tmate_settings->ssh_port = atoi(optarg);
break;
case 'q':
tmate_settings->ssh_port_advertized = atoi(optarg);
break;
case 'w':
tmate_settings->websocket_hostname = xstrdup(optarg);
break;
case 'z':
tmate_settings->websocket_port = atoi(optarg);
break;
case 'x':
tmate_settings->use_proxy_protocol = true;
break;
case 'v':
tmate_settings->log_level++;
break;
default:
usage();
return 1;
}
}
init_logging(tmate_settings->log_level);
setup_locale();
if (!tmate_settings->tmate_host)
tmate_settings->tmate_host = get_full_hostname();
cmdline = *argv;
cmdline_end = *envp;
tmate_preload_trace_lib();
tmate_catch_sigsegv();
tmate_init_rand();
if ((mkdir(TMATE_WORKDIR, 0701) < 0 && errno != EEXIST) ||
(mkdir(TMATE_WORKDIR "/sessions", 0703) < 0 && errno != EEXIST) ||
(mkdir(TMATE_WORKDIR "/jail", 0700) < 0 && errno != EEXIST))
tmate_fatal("Cannot prepare session in " TMATE_WORKDIR);
/* The websocket server needs to access the /session dir to rename sockets */
if ((chmod(TMATE_WORKDIR, 0701) < 0) ||
(chmod(TMATE_WORKDIR "/sessions", 0703) < 0) ||
(chmod(TMATE_WORKDIR "/jail", 0700) < 0))
tmate_fatal("Cannot prepare session in " TMATE_WORKDIR);
tmate_ssh_server_main(tmate_session,
tmate_settings->keys_dir, tmate_settings->bind_addr, tmate_settings->ssh_port);
return 0;
}
|
154027151645284385944526585123822701001
|
tmate-main.c
|
280350825550794138629823084137678566150
|
CWE-362
|
CVE-2021-44512
|
World-writable permissions on the /tmp/tmate/sessions directory in tmate-ssh-server 2.3.0 allow a local attacker to compromise the integrity of session handling, or obtain the read-write session ID from a read-only session symlink in this directory.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-44512
|
222,666
|
tmate-ssh-server
|
1c020d1f5ca462f5b150b46a027aaa1bbe3c9596
|
https://github.com/tmate-io/tmate-ssh-server
|
https://github.com/tmate-io/tmate-ssh-server/commit/1c020d1f5ca462f5b150b46a027aaa1bbe3c9596
|
Harden /tmp/tmate directory
Suggested by Matthias Gerstner
| 0
|
int main(int argc, char **argv, char **envp)
{
int opt;
while ((opt = getopt(argc, argv, "b:h:k:p:q:w:z:xv")) != -1) {
switch (opt) {
case 'b':
tmate_settings->bind_addr = xstrdup(optarg);
break;
case 'h':
tmate_settings->tmate_host = xstrdup(optarg);
break;
case 'k':
tmate_settings->keys_dir = xstrdup(optarg);
break;
case 'p':
tmate_settings->ssh_port = atoi(optarg);
break;
case 'q':
tmate_settings->ssh_port_advertized = atoi(optarg);
break;
case 'w':
tmate_settings->websocket_hostname = xstrdup(optarg);
break;
case 'z':
tmate_settings->websocket_port = atoi(optarg);
break;
case 'x':
tmate_settings->use_proxy_protocol = true;
break;
case 'v':
tmate_settings->log_level++;
break;
default:
usage();
return 1;
}
}
init_logging(tmate_settings->log_level);
setup_locale();
if (!tmate_settings->tmate_host)
tmate_settings->tmate_host = get_full_hostname();
cmdline = *argv;
cmdline_end = *envp;
tmate_preload_trace_lib();
tmate_catch_sigsegv();
tmate_init_rand();
if ((mkdir(TMATE_WORKDIR, 0700) < 0 && errno != EEXIST) ||
(mkdir(TMATE_WORKDIR "/sessions", 0700) < 0 && errno != EEXIST) ||
(mkdir(TMATE_WORKDIR "/jail", 0700) < 0 && errno != EEXIST))
tmate_fatal("Cannot prepare session in " TMATE_WORKDIR);
if ((chmod(TMATE_WORKDIR, 0700) < 0) ||
(chmod(TMATE_WORKDIR "/sessions", 0700) < 0) ||
(chmod(TMATE_WORKDIR "/jail", 0700) < 0))
tmate_fatal("Cannot prepare session in " TMATE_WORKDIR);
if (check_owned_directory_mode(TMATE_WORKDIR, 0700) ||
check_owned_directory_mode(TMATE_WORKDIR "/sessions", 0700) ||
check_owned_directory_mode(TMATE_WORKDIR "/jail", 0700))
tmate_fatal(TMATE_WORKDIR " and subdirectories has incorrect ownership/mode. "
"Try deleting " TMATE_WORKDIR " and try again");
tmate_ssh_server_main(tmate_session,
tmate_settings->keys_dir, tmate_settings->bind_addr, tmate_settings->ssh_port);
return 0;
}
|
271664953315961811306024013861867052587
|
tmate-main.c
|
90810006588409733709876992634170493502
|
CWE-362
|
CVE-2021-44512
|
World-writable permissions on the /tmp/tmate/sessions directory in tmate-ssh-server 2.3.0 allow a local attacker to compromise the integrity of session handling, or obtain the read-write session ID from a read-only session symlink in this directory.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-44512
|
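The hardened version tightens every directory to 0700 and then re-checks ownership and mode, refusing to reuse anything pre-created by another local user. A minimal sketch of that create-then-verify pattern, with prepare_private_dir() as a hypothetical stand-in for check_owned_directory_mode():

#include <sys/stat.h>
#include <unistd.h>
#include <cerrno>

// Create-then-verify: make the directory 0700, then confirm with lstat()
// that it is a real directory (not a planted symlink) owned by us with
// exactly that mode, instead of silently reusing whatever already exists.
static bool prepare_private_dir(const char *path) {
    if (mkdir(path, 0700) < 0 && errno != EEXIST)
        return false;
    struct stat st;
    if (lstat(path, &st) < 0)
        return false;
    return S_ISDIR(st.st_mode) &&          // a symlink would fail this test
           st.st_uid == getuid() &&        // owned by the current user
           (st.st_mode & 07777) == 0700;   // no group/other access bits
}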
195,230
|
pjproject
|
f74c1fc22b760d2a24369aa72c74c4a9ab985859
|
https://github.com/pjsip/pjproject
|
https://github.com/pjsip/pjproject/commit/f74c1fc22b760d2a24369aa72c74c4a9ab985859
|
Merge pull request from GHSA-r374-qrwv-86hh
| 1
|
void pjmedia_rtcp_xr_rx_rtcp_xr( pjmedia_rtcp_xr_session *sess,
const void *pkt,
pj_size_t size)
{
const pjmedia_rtcp_xr_pkt *rtcp_xr = (pjmedia_rtcp_xr_pkt*) pkt;
const pjmedia_rtcp_xr_rb_rr_time *rb_rr_time = NULL;
const pjmedia_rtcp_xr_rb_dlrr *rb_dlrr = NULL;
const pjmedia_rtcp_xr_rb_stats *rb_stats = NULL;
const pjmedia_rtcp_xr_rb_voip_mtc *rb_voip_mtc = NULL;
const pjmedia_rtcp_xr_rb_header *rb_hdr = (pjmedia_rtcp_xr_rb_header*)
rtcp_xr->buf;
unsigned pkt_len, rb_len;
if (rtcp_xr->common.pt != RTCP_XR)
return;
pkt_len = pj_ntohs((pj_uint16_t)rtcp_xr->common.length);
if ((pkt_len + 1) > (size / 4))
return;
/* Parse report rpt_types */
while ((pj_int32_t*)rb_hdr < (pj_int32_t*)pkt + pkt_len)
{
rb_len = pj_ntohs((pj_uint16_t)rb_hdr->length);
/* Just skip any block with length == 0 (no report content) */
if (rb_len) {
switch (rb_hdr->bt) {
case BT_RR_TIME:
rb_rr_time = (pjmedia_rtcp_xr_rb_rr_time*) rb_hdr;
break;
case BT_DLRR:
rb_dlrr = (pjmedia_rtcp_xr_rb_dlrr*) rb_hdr;
break;
case BT_STATS:
rb_stats = (pjmedia_rtcp_xr_rb_stats*) rb_hdr;
break;
case BT_VOIP_METRICS:
rb_voip_mtc = (pjmedia_rtcp_xr_rb_voip_mtc*) rb_hdr;
break;
default:
break;
}
}
rb_hdr = (pjmedia_rtcp_xr_rb_header*)
((pj_int32_t*)rb_hdr + rb_len + 1);
}
/* Receiving RR Time */
if (rb_rr_time) {
/* Save LRR from NTP timestamp of the RR time block report */
sess->rx_lrr = ((pj_ntohl(rb_rr_time->ntp_sec) & 0x0000FFFF) << 16) |
((pj_ntohl(rb_rr_time->ntp_frac) >> 16) & 0xFFFF);
/* Calculate RR arrival time for DLRR */
pj_get_timestamp(&sess->rx_lrr_time);
TRACE_((sess->name, "Rx RTCP SR: ntp_ts=%p", sess->rx_lrr,
(pj_uint32_t)(sess->rx_lrr_time.u64*65536/
sess->rtcp_session->ts_freq.u64)));
}
/* Receiving DLRR */
if (rb_dlrr) {
pj_uint32_t lrr, now, dlrr;
pj_uint64_t eedelay;
pjmedia_rtcp_ntp_rec ntp;
/* LRR is the middle 32bit of NTP. It has 1/65536 second
* resolution
*/
lrr = pj_ntohl(rb_dlrr->item.lrr);
/* DLRR is delay since LRR, also in 1/65536 resolution */
dlrr = pj_ntohl(rb_dlrr->item.dlrr);
/* Get current time, and convert to 1/65536 resolution */
pjmedia_rtcp_get_ntp_time(sess->rtcp_session, &ntp);
now = ((ntp.hi & 0xFFFF) << 16) + (ntp.lo >> 16);
/* End-to-end delay is (now-lrr-dlrr) */
eedelay = now - lrr - dlrr;
/* Convert end to end delay to usec (keeping the calculation in
* 64bit space)::
* sess->ee_delay = (eedelay * 1000) / 65536;
*/
if (eedelay < 4294) {
eedelay = (eedelay * 1000000) >> 16;
} else {
eedelay = (eedelay * 1000) >> 16;
eedelay *= 1000;
}
TRACE_((sess->name, "Rx RTCP XR DLRR: lrr=%p, dlrr=%p (%d:%03dms), "
"now=%p, rtt=%p",
lrr, dlrr, dlrr/65536, (dlrr%65536)*1000/65536,
now, (pj_uint32_t)eedelay));
/* Only save calculation if "now" is greater than lrr, or
* otherwise rtt will be invalid
*/
if (now-dlrr >= lrr) {
unsigned rtt = (pj_uint32_t)eedelay;
/* Check that eedelay value really makes sense.
* We allow up to 30 seconds RTT!
*/
if (eedelay <= 30 * 1000 * 1000UL) {
/* "Normalize" rtt value that is exceptionally high.
* For such values, "normalize" the rtt to be three times
* the average value.
*/
if (rtt>((unsigned)sess->stat.rtt.mean*3) && sess->stat.rtt.n!=0)
{
unsigned orig_rtt = rtt;
rtt = (unsigned)sess->stat.rtt.mean*3;
PJ_LOG(5,(sess->name,
"RTT value %d usec is normalized to %d usec",
orig_rtt, rtt));
}
TRACE_((sess->name, "RTCP RTT is set to %d usec", rtt));
pj_math_stat_update(&sess->stat.rtt, rtt);
}
} else {
PJ_LOG(5, (sess->name, "Internal RTCP NTP clock skew detected: "
"lrr=%p, now=%p, dlrr=%p (%d:%03dms), "
"diff=%d",
lrr, now, dlrr, dlrr/65536,
(dlrr%65536)*1000/65536,
dlrr-(now-lrr)));
}
}
/* Receiving Statistics Summary */
if (rb_stats) {
pj_uint8_t flags = rb_stats->header.specific;
pj_bzero(&sess->stat.tx.stat_sum, sizeof(sess->stat.tx.stat_sum));
/* Range of packets sequence reported in this blocks */
sess->stat.tx.stat_sum.begin_seq = pj_ntohs(rb_stats->begin_seq);
sess->stat.tx.stat_sum.end_seq = pj_ntohs(rb_stats->end_seq);
/* Get flags of valid fields */
sess->stat.tx.stat_sum.l = (flags & (1 << 7)) != 0;
sess->stat.tx.stat_sum.d = (flags & (1 << 6)) != 0;
sess->stat.tx.stat_sum.j = (flags & (1 << 5)) != 0;
sess->stat.tx.stat_sum.t = (flags & (3 << 3)) != 0;
/* Fetch the reports info */
if (sess->stat.tx.stat_sum.l) {
sess->stat.tx.stat_sum.lost = pj_ntohl(rb_stats->lost);
}
if (sess->stat.tx.stat_sum.d) {
sess->stat.tx.stat_sum.dup = pj_ntohl(rb_stats->dup);
}
if (sess->stat.tx.stat_sum.j) {
sess->stat.tx.stat_sum.jitter.min = pj_ntohl(rb_stats->jitter_min);
sess->stat.tx.stat_sum.jitter.max = pj_ntohl(rb_stats->jitter_max);
sess->stat.tx.stat_sum.jitter.mean= pj_ntohl(rb_stats->jitter_mean);
pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.jitter,
pj_ntohl(rb_stats->jitter_dev));
}
if (sess->stat.tx.stat_sum.t) {
sess->stat.tx.stat_sum.toh.min = rb_stats->toh_min;
sess->stat.tx.stat_sum.toh.max = rb_stats->toh_max;
sess->stat.tx.stat_sum.toh.mean= rb_stats->toh_mean;
pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.toh,
pj_ntohl(rb_stats->toh_dev));
}
pj_gettimeofday(&sess->stat.tx.stat_sum.update);
}
/* Receiving VoIP Metrics */
if (rb_voip_mtc) {
sess->stat.tx.voip_mtc.loss_rate = rb_voip_mtc->loss_rate;
sess->stat.tx.voip_mtc.discard_rate = rb_voip_mtc->discard_rate;
sess->stat.tx.voip_mtc.burst_den = rb_voip_mtc->burst_den;
sess->stat.tx.voip_mtc.gap_den = rb_voip_mtc->gap_den;
sess->stat.tx.voip_mtc.burst_dur = pj_ntohs(rb_voip_mtc->burst_dur);
sess->stat.tx.voip_mtc.gap_dur = pj_ntohs(rb_voip_mtc->gap_dur);
sess->stat.tx.voip_mtc.rnd_trip_delay =
pj_ntohs(rb_voip_mtc->rnd_trip_delay);
sess->stat.tx.voip_mtc.end_sys_delay =
pj_ntohs(rb_voip_mtc->end_sys_delay);
/* signal & noise level encoded in two's complement form */
sess->stat.tx.voip_mtc.signal_lvl = (pj_int8_t)
((rb_voip_mtc->signal_lvl > 127)?
((int)rb_voip_mtc->signal_lvl - 256) :
rb_voip_mtc->signal_lvl);
sess->stat.tx.voip_mtc.noise_lvl = (pj_int8_t)
((rb_voip_mtc->noise_lvl > 127)?
((int)rb_voip_mtc->noise_lvl - 256) :
rb_voip_mtc->noise_lvl);
sess->stat.tx.voip_mtc.rerl = rb_voip_mtc->rerl;
sess->stat.tx.voip_mtc.gmin = rb_voip_mtc->gmin;
sess->stat.tx.voip_mtc.r_factor = rb_voip_mtc->r_factor;
sess->stat.tx.voip_mtc.ext_r_factor = rb_voip_mtc->ext_r_factor;
sess->stat.tx.voip_mtc.mos_lq = rb_voip_mtc->mos_lq;
sess->stat.tx.voip_mtc.mos_cq = rb_voip_mtc->mos_cq;
sess->stat.tx.voip_mtc.rx_config = rb_voip_mtc->rx_config;
sess->stat.tx.voip_mtc.jb_nom = pj_ntohs(rb_voip_mtc->jb_nom);
sess->stat.tx.voip_mtc.jb_max = pj_ntohs(rb_voip_mtc->jb_max);
sess->stat.tx.voip_mtc.jb_abs_max = pj_ntohs(rb_voip_mtc->jb_abs_max);
pj_gettimeofday(&sess->stat.tx.voip_mtc.update);
}
}
|
128531615202269817130665554219664776865
|
rtcp_xr.c
|
114410540091951766279707779044798368853
|
CWE-125
|
CVE-2021-43845
|
PJSIP is a free and open source multimedia communication library. In version 2.11.1 and prior, if an incoming RTCP XR message contains a block, the data field is not checked against the received packet size, potentially resulting in an out-of-bounds read. This affects all users that use PJMEDIA and RTCP XR. A malicious actor can send an RTCP XR message with an invalid packet size.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-43845
|
222,737
|
pjproject
|
f74c1fc22b760d2a24369aa72c74c4a9ab985859
|
https://github.com/pjsip/pjproject
|
https://github.com/pjsip/pjproject/commit/f74c1fc22b760d2a24369aa72c74c4a9ab985859
|
Merge pull request from GHSA-r374-qrwv-86hh
| 0
|
void pjmedia_rtcp_xr_rx_rtcp_xr( pjmedia_rtcp_xr_session *sess,
const void *pkt,
pj_size_t size)
{
const pjmedia_rtcp_xr_pkt *rtcp_xr = (pjmedia_rtcp_xr_pkt*) pkt;
const pjmedia_rtcp_xr_rb_rr_time *rb_rr_time = NULL;
const pjmedia_rtcp_xr_rb_dlrr *rb_dlrr = NULL;
const pjmedia_rtcp_xr_rb_stats *rb_stats = NULL;
const pjmedia_rtcp_xr_rb_voip_mtc *rb_voip_mtc = NULL;
const pjmedia_rtcp_xr_rb_header *rb_hdr = (pjmedia_rtcp_xr_rb_header*)
rtcp_xr->buf;
unsigned pkt_len, rb_len;
if (rtcp_xr->common.pt != RTCP_XR)
return;
pkt_len = pj_ntohs((pj_uint16_t)rtcp_xr->common.length);
if ((pkt_len + 1) > (size / 4))
return;
/* Parse report rpt_types */
while ((pj_int32_t*)rb_hdr < (pj_int32_t*)pkt + pkt_len)
{
rb_len = pj_ntohs((pj_uint16_t)rb_hdr->length);
/* Just skip any block with length == 0 (no report content) */
if (rb_len) {
switch (rb_hdr->bt) {
case BT_RR_TIME:
if ((char*)rb_hdr + sizeof(*rb_rr_time) <=
(char*)pkt + size)
{
rb_rr_time = (pjmedia_rtcp_xr_rb_rr_time*)rb_hdr;
}
break;
case BT_DLRR:
if ((char*)rb_hdr + sizeof(*rb_dlrr) <=
(char*)pkt + size)
{
rb_dlrr = (pjmedia_rtcp_xr_rb_dlrr*)rb_hdr;
}
break;
case BT_STATS:
if ((char*)rb_hdr + sizeof(*rb_stats) <=
(char*)pkt + size)
{
rb_stats = (pjmedia_rtcp_xr_rb_stats*)rb_hdr;
}
break;
case BT_VOIP_METRICS:
if ((char*)rb_hdr + sizeof(*rb_voip_mtc) <=
(char*)pkt + size)
{
rb_voip_mtc = (pjmedia_rtcp_xr_rb_voip_mtc*)rb_hdr;
}
break;
default:
break;
}
}
rb_hdr = (pjmedia_rtcp_xr_rb_header*)
((pj_int32_t*)rb_hdr + rb_len + 1);
}
/* Receiving RR Time */
if (rb_rr_time) {
/* Save LRR from NTP timestamp of the RR time block report */
sess->rx_lrr = ((pj_ntohl(rb_rr_time->ntp_sec) & 0x0000FFFF) << 16) |
((pj_ntohl(rb_rr_time->ntp_frac) >> 16) & 0xFFFF);
/* Calculate RR arrival time for DLRR */
pj_get_timestamp(&sess->rx_lrr_time);
TRACE_((sess->name, "Rx RTCP SR: ntp_ts=%p", sess->rx_lrr,
(pj_uint32_t)(sess->rx_lrr_time.u64*65536/
sess->rtcp_session->ts_freq.u64)));
}
/* Receiving DLRR */
if (rb_dlrr) {
pj_uint32_t lrr, now, dlrr;
pj_uint64_t eedelay;
pjmedia_rtcp_ntp_rec ntp;
/* LRR is the middle 32bit of NTP. It has 1/65536 second
* resolution
*/
lrr = pj_ntohl(rb_dlrr->item.lrr);
/* DLRR is delay since LRR, also in 1/65536 resolution */
dlrr = pj_ntohl(rb_dlrr->item.dlrr);
/* Get current time, and convert to 1/65536 resolution */
pjmedia_rtcp_get_ntp_time(sess->rtcp_session, &ntp);
now = ((ntp.hi & 0xFFFF) << 16) + (ntp.lo >> 16);
/* End-to-end delay is (now-lrr-dlrr) */
eedelay = now - lrr - dlrr;
/* Convert end to end delay to usec (keeping the calculation in
* 64bit space)::
* sess->ee_delay = (eedelay * 1000) / 65536;
*/
if (eedelay < 4294) {
eedelay = (eedelay * 1000000) >> 16;
} else {
eedelay = (eedelay * 1000) >> 16;
eedelay *= 1000;
}
TRACE_((sess->name, "Rx RTCP XR DLRR: lrr=%p, dlrr=%p (%d:%03dms), "
"now=%p, rtt=%p",
lrr, dlrr, dlrr/65536, (dlrr%65536)*1000/65536,
now, (pj_uint32_t)eedelay));
/* Only save calculation if "now" is greater than lrr, or
* otherwise rtt will be invalid
*/
if (now-dlrr >= lrr) {
unsigned rtt = (pj_uint32_t)eedelay;
/* Check that eedelay value really makes sense.
* We allow up to 30 seconds RTT!
*/
if (eedelay <= 30 * 1000 * 1000UL) {
/* "Normalize" rtt value that is exceptionally high.
* For such values, "normalize" the rtt to be three times
* the average value.
*/
if (rtt>((unsigned)sess->stat.rtt.mean*3) && sess->stat.rtt.n!=0)
{
unsigned orig_rtt = rtt;
rtt = (unsigned)sess->stat.rtt.mean*3;
PJ_LOG(5,(sess->name,
"RTT value %d usec is normalized to %d usec",
orig_rtt, rtt));
}
TRACE_((sess->name, "RTCP RTT is set to %d usec", rtt));
pj_math_stat_update(&sess->stat.rtt, rtt);
}
} else {
PJ_LOG(5, (sess->name, "Internal RTCP NTP clock skew detected: "
"lrr=%p, now=%p, dlrr=%p (%d:%03dms), "
"diff=%d",
lrr, now, dlrr, dlrr/65536,
(dlrr%65536)*1000/65536,
dlrr-(now-lrr)));
}
}
/* Receiving Statistics Summary */
if (rb_stats) {
pj_uint8_t flags = rb_stats->header.specific;
pj_bzero(&sess->stat.tx.stat_sum, sizeof(sess->stat.tx.stat_sum));
/* Range of packets sequence reported in this blocks */
sess->stat.tx.stat_sum.begin_seq = pj_ntohs(rb_stats->begin_seq);
sess->stat.tx.stat_sum.end_seq = pj_ntohs(rb_stats->end_seq);
/* Get flags of valid fields */
sess->stat.tx.stat_sum.l = (flags & (1 << 7)) != 0;
sess->stat.tx.stat_sum.d = (flags & (1 << 6)) != 0;
sess->stat.tx.stat_sum.j = (flags & (1 << 5)) != 0;
sess->stat.tx.stat_sum.t = (flags & (3 << 3)) != 0;
/* Fetch the reports info */
if (sess->stat.tx.stat_sum.l) {
sess->stat.tx.stat_sum.lost = pj_ntohl(rb_stats->lost);
}
if (sess->stat.tx.stat_sum.d) {
sess->stat.tx.stat_sum.dup = pj_ntohl(rb_stats->dup);
}
if (sess->stat.tx.stat_sum.j) {
sess->stat.tx.stat_sum.jitter.min = pj_ntohl(rb_stats->jitter_min);
sess->stat.tx.stat_sum.jitter.max = pj_ntohl(rb_stats->jitter_max);
sess->stat.tx.stat_sum.jitter.mean= pj_ntohl(rb_stats->jitter_mean);
pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.jitter,
pj_ntohl(rb_stats->jitter_dev));
}
if (sess->stat.tx.stat_sum.t) {
sess->stat.tx.stat_sum.toh.min = rb_stats->toh_min;
sess->stat.tx.stat_sum.toh.max = rb_stats->toh_max;
sess->stat.tx.stat_sum.toh.mean= rb_stats->toh_mean;
pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.toh,
pj_ntohl(rb_stats->toh_dev));
}
pj_gettimeofday(&sess->stat.tx.stat_sum.update);
}
/* Receiving VoIP Metrics */
if (rb_voip_mtc) {
sess->stat.tx.voip_mtc.loss_rate = rb_voip_mtc->loss_rate;
sess->stat.tx.voip_mtc.discard_rate = rb_voip_mtc->discard_rate;
sess->stat.tx.voip_mtc.burst_den = rb_voip_mtc->burst_den;
sess->stat.tx.voip_mtc.gap_den = rb_voip_mtc->gap_den;
sess->stat.tx.voip_mtc.burst_dur = pj_ntohs(rb_voip_mtc->burst_dur);
sess->stat.tx.voip_mtc.gap_dur = pj_ntohs(rb_voip_mtc->gap_dur);
sess->stat.tx.voip_mtc.rnd_trip_delay =
pj_ntohs(rb_voip_mtc->rnd_trip_delay);
sess->stat.tx.voip_mtc.end_sys_delay =
pj_ntohs(rb_voip_mtc->end_sys_delay);
/* signal & noise level encoded in two's complement form */
sess->stat.tx.voip_mtc.signal_lvl = (pj_int8_t)
((rb_voip_mtc->signal_lvl > 127)?
((int)rb_voip_mtc->signal_lvl - 256) :
rb_voip_mtc->signal_lvl);
sess->stat.tx.voip_mtc.noise_lvl = (pj_int8_t)
((rb_voip_mtc->noise_lvl > 127)?
((int)rb_voip_mtc->noise_lvl - 256) :
rb_voip_mtc->noise_lvl);
sess->stat.tx.voip_mtc.rerl = rb_voip_mtc->rerl;
sess->stat.tx.voip_mtc.gmin = rb_voip_mtc->gmin;
sess->stat.tx.voip_mtc.r_factor = rb_voip_mtc->r_factor;
sess->stat.tx.voip_mtc.ext_r_factor = rb_voip_mtc->ext_r_factor;
sess->stat.tx.voip_mtc.mos_lq = rb_voip_mtc->mos_lq;
sess->stat.tx.voip_mtc.mos_cq = rb_voip_mtc->mos_cq;
sess->stat.tx.voip_mtc.rx_config = rb_voip_mtc->rx_config;
sess->stat.tx.voip_mtc.jb_nom = pj_ntohs(rb_voip_mtc->jb_nom);
sess->stat.tx.voip_mtc.jb_max = pj_ntohs(rb_voip_mtc->jb_max);
sess->stat.tx.voip_mtc.jb_abs_max = pj_ntohs(rb_voip_mtc->jb_abs_max);
pj_gettimeofday(&sess->stat.tx.voip_mtc.update);
}
}
|
134123814969944330757535232446891920936
|
rtcp_xr.c
|
238632209992792964107720594546671940595
|
CWE-125
|
CVE-2021-43845
|
PJSIP is a free and open source multimedia communication library. In version 2.11.1 and prior, if an incoming RTCP XR message contains a block, the data field is not checked against the received packet size, potentially resulting in an out-of-bounds read. This affects all users that use PJMEDIA and RTCP XR. A malicious actor can send an RTCP XR message with an invalid packet size.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-43845
|
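Each case in the fixed parser now confirms that the full typed report block fits inside the received packet before casting the header pointer. A minimal sketch of that bounds check; BlockHeader, DlrrBlock, and as_dlrr() are hypothetical stand-ins for the pjmedia_rtcp_xr_rb_* structures:

#include <cstddef>
#include <cstdint>

struct BlockHeader { std::uint8_t bt; std::uint8_t specific; std::uint16_t length; };
struct DlrrBlock   { BlockHeader header; std::uint32_t ssrc, lrr, dlrr; };

// Mirrors the check added per block type: only treat the header as a full
// typed block if the whole block lies inside the received packet.
const DlrrBlock *as_dlrr(const std::uint8_t *pkt, std::size_t size,
                         const BlockHeader *hdr) {
    const std::uint8_t *end = pkt + size;
    const std::uint8_t *p = reinterpret_cast<const std::uint8_t *>(hdr);
    if (p + sizeof(DlrrBlock) > end)
        return nullptr;                    // would read past the packet
    return reinterpret_cast<const DlrrBlock *>(hdr);
}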
195,231
|
gpac
|
893fb99b606eebfae46cde151846a980e689039b
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/893fb99b606eebfae46cde151846a980e689039b
|
fixed #1902
| 1
|
s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc)
{
u8 idr_flag;
s32 slice, ret;
u32 nal_hdr;
AVCSliceInfo n_state;
gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
nal_hdr = gf_bs_read_u8(bs);
slice = 0;
memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo));
avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F;
n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3;
idr_flag = 0;
switch (n_state.nal_unit_type) {
case GF_AVC_NALU_ACCESS_UNIT:
case GF_AVC_NALU_END_OF_SEQ:
case GF_AVC_NALU_END_OF_STREAM:
ret = 1;
break;
case GF_AVC_NALU_SVC_SLICE:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
// slice buffer - read the info and compare.
/*ret = */svc_parse_slice(bs, avc, &n_state);
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
avc_compute_poc(&n_state);
if (avc->s_info.poc != n_state.poc) {
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 1;
}
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 0;
case GF_AVC_NALU_SVC_PREFIX_NALU:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
return 0;
case GF_AVC_NALU_IDR_SLICE:
case GF_AVC_NALU_NON_IDR_SLICE:
case GF_AVC_NALU_DP_A_SLICE:
case GF_AVC_NALU_DP_B_SLICE:
case GF_AVC_NALU_DP_C_SLICE:
slice = 1;
/* slice buffer - read the info and compare.*/
ret = avc_parse_slice(bs, avc, idr_flag, &n_state);
if (ret < 0) return ret;
ret = 0;
if (
((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE))
&& (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE)
) {
break;
}
if (avc->s_info.frame_num != n_state.frame_num) {
ret = 1;
break;
}
if (avc->s_info.field_pic_flag != n_state.field_pic_flag) {
ret = 1;
break;
}
if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) &&
(!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) {
ret = 1;
break;
}
assert(avc->s_info.sps);
if (avc->s_info.sps->poc_type == n_state.sps->poc_type) {
if (!avc->s_info.sps->poc_type) {
if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) {
ret = 1;
break;
}
if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) {
ret = 1;
break;
}
}
else if (avc->s_info.sps->poc_type == 1) {
if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) {
ret = 1;
break;
}
if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) {
ret = 1;
break;
}
}
}
if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) {
if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/
ret = 1;
break;
}
else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/
ret = 1;
break;
}
}
break;
case GF_AVC_NALU_SEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_PIC_PARAM:
avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEQ_PARAM_EXT:
avc->last_ps_idx = (s32) gf_bs_read_ue(bs);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEI:
case GF_AVC_NALU_FILLER_DATA:
return 0;
default:
if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1;
//To detect change of AU when multiple sps and pps in stream
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else
ret = 0;
break;
}
/* save _prev values */
if (ret && avc->s_info.sps) {
n_state.frame_num_offset_prev = avc->s_info.frame_num_offset;
if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0))
n_state.frame_num_prev = avc->s_info.frame_num;
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
}
if (slice)
avc_compute_poc(&n_state);
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return ret;
}
|
99100226875075764129164909998725433232
|
av_parsers.c
|
168517587328341017594269375399465893964
|
CWE-476
|
CVE-2021-40565
|
A segmentation fault caused by a null pointer dereference exists in Gpac through 1.0.1 in the gf_avc_parse_nalu function in av_parsers.c when using mp4box, causing a denial of service.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40565
|
222,739
|
gpac
|
893fb99b606eebfae46cde151846a980e689039b
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/893fb99b606eebfae46cde151846a980e689039b
|
fixed #1902
| 0
|
s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc)
{
u8 idr_flag;
s32 slice, ret;
u32 nal_hdr;
AVCSliceInfo n_state;
gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
nal_hdr = gf_bs_read_u8(bs);
slice = 0;
memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo));
avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F;
n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3;
idr_flag = 0;
switch (n_state.nal_unit_type) {
case GF_AVC_NALU_ACCESS_UNIT:
case GF_AVC_NALU_END_OF_SEQ:
case GF_AVC_NALU_END_OF_STREAM:
ret = 1;
break;
case GF_AVC_NALU_SVC_SLICE:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
// slice buffer - read the info and compare.
/*ret = */svc_parse_slice(bs, avc, &n_state);
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
avc_compute_poc(&n_state);
if (avc->s_info.poc != n_state.poc) {
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 1;
}
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 0;
case GF_AVC_NALU_SVC_PREFIX_NALU:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
return 0;
case GF_AVC_NALU_IDR_SLICE:
case GF_AVC_NALU_NON_IDR_SLICE:
case GF_AVC_NALU_DP_A_SLICE:
case GF_AVC_NALU_DP_B_SLICE:
case GF_AVC_NALU_DP_C_SLICE:
slice = 1;
/* slice buffer - read the info and compare.*/
ret = avc_parse_slice(bs, avc, idr_flag, &n_state);
if (ret < 0) return ret;
ret = 0;
if (
((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE))
&& (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE)
) {
break;
}
if (avc->s_info.frame_num != n_state.frame_num) {
ret = 1;
break;
}
if (avc->s_info.field_pic_flag != n_state.field_pic_flag) {
ret = 1;
break;
}
if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) &&
(!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) {
ret = 1;
break;
}
if (!avc->s_info.sps)
return -1;
if (avc->s_info.sps->poc_type == n_state.sps->poc_type) {
if (!avc->s_info.sps->poc_type) {
if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) {
ret = 1;
break;
}
if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) {
ret = 1;
break;
}
}
else if (avc->s_info.sps->poc_type == 1) {
if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) {
ret = 1;
break;
}
if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) {
ret = 1;
break;
}
}
}
if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) {
if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/
ret = 1;
break;
}
else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/
ret = 1;
break;
}
}
break;
case GF_AVC_NALU_SEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_PIC_PARAM:
avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEQ_PARAM_EXT:
avc->last_ps_idx = (s32) gf_bs_read_ue(bs);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEI:
case GF_AVC_NALU_FILLER_DATA:
return 0;
default:
if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1;
//To detect change of AU when multiple sps and pps in stream
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else
ret = 0;
break;
}
/* save _prev values */
if (ret && avc->s_info.sps) {
n_state.frame_num_offset_prev = avc->s_info.frame_num_offset;
if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0))
n_state.frame_num_prev = avc->s_info.frame_num;
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
}
if (slice)
avc_compute_poc(&n_state);
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return ret;
}
|
151148020238252563376772611575796474624
|
av_parsers.c
|
336095072032702615903888752582747164805
|
CWE-476
|
CVE-2021-40565
|
A segmentation fault caused by a null pointer dereference exists in Gpac through 1.0.1 in the gf_avc_parse_nalu function in av_parsers.c when using mp4box, causing a denial of service.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40565
|
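The fix replaces assert(avc->s_info.sps) with an explicit NULL check that returns an error, since assert() compiles out under NDEBUG and the dereference of a missing SPS would remain in release builds. A minimal C++ model of that pattern with hypothetical SPS/SliceInfo types:

#include <cstdio>

struct SPS { int poc_type; };
struct SliceInfo { const SPS *sps; };

// A missing SPS in attacker-controlled input is a parse error, not an
// invariant, so it must be checked explicitly.
int check_slice(const SliceInfo &s) {
    if (!s.sps)
        return -1;          // explicit error path (the fixed code)
    // assert(s.sps);       // the vulnerable pattern: a no-op under NDEBUG
    return s.sps->poc_type;
}

int main() {
    SliceInfo broken{nullptr};
    std::printf("%d\n", check_slice(broken));  // -1 instead of a crash
}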
195,233
|
tensorflow
|
97282c6d0d34476b6ba033f961590b783fa184cd
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/97282c6d0d34476b6ba033f961590b783fa184cd
|
Prevent a crash due to heap OOB write in grappler.
PiperOrigin-RevId: 408318417
Change-Id: If095feb8c001e3a8ac4a85b7387b81e8309df47d
| 1
|
Status SetUnknownShape(const NodeDef* node, int output_port) {
shape_inference::ShapeHandle shape =
GetUnknownOutputShape(node, output_port);
InferenceContext* ctx = GetContext(node);
if (ctx == nullptr) {
return errors::InvalidArgument("Missing context");
}
ctx->set_output(output_port, shape);
return Status::OK();
}
|
128649942367020682781968122888762734954
|
graph_properties.cc
|
2473148557397819170260688514824580473
|
CWE-787
|
CVE-2022-23566
|
Tensorflow is an Open Source Machine Learning Framework. TensorFlow is vulnerable to a heap OOB write in `Grappler`. The `set_output` function writes to an array at the specified index. Hence, this gives a malicious user a write primitive. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
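
A minimal C sketch of the write primitive and the bounds check the patch adds (illustrative names, not TensorFlow's API): an unvalidated, attacker-influenced index into a fixed-size output array must be rejected before the store.

#include <stdio.h>

#define NUM_OUTPUTS 4

/* Reject out-of-range ports before the store; without the check, a
   port taken from untrusted graph data becomes an arbitrary-index write. */
static int set_output_checked(double *outputs, int num_outputs,
                              int port, double value)
{
    if (port < 0 || port >= num_outputs)
        return -1;
    outputs[port] = value;
    return 0;
}

int main(void)
{
    double outputs[NUM_OUTPUTS] = { 0 };
    printf("%d\n", set_output_checked(outputs, NUM_OUTPUTS, 7, 1.0)); /* -1: rejected */
    printf("%d\n", set_output_checked(outputs, NUM_OUTPUTS, 2, 1.0)); /* 0: stored */
    return 0;
}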
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23566
|
222,906
|
tensorflow
|
97282c6d0d34476b6ba033f961590b783fa184cd
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/97282c6d0d34476b6ba033f961590b783fa184cd
|
Prevent a crash due to heap OOB write in grappler.
PiperOrigin-RevId: 408318417
Change-Id: If095feb8c001e3a8ac4a85b7387b81e8309df47d
| 0
|
Status SetUnknownShape(const NodeDef* node, int output_port) {
shape_inference::ShapeHandle shape =
GetUnknownOutputShape(node, output_port);
InferenceContext* ctx = GetContext(node);
if (ctx == nullptr) {
return errors::InvalidArgument("SetUnknownShape: Missing context");
}
if (output_port < 0 || output_port >= ctx->num_outputs()) {
return errors::InvalidArgument(
"SetUnknownShape: output_port must be in [0, ", ctx->num_outputs(),
") but was ", output_port);
}
ctx->set_output(output_port, shape);
return Status::OK();
}
|
99847293040079140904834991959660799210
|
graph_properties.cc
|
331642930758873609530408829371906855419
|
CWE-787
|
CVE-2022-23566
|
Tensorflow is an Open Source Machine Learning Framework. TensorFlow is vulnerable to a heap OOB write in `Grappler`. The `set_output` function writes to an array at the specified index. Hence, this gives a malicious user a write primitive. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23566
|
195,234
|
tensorflow
|
dcc21c7bc972b10b6fb95c2fb0f4ab5a59680ec2
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/dcc21c7bc972b10b6fb95c2fb0f4ab5a59680ec2
|
Eliminate debug `CHECK`-fail from `function.cc`
PiperOrigin-RevId: 409416119
Change-Id: I8376ee464d434e9b970ff0ad49edfdaa2a273cfe
| 1
|
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values,
const FunctionDef::ArgAttrs* arg_attrs,
bool ints_on_device,
int64_t resource_arg_unique_id) {
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(
ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
if (dtypes.size() < size_t{1}) {
return errors::Internal("Expected a list of at least one dtype");
}
int arg_index = result_.nodes.size();
TF_RETURN_IF_ERROR(
AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes}));
// Creates dtypes.size() nodes in the graph.
for (size_t i = 0; i < dtypes.size(); ++i) {
TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i),
{true, arg_index, 0, false, {dtypes[i]}}));
DCHECK_EQ(arg_index, result_.nodes.size());
string name = arg_def.name();
if (dtypes.size() > 1) {
strings::StrAppend(&name, "_", i);
}
NodeDef* gnode = AddNode(name);
if (ints_on_device && dtypes[i] == DataType::DT_INT32) {
gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp);
} else {
gnode->set_op(FunctionLibraryDefinition::kArgOp);
}
DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i];
AddAttr("T", dtype, gnode);
AddAttr("index", arg_index, gnode);
if (resource_arg_unique_id >= 0) {
AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode);
}
if (arg_attrs) {
for (const auto& arg_attr : arg_attrs->attr()) {
AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr());
}
}
result_.arg_types.push_back(dtypes[i]);
++arg_index;
}
return Status::OK();
}
|
149416980820983113024454385523610384435
|
function.cc
|
275755455359751936167516531130081059449
|
CWE-617
|
CVE-2022-23586
|
Tensorflow is an Open Source Machine Learning Framework. A malicious user can cause a denial of service by altering a `SavedModel` such that assertions in `function.cc` would be falsified and crash the Python interpreter. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
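
A minimal C sketch contrasting the two patterns (illustrative names; TensorFlow's DCHECK_EQ is approximated here with assert): an assertion on data an attacker can influence aborts the whole process, while the patched code returns an ordinary error the caller can handle.

#include <assert.h>
#include <stdio.h>

/* Vulnerable pattern: a falsifiable invariant enforced with an assertion. */
static void add_arg_asserting(int arg_index, int num_nodes)
{
    assert(arg_index == num_nodes); /* crafted input can falsify this and abort */
}

/* Patched pattern: report the violation to the caller instead of dying. */
static int add_arg_checked(int arg_index, int num_nodes)
{
    if (arg_index != num_nodes)
        return -1; /* recoverable error instead of process termination */
    return 0;
}

int main(void)
{
    printf("%d\n", add_arg_checked(3, 5)); /* prints -1; process keeps running */
    (void) add_arg_asserting;              /* calling it with (3, 5) would abort */
    return 0;
}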
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23586
|
222,925
|
tensorflow
|
dcc21c7bc972b10b6fb95c2fb0f4ab5a59680ec2
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/dcc21c7bc972b10b6fb95c2fb0f4ab5a59680ec2
|
Eliminate debug `CHECK`-fail from `function.cc`
PiperOrigin-RevId: 409416119
Change-Id: I8376ee464d434e9b970ff0ad49edfdaa2a273cfe
| 0
|
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values,
const FunctionDef::ArgAttrs* arg_attrs,
bool ints_on_device,
int64_t resource_arg_unique_id) {
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(
ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
if (dtypes.size() < size_t{1}) {
return errors::Internal("Expected a list of at least one dtype");
}
int arg_index = result_.nodes.size();
TF_RETURN_IF_ERROR(
AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes}));
// Creates dtypes.size() nodes in the graph.
for (size_t i = 0; i < dtypes.size(); ++i) {
TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i),
{true, arg_index, 0, false, {dtypes[i]}}));
if (arg_index != result_.nodes.size()) {
return errors::Internal(
"Expected arg_index to be equal to the number of nodes in result.",
" Got ", arg_index, " and ", result_.nodes.size());
}
string name = arg_def.name();
if (dtypes.size() > 1) {
strings::StrAppend(&name, "_", i);
}
NodeDef* gnode = AddNode(name);
if (ints_on_device && dtypes[i] == DataType::DT_INT32) {
gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp);
} else {
gnode->set_op(FunctionLibraryDefinition::kArgOp);
}
DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i];
AddAttr("T", dtype, gnode);
AddAttr("index", arg_index, gnode);
if (resource_arg_unique_id >= 0) {
AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode);
}
if (arg_attrs) {
for (const auto& arg_attr : arg_attrs->attr()) {
AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr());
}
}
result_.arg_types.push_back(dtypes[i]);
++arg_index;
}
return Status::OK();
}
|
331541162268703307546562637087708050413
|
None
|
CWE-617
|
CVE-2022-23586
|
Tensorflow is an Open Source Machine Learning Framework. A malicious user can cause a denial of service by altering a `SavedModel` such that assertions in `function.cc` would be falsified and crash the Python interpreter. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23586
|
|
195,237
|
ImageMagick
|
f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
|
https://github.com/ImageMagick/ImageMagick
|
https://github.com/ImageMagick/ImageMagick/commit/f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
|
Fixes #4985: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 (#4986)
* fix Division by zero in XMenuWidget() of MagickCore/widget.c
* Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c
* fix Division by zero in ReadEnhMetaFile() of coders/emf.c
* Resolve conflicts
* fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025
* fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299
Co-authored-by: zhailiangliang <[email protected]>
| 1
|
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CropBox "CropBox"
#define DeviceCMYK "DeviceCMYK"
#define MediaBox "MediaBox"
#define RenderPCLText " Rendering PCL... "
char
command[MagickPathExtent],
*density,
filename[MagickPathExtent],
geometry[MagickPathExtent],
*options,
input_filename[MagickPathExtent];
const DelegateInfo
*delegate_info;
Image
*image,
*next_image;
ImageInfo
*read_info;
MagickBooleanType
cmyk,
status;
PointInfo
delta;
RectangleInfo
bounding_box,
page;
char
*p;
ssize_t
c;
SegmentInfo
bounds;
size_t
height,
width;
ssize_t
count;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Open image file.
*/
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0))
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
/*
Determine page geometry from the PCL media box.
*/
cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
count=0;
(void) memset(&bounding_box,0,sizeof(bounding_box));
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(&page,0,sizeof(page));
(void) memset(command,0,sizeof(command));
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
if (image_info->page != (char *) NULL)
continue;
/*
Note PCL elements.
*/
*p++=(char) c;
if ((c != (int) '/') && (c != '\n') &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*p='\0';
p=command;
/*
Is this a CMYK document?
*/
if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
{
/*
Note region defined by crop box.
*/
count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
{
/*
Note region defined by media box.
*/
count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (count != 4)
continue;
/*
Set PCL render geometry.
*/
width=(size_t) floor(bounds.x2-bounds.x1+0.5);
height=(size_t) floor(bounds.y2-bounds.y1+0.5);
if (width > page.width)
page.width=width;
if (height > page.height)
page.height=height;
}
(void) CloseBlob(image);
/*
Render PCL with the GhostPCL delegate.
*/
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double)
page.width,(double) page.height);
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
image=DestroyImage(image);
return((Image *) NULL);
}
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
image->resolution.x,image->resolution.y);
if (image_info->ping != MagickFalse)
(void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5);
page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5);
(void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
image=DestroyImage(image);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
if (read_info->number_scenes != 1)
(void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g",
(double) (read_info->scene+read_info->number_scenes));
else
(void) FormatLocaleString(options,MagickPathExtent,
"-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1,
(double) (read_info->scene+read_info->number_scenes));
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
(void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
(void) AcquireUniqueFilename(read_info->filename);
(void) FormatLocaleString(command,MagickPathExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,
read_info->filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
(char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
(void) RelinquishUniqueFileResource(input_filename);
read_info=DestroyImageInfo(read_info);
if (image == (Image *) NULL)
ThrowReaderException(DelegateError,"PCLDelegateFailed");
if (LocaleCompare(image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(image,exception);
if (cmyk_image != (Image *) NULL)
{
image=DestroyImageList(image);
image=cmyk_image;
}
}
do
{
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
image->page=page;
if (image_info->ping != MagickFalse)
{
image->magick_columns*=image->resolution.x/2.0;
image->magick_rows*=image->resolution.y/2.0;
image->columns*=image->resolution.x/2.0;
image->rows*=image->resolution.y/2.0;
}
next_image=SyncNextImageInList(image);
if (next_image != (Image *) NULL)
image=next_image;
} while (next_image != (Image *) NULL);
return(GetFirstImageInList(image));
}
|
164108098598115354275502589345492195560
|
pcl.c
|
226900089914426038554396055314138187051
|
CWE-190
|
CVE-2022-32546
|
A vulnerability was found in ImageMagick, causing a value outside the range of representable values of type 'unsigned long' at coders/pcl.c when crafted or untrusted input is processed. This negatively impacts application availability and can lead to other problems related to undefined behavior.
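
A minimal C sketch of the clamping cast the fix applies via CastDoubleToLong (the helper body below is an assumption, not ImageMagick's exact implementation): a double parsed from the file, such as 4e+26, must be clamped to the target integer range before conversion, since an out-of-range conversion is undefined behavior.

#include <limits.h>
#include <math.h>
#include <stdio.h>

/* Clamp a double to the representable range of long before casting;
   the conversion itself is undefined behavior when the value does not fit. */
static long cast_double_to_long(double value)
{
    if (isnan(value))
        return 0;                    /* arbitrary but well-defined fallback */
    if (value >= (double) LONG_MAX)
        return LONG_MAX;
    if (value <= (double) LONG_MIN)
        return LONG_MIN;
    return (long) value;             /* in range: well-defined truncation */
}

int main(void)
{
    printf("%ld\n", cast_double_to_long(4e+26));              /* clamped to LONG_MAX */
    printf("%ld\n", cast_double_to_long(floor(611.4 + 0.5))); /* 611 */
    return 0;
}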
|
https://nvd.nist.gov/vuln/detail/CVE-2022-32546
|
223,089
|
ImageMagick
|
f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
|
https://github.com/ImageMagick/ImageMagick
|
https://github.com/ImageMagick/ImageMagick/commit/f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
|
Fixes #4985: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 (#4986)
* fix Division by zero in XMenuWidget() of MagickCore/widget.c
* Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c
* fix Division by zero in ReadEnhMetaFile() of coders/emf.c
* Resolve conflicts
* fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025
* fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299
Co-authored-by: zhailiangliang <[email protected]>
| 0
|
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CropBox "CropBox"
#define DeviceCMYK "DeviceCMYK"
#define MediaBox "MediaBox"
#define RenderPCLText " Rendering PCL... "
char
command[MagickPathExtent],
*density,
filename[MagickPathExtent],
geometry[MagickPathExtent],
*options,
input_filename[MagickPathExtent];
const DelegateInfo
*delegate_info;
Image
*image,
*next_image;
ImageInfo
*read_info;
MagickBooleanType
cmyk,
status;
PointInfo
delta;
RectangleInfo
bounding_box,
page;
char
*p;
ssize_t
c;
SegmentInfo
bounds;
size_t
height,
width;
ssize_t
count;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Open image file.
*/
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0))
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
/*
Determine page geometry from the PCL media box.
*/
cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
count=0;
(void) memset(&bounding_box,0,sizeof(bounding_box));
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(&page,0,sizeof(page));
(void) memset(command,0,sizeof(command));
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
if (image_info->page != (char *) NULL)
continue;
/*
Note PCL elements.
*/
*p++=(char) c;
if ((c != (int) '/') && (c != '\n') &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*p='\0';
p=command;
/*
Is this a CMYK document?
*/
if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
{
/*
Note region defined by crop box.
*/
count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
{
/*
Note region defined by media box.
*/
count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (count != 4)
continue;
/*
Set PCL render geometry.
*/
width=(size_t)CastDoubleToLong(floor(bounds.x2-bounds.x1+0.5));
height=(size_t)CastDoubleToLong(floor(bounds.y2-bounds.y1+0.5));
if (width > page.width)
page.width=width;
if (height > page.height)
page.height=height;
}
(void) CloseBlob(image);
/*
Render PCL with the GhostPCL delegate.
*/
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double)
page.width,(double) page.height);
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
image=DestroyImage(image);
return((Image *) NULL);
}
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
image->resolution.x,image->resolution.y);
if (image_info->ping != MagickFalse)
(void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5);
page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5);
(void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
image=DestroyImage(image);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
if (read_info->number_scenes != 1)
(void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g",
(double) (read_info->scene+read_info->number_scenes));
else
(void) FormatLocaleString(options,MagickPathExtent,
"-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1,
(double) (read_info->scene+read_info->number_scenes));
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
(void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
(void) AcquireUniqueFilename(read_info->filename);
(void) FormatLocaleString(command,MagickPathExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,
read_info->filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
(char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
(void) RelinquishUniqueFileResource(input_filename);
read_info=DestroyImageInfo(read_info);
if (image == (Image *) NULL)
ThrowReaderException(DelegateError,"PCLDelegateFailed");
if (LocaleCompare(image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(image,exception);
if (cmyk_image != (Image *) NULL)
{
image=DestroyImageList(image);
image=cmyk_image;
}
}
do
{
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
image->page=page;
if (image_info->ping != MagickFalse)
{
image->magick_columns*=image->resolution.x/2.0;
image->magick_rows*=image->resolution.y/2.0;
image->columns*=image->resolution.x/2.0;
image->rows*=image->resolution.y/2.0;
}
next_image=SyncNextImageInList(image);
if (next_image != (Image *) NULL)
image=next_image;
} while (next_image != (Image *) NULL);
return(GetFirstImageInList(image));
}
|
19106273782202991773902274267597206156
|
pcl.c
|
107050694639473008713363285641232916868
|
CWE-190
|
CVE-2022-32546
|
A vulnerability was found in ImageMagick, causing a value outside the range of representable values of type 'unsigned long' at coders/pcl.c when crafted or untrusted input is processed. This negatively impacts application availability and can lead to other problems related to undefined behavior.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-32546
|
195,264
|
pcre2
|
d4fa336fbcc388f89095b184ba6d99422cfc676c
|
https://github.com/PCRE2Project/pcre2
|
https://github.com/PCRE2Project/pcre2/commit/d4fa336fbcc388f89095b184ba6d99422cfc676c
|
Fix incorrect value reading in JIT.
| 1
|
static void compile_xclass_matchingpath(compiler_common *common, PCRE2_SPTR cc, jump_list **backtracks)
{
DEFINE_COMPILER;
jump_list *found = NULL;
jump_list **list = (cc[0] & XCL_NOT) == 0 ? &found : backtracks;
sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX;
struct sljit_jump *jump = NULL;
PCRE2_SPTR ccbegin;
int compares, invertcmp, numberofcmps;
#if defined SUPPORT_UNICODE && (PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16)
BOOL utf = common->utf;
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == [8|16] */
#ifdef SUPPORT_UNICODE
sljit_u32 unicode_status = 0;
int typereg = TMP1;
const sljit_u32 *other_cases;
sljit_uw typeoffset;
#endif /* SUPPORT_UNICODE */
/* Scanning the necessary info. */
cc++;
ccbegin = cc;
compares = 0;
if (cc[-1] & XCL_MAP)
{
min = 0;
cc += 32 / sizeof(PCRE2_UCHAR);
}
while (*cc != XCL_END)
{
compares++;
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (c > max) max = c;
if (c < min) min = c;
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_SAVE_CHAR;
#endif /* SUPPORT_UNICODE */
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (c < min) min = c;
GETCHARINCTEST(c, cc);
if (c > max) max = c;
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_SAVE_CHAR;
#endif /* SUPPORT_UNICODE */
}
#ifdef SUPPORT_UNICODE
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_CLIST && *cc == XCL_PROP)
{
other_cases = PRIV(ucd_caseless_sets) + cc[1];
while (*other_cases != NOTACHAR)
{
if (*other_cases > max) max = *other_cases;
if (*other_cases < min) min = *other_cases;
other_cases++;
}
}
else
{
max = READ_CHAR_MAX;
min = 0;
}
switch(*cc)
{
case PT_ANY:
/* Any either accepts everything or is ignored. */
if (cc[-1] == XCL_PROP)
{
compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE);
if (list == backtracks)
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
return;
}
break;
case PT_LAMP:
case PT_GC:
case PT_PC:
case PT_ALNUM:
unicode_status |= XCLASS_HAS_TYPE;
break;
case PT_SCX:
unicode_status |= XCLASS_HAS_SCRIPT_EXTENSION;
if (cc[-1] == XCL_NOTPROP)
{
unicode_status |= XCLASS_SCRIPT_EXTENSION_NOTPROP;
break;
}
compares++;
/* Fall through */
case PT_SC:
unicode_status |= XCLASS_HAS_SCRIPT;
break;
case PT_SPACE:
case PT_PXSPACE:
case PT_WORD:
case PT_PXGRAPH:
case PT_PXPRINT:
case PT_PXPUNCT:
unicode_status |= XCLASS_SAVE_CHAR | XCLASS_HAS_TYPE;
break;
case PT_CLIST:
case PT_UCNC:
unicode_status |= XCLASS_SAVE_CHAR;
break;
case PT_BOOL:
unicode_status |= XCLASS_HAS_BOOL;
break;
case PT_BIDICL:
unicode_status |= XCLASS_HAS_BIDICL;
break;
default:
SLJIT_UNREACHABLE();
break;
}
cc += 2;
}
#endif /* SUPPORT_UNICODE */
}
SLJIT_ASSERT(compares > 0);
/* We are not necessarily in utf mode even in 8 bit mode. */
cc = ccbegin;
if ((cc[-1] & XCL_NOT) != 0)
read_char(common, min, max, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
{
#ifdef SUPPORT_UNICODE
read_char(common, min, max, (unicode_status & XCLASS_NEEDS_UCD) ? backtracks : NULL, 0);
#else /* !SUPPORT_UNICODE */
read_char(common, min, max, NULL, 0);
#endif /* SUPPORT_UNICODE */
}
if ((cc[-1] & XCL_HASPROP) == 0)
{
if ((cc[-1] & XCL_MAP) != 0)
{
jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
if (!optimize_class(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found))
{
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO));
}
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
JUMPHERE(jump);
cc += 32 / sizeof(PCRE2_UCHAR);
}
else
{
OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min);
add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min));
}
}
else if ((cc[-1] & XCL_MAP) != 0)
{
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_CHAR_SAVED;
#endif /* SUPPORT_UNICODE */
if (!optimize_class(common, (const sljit_u8 *)cc, FALSE, TRUE, list))
{
#if PCRE2_CODE_UNIT_WIDTH == 8
jump = NULL;
if (common->utf)
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO));
#if PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf)
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
JUMPHERE(jump);
}
OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0);
cc += 32 / sizeof(PCRE2_UCHAR);
}
#ifdef SUPPORT_UNICODE
if (unicode_status & XCLASS_NEEDS_UCD)
{
if ((unicode_status & (XCLASS_SAVE_CHAR | XCLASS_CHAR_SAVED)) == XCLASS_SAVE_CHAR)
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 32
if (!common->utf)
{
jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, MAX_UTF_CODE_POINT + 1);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, UNASSIGNED_UTF_CHAR);
JUMPHERE(jump);
}
#endif /* PCRE2_CODE_UNIT_WIDTH == 32 */
OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1);
OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0);
OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2));
OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1);
OP2(SLJIT_SHL, TMP1, 0, TMP2, 0, SLJIT_IMM, 3);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 2);
OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
ccbegin = cc;
if (unicode_status & XCLASS_HAS_BIDICL)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass));
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BIDICLASS_SHIFT);
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_BIDICL)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]);
add_jump(compiler, compares > 0 ? list : backtracks, jump);
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_BOOL)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, bprops));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BPROPS_MASK);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2);
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_BOOL)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_boolprop_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f));
add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp));
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_SCRIPT)
{
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script));
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
switch (*cc)
{
case PT_SCX:
if (cc[-1] == XCL_NOTPROP)
break;
/* Fall through */
case PT_SC:
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
add_jump(compiler, compares > 0 ? list : backtracks, CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]));
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_SCRIPT_EXTENSION)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_SCRIPTX_MASK);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2);
if (unicode_status & XCLASS_SCRIPT_EXTENSION_NOTPROP)
{
if (unicode_status & XCLASS_HAS_TYPE)
{
if (unicode_status & XCLASS_SAVE_CHAR)
{
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP2, 0);
unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0;
}
else
{
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0);
unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR;
}
}
OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script));
}
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_SCX)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
jump = NULL;
if (cc[-1] == XCL_NOTPROP)
{
jump = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, (int)cc[1]);
if (invertcmp)
{
add_jump(compiler, backtracks, jump);
jump = NULL;
}
invertcmp ^= 0x1;
}
OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_script_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f));
add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp));
if (jump != NULL)
JUMPHERE(jump);
}
cc += 2;
}
}
if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0)
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
else if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR)
OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0);
cc = ccbegin;
}
if (unicode_status & XCLASS_SAVE_CHAR)
OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0);
if (unicode_status & XCLASS_HAS_TYPE)
{
if (unicode_status & XCLASS_SAVE_CHAR)
typereg = RETURN_ADDR;
OP1(SLJIT_MOV_U8, typereg, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype));
}
}
#endif /* SUPPORT_UNICODE */
/* Generating code. */
charoffset = 0;
numberofcmps = 0;
#ifdef SUPPORT_UNICODE
typeoffset = 0;
#endif /* SUPPORT_UNICODE */
while (*cc != XCL_END)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
jump = NULL;
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
else
{
jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
numberofcmps = 0;
}
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
SET_CHAR_OFFSET(c);
GETCHARINCTEST(c, cc);
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
else
{
jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
numberofcmps = 0;
}
}
#ifdef SUPPORT_UNICODE
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
if (*cc == XCL_NOTPROP)
invertcmp ^= 0x1;
cc++;
switch(*cc)
{
case PT_ANY:
if (!invertcmp)
jump = JUMP(SLJIT_JUMP);
break;
case PT_LAMP:
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_GC:
c = PRIV(ucp_typerange)[(int)cc[1] * 2];
SET_TYPE_OFFSET(c);
jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c);
break;
case PT_PC:
jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset);
break;
case PT_SC:
case PT_SCX:
case PT_BOOL:
case PT_BIDICL:
compares++;
/* Do nothing. */
break;
case PT_SPACE:
case PT_PXSPACE:
SET_CHAR_OFFSET(9);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0xd - 0x9);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x85 - 0x9);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x9);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_TYPE_OFFSET(ucp_Zl);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_WORD:
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
/* Fall through. */
case PT_ALNUM:
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Nd);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_CLIST:
other_cases = PRIV(ucd_caseless_sets) + cc[1];
/* At least three characters are required.
Otherwise this case would be handled by the normal code path. */
SLJIT_ASSERT(other_cases[0] != NOTACHAR && other_cases[1] != NOTACHAR && other_cases[2] != NOTACHAR);
SLJIT_ASSERT(other_cases[0] < other_cases[1] && other_cases[1] < other_cases[2]);
/* Optimizing character pairs, if their difference is power of 2. */
if (is_powerof2(other_cases[1] ^ other_cases[0]))
{
if (charoffset == 0)
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
else
{
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[1]);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
other_cases += 2;
}
else if (is_powerof2(other_cases[2] ^ other_cases[1]))
{
if (charoffset == 0)
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[2] ^ other_cases[1]);
else
{
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[2]);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset));
OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
other_cases += 3;
}
else
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
}
while (*other_cases != NOTACHAR)
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
}
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_UCNC:
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_CHAR_OFFSET(0xa0);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_PXGRAPH:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
break;
case PT_PXPRINT:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll);
OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
break;
case PT_PXPUNCT:
SET_TYPE_OFFSET(ucp_Sc);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x7f);
OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Pc);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
default:
SLJIT_UNREACHABLE();
break;
}
cc += 2;
}
#endif /* SUPPORT_UNICODE */
if (jump != NULL)
add_jump(compiler, compares > 0 ? list : backtracks, jump);
}
if (found != NULL)
set_jumps(found, LABEL());
}
|
183419698766008283102134937176756315954
|
pcre2_jit_compile.c
|
284265016287060690142505784626516203619
|
CWE-125
|
CVE-2022-1586
|
An out-of-bounds read vulnerability was discovered in the PCRE2 library in the compile_xclass_matchingpath() function of the pcre2_jit_compile.c file. This involves a Unicode property matching issue in JIT-compiled regular expressions. The issue occurs because the character was not fully read in case-less matching within JIT.
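
A minimal C sketch of the defect (constants and byte layout are illustrative, not PCRE2's real encoding): the vulnerable scan tests the same byte against two different constants, a condition that can never hold, so the caseless-set branch that widens the min/max read bounds is silently skipped; the fix tests the preceding opcode byte (cc[-1]) instead.

#include <stdio.h>

enum { XCL_PROP = 3, PT_CLIST = 7 }; /* illustrative values */

int main(void)
{
    const unsigned char code[] = { XCL_PROP, PT_CLIST };
    const unsigned char *cc = &code[1]; /* cc points at the property type */

    /* Vulnerable test: one byte cannot equal two different constants,
       so this branch is dead and the character bounds are never widened. */
    printf("%d\n", *cc == PT_CLIST && *cc == XCL_PROP);    /* 0 */

    /* Patched test: property type vs. the preceding opcode byte. */
    printf("%d\n", *cc == PT_CLIST && cc[-1] == XCL_PROP); /* 1 */
    return 0;
}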
|
https://nvd.nist.gov/vuln/detail/CVE-2022-1586
|
223,368
|
pcre2
|
d4fa336fbcc388f89095b184ba6d99422cfc676c
|
https://github.com/PCRE2Project/pcre2
|
https://github.com/PCRE2Project/pcre2/commit/d4fa336fbcc388f89095b184ba6d99422cfc676c
|
Fix incorrect value reading in JIT.
| 0
|
static void compile_xclass_matchingpath(compiler_common *common, PCRE2_SPTR cc, jump_list **backtracks)
{
DEFINE_COMPILER;
jump_list *found = NULL;
jump_list **list = (cc[0] & XCL_NOT) == 0 ? &found : backtracks;
sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX;
struct sljit_jump *jump = NULL;
PCRE2_SPTR ccbegin;
int compares, invertcmp, numberofcmps;
#if defined SUPPORT_UNICODE && (PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16)
BOOL utf = common->utf;
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == [8|16] */
#ifdef SUPPORT_UNICODE
sljit_u32 unicode_status = 0;
int typereg = TMP1;
const sljit_u32 *other_cases;
sljit_uw typeoffset;
#endif /* SUPPORT_UNICODE */
/* Scanning the necessary info. */
cc++;
ccbegin = cc;
compares = 0;
if (cc[-1] & XCL_MAP)
{
min = 0;
cc += 32 / sizeof(PCRE2_UCHAR);
}
while (*cc != XCL_END)
{
compares++;
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (c > max) max = c;
if (c < min) min = c;
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_SAVE_CHAR;
#endif /* SUPPORT_UNICODE */
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (c < min) min = c;
GETCHARINCTEST(c, cc);
if (c > max) max = c;
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_SAVE_CHAR;
#endif /* SUPPORT_UNICODE */
}
#ifdef SUPPORT_UNICODE
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_CLIST && cc[-1] == XCL_PROP)
{
other_cases = PRIV(ucd_caseless_sets) + cc[1];
while (*other_cases != NOTACHAR)
{
if (*other_cases > max) max = *other_cases;
if (*other_cases < min) min = *other_cases;
other_cases++;
}
}
else
{
max = READ_CHAR_MAX;
min = 0;
}
switch(*cc)
{
case PT_ANY:
/* Any either accepts everything or is ignored. */
if (cc[-1] == XCL_PROP)
{
compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE);
if (list == backtracks)
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
return;
}
break;
case PT_LAMP:
case PT_GC:
case PT_PC:
case PT_ALNUM:
unicode_status |= XCLASS_HAS_TYPE;
break;
case PT_SCX:
unicode_status |= XCLASS_HAS_SCRIPT_EXTENSION;
if (cc[-1] == XCL_NOTPROP)
{
unicode_status |= XCLASS_SCRIPT_EXTENSION_NOTPROP;
break;
}
compares++;
/* Fall through */
case PT_SC:
unicode_status |= XCLASS_HAS_SCRIPT;
break;
case PT_SPACE:
case PT_PXSPACE:
case PT_WORD:
case PT_PXGRAPH:
case PT_PXPRINT:
case PT_PXPUNCT:
unicode_status |= XCLASS_SAVE_CHAR | XCLASS_HAS_TYPE;
break;
case PT_CLIST:
case PT_UCNC:
unicode_status |= XCLASS_SAVE_CHAR;
break;
case PT_BOOL:
unicode_status |= XCLASS_HAS_BOOL;
break;
case PT_BIDICL:
unicode_status |= XCLASS_HAS_BIDICL;
break;
default:
SLJIT_UNREACHABLE();
break;
}
cc += 2;
}
#endif /* SUPPORT_UNICODE */
}
SLJIT_ASSERT(compares > 0);
/* We are not necessarily in utf mode even in 8 bit mode. */
cc = ccbegin;
if ((cc[-1] & XCL_NOT) != 0)
read_char(common, min, max, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
{
#ifdef SUPPORT_UNICODE
read_char(common, min, max, (unicode_status & XCLASS_NEEDS_UCD) ? backtracks : NULL, 0);
#else /* !SUPPORT_UNICODE */
read_char(common, min, max, NULL, 0);
#endif /* SUPPORT_UNICODE */
}
if ((cc[-1] & XCL_HASPROP) == 0)
{
if ((cc[-1] & XCL_MAP) != 0)
{
jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
if (!optimize_class(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found))
{
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO));
}
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
JUMPHERE(jump);
cc += 32 / sizeof(PCRE2_UCHAR);
}
else
{
OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min);
add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min));
}
}
else if ((cc[-1] & XCL_MAP) != 0)
{
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_CHAR_SAVED;
#endif /* SUPPORT_UNICODE */
if (!optimize_class(common, (const sljit_u8 *)cc, FALSE, TRUE, list))
{
#if PCRE2_CODE_UNIT_WIDTH == 8
jump = NULL;
if (common->utf)
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO));
#if PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf)
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
JUMPHERE(jump);
}
OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0);
cc += 32 / sizeof(PCRE2_UCHAR);
}
#ifdef SUPPORT_UNICODE
if (unicode_status & XCLASS_NEEDS_UCD)
{
if ((unicode_status & (XCLASS_SAVE_CHAR | XCLASS_CHAR_SAVED)) == XCLASS_SAVE_CHAR)
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 32
if (!common->utf)
{
jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, MAX_UTF_CODE_POINT + 1);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, UNASSIGNED_UTF_CHAR);
JUMPHERE(jump);
}
#endif /* PCRE2_CODE_UNIT_WIDTH == 32 */
OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1);
OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0);
OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2));
OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1);
OP2(SLJIT_SHL, TMP1, 0, TMP2, 0, SLJIT_IMM, 3);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 2);
OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
ccbegin = cc;
if (unicode_status & XCLASS_HAS_BIDICL)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass));
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BIDICLASS_SHIFT);
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_BIDICL)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]);
add_jump(compiler, compares > 0 ? list : backtracks, jump);
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_BOOL)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, bprops));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BPROPS_MASK);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2);
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_BOOL)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_boolprop_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f));
add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp));
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_SCRIPT)
{
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script));
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
switch (*cc)
{
case PT_SCX:
if (cc[-1] == XCL_NOTPROP)
break;
/* Fall through */
case PT_SC:
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
add_jump(compiler, compares > 0 ? list : backtracks, CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]));
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_SCRIPT_EXTENSION)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_SCRIPTX_MASK);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2);
if (unicode_status & XCLASS_SCRIPT_EXTENSION_NOTPROP)
{
if (unicode_status & XCLASS_HAS_TYPE)
{
if (unicode_status & XCLASS_SAVE_CHAR)
{
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP2, 0);
unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0;
}
else
{
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0);
unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR;
}
}
OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script));
}
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_SCX)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
jump = NULL;
if (cc[-1] == XCL_NOTPROP)
{
jump = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, (int)cc[1]);
if (invertcmp)
{
add_jump(compiler, backtracks, jump);
jump = NULL;
}
invertcmp ^= 0x1;
}
OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_script_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f));
add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp));
if (jump != NULL)
JUMPHERE(jump);
}
cc += 2;
}
}
if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0)
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
else if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR)
OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0);
cc = ccbegin;
}
if (unicode_status & XCLASS_SAVE_CHAR)
OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0);
if (unicode_status & XCLASS_HAS_TYPE)
{
if (unicode_status & XCLASS_SAVE_CHAR)
typereg = RETURN_ADDR;
OP1(SLJIT_MOV_U8, typereg, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype));
}
}
#endif /* SUPPORT_UNICODE */
/* Generating code. */
charoffset = 0;
numberofcmps = 0;
#ifdef SUPPORT_UNICODE
typeoffset = 0;
#endif /* SUPPORT_UNICODE */
while (*cc != XCL_END)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
jump = NULL;
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
else
{
jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
numberofcmps = 0;
}
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
SET_CHAR_OFFSET(c);
GETCHARINCTEST(c, cc);
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
else
{
jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
numberofcmps = 0;
}
}
#ifdef SUPPORT_UNICODE
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
if (*cc == XCL_NOTPROP)
invertcmp ^= 0x1;
cc++;
switch(*cc)
{
case PT_ANY:
if (!invertcmp)
jump = JUMP(SLJIT_JUMP);
break;
case PT_LAMP:
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_GC:
c = PRIV(ucp_typerange)[(int)cc[1] * 2];
SET_TYPE_OFFSET(c);
jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c);
break;
case PT_PC:
jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset);
break;
case PT_SC:
case PT_SCX:
case PT_BOOL:
case PT_BIDICL:
compares++;
/* Do nothing. */
break;
case PT_SPACE:
case PT_PXSPACE:
SET_CHAR_OFFSET(9);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0xd - 0x9);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x85 - 0x9);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x9);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_TYPE_OFFSET(ucp_Zl);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_WORD:
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
/* Fall through. */
case PT_ALNUM:
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Nd);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_CLIST:
other_cases = PRIV(ucd_caseless_sets) + cc[1];
/* At least three characters are required.
Otherwise this case would be handled by the normal code path. */
SLJIT_ASSERT(other_cases[0] != NOTACHAR && other_cases[1] != NOTACHAR && other_cases[2] != NOTACHAR);
SLJIT_ASSERT(other_cases[0] < other_cases[1] && other_cases[1] < other_cases[2]);
/* Optimizing character pairs, if their difference is power of 2. */
if (is_powerof2(other_cases[1] ^ other_cases[0]))
{
if (charoffset == 0)
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
else
{
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[1]);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
other_cases += 2;
}
else if (is_powerof2(other_cases[2] ^ other_cases[1]))
{
if (charoffset == 0)
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[2] ^ other_cases[1]);
else
{
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[2]);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset));
OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
other_cases += 3;
}
else
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
}
while (*other_cases != NOTACHAR)
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
}
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_UCNC:
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_CHAR_OFFSET(0xa0);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_PXGRAPH:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
break;
case PT_PXPRINT:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll);
OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
break;
case PT_PXPUNCT:
SET_TYPE_OFFSET(ucp_Sc);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x7f);
OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Pc);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
default:
SLJIT_UNREACHABLE();
break;
}
cc += 2;
}
#endif /* SUPPORT_UNICODE */
if (jump != NULL)
add_jump(compiler, compares > 0 ? list : backtracks, jump);
}
if (found != NULL)
set_jumps(found, LABEL());
}
|
144276531294134211562638848702422655084
|
pcre2_jit_compile.c
|
52374969195278947710795935639555031915
|
CWE-125
|
CVE-2022-1586
|
An out-of-bounds read vulnerability was discovered in the PCRE2 library, in the compile_xclass_matchingpath() function of the pcre2_jit_compile.c file. It involves Unicode property matching in JIT-compiled regular expressions: the character was not fully read during caseless matching within the JIT.
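For context, a minimal, self-contained harness of the kind used to exercise this code path (JIT-compiling a caseless Unicode-property character class) is sketched below. It is an illustration only: the pattern and subject text are assumptions, not a known reproducer for this CVE; the harness simply drives pcre2_jit_compile(), whose XCLASS handling lives in compile_xclass_matchingpath(). Build against PCRE2 with -lpcre2-8.

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>

int main(void) {
  int errorcode = 0;
  PCRE2_SIZE erroroffset = 0;
  /* Caseless + UTF + UCP is the configuration named in the advisory. */
  pcre2_code *re = pcre2_compile((PCRE2_SPTR)"[\\p{Greek}]", PCRE2_ZERO_TERMINATED,
                                 PCRE2_CASELESS | PCRE2_UTF | PCRE2_UCP,
                                 &errorcode, &erroroffset, NULL);
  if (re == NULL) return 1;
  /* A property inside a character class becomes an XCLASS, which the JIT
     compiles via compile_xclass_matchingpath(). */
  if (pcre2_jit_compile(re, PCRE2_JIT_COMPLETE) != 0) { pcre2_code_free(re); return 1; }
  pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
  int rc = pcre2_match(re, (PCRE2_SPTR)"\xce\xb1", 2, 0, 0, md, NULL); /* U+03B1 */
  printf("rc=%d\n", rc);
  pcre2_match_data_free(md);
  pcre2_code_free(re);
  return 0;
}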
|
https://nvd.nist.gov/vuln/detail/CVE-2022-1586
|
195,274
|
tensorflow
|
0a365c029e437be0349c31f8d4c9926b69fa3fa1
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/0a365c029e437be0349c31f8d4c9926b69fa3fa1
|
Prevent null pointer dereference in constant folding.
Under certain conditions, an invalid protobuf saved model with invalid nodes could be loaded. During the optimization phase, the Grappler optimizer would then dereference a null pointer.
PiperOrigin-RevId: 409683530
Change-Id: I1f10340a7ec384bc9bc587300390f1078cf5caa0
| 1
|
bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node,
const GraphProperties& properties) {
// Push down multiplication on ConvND.
// * ConvND
// / \ / \
// ConvND C2 -- > X *
// / \ / \
// X C1 C1 C2
//
// where C1 and C2 are constants and X is non-constant.
//
// TODO(rmlarsen): Use PrepareConstantPushDown() to simplify this code.
if (!IsAnyMul(*node) || NumNonControlInputs(*node) != 2) return false;
NodeDef* mul_left_child = node_map_->GetNode(node->input(0));
NodeDef* mul_right_child = node_map_->GetNode(node->input(1));
// One child must be constant, and the second must be Conv op.
const bool left_child_is_constant = IsReallyConstant(*mul_left_child);
const bool right_child_is_constant = IsReallyConstant(*mul_right_child);
if (!left_child_is_constant && !right_child_is_constant) {
return false;
}
NodeDef* conv_node =
left_child_is_constant ? mul_right_child : mul_left_child;
if (!IsConv2D(*conv_node) && !IsConv3D(*conv_node)) {
return false;
}
if (node->device() != mul_left_child->device() ||
node->device() != mul_right_child->device()) {
return false;
}
// Make sure that it is safe to change the value of the convolution
// output.
if (conv_node->input_size() < 2 ||
NumNonControlOutputs(*conv_node, *node_map_) > 1 ||
nodes_to_preserve_.find(conv_node->name()) != nodes_to_preserve_.end()) {
return false;
}
// Identify the nodes to swap.
NodeDef* conv_left_child = node_map_->GetNode(conv_node->input(0));
NodeDef* conv_right_child = node_map_->GetNode(conv_node->input(1));
const bool conv_left_is_constant = IsReallyConstant(*conv_left_child);
const bool conv_right_is_constant = IsReallyConstant(*conv_right_child);
if (!conv_left_is_constant && !conv_right_is_constant) {
// At least one of the convolution inputs should be constant.
return false;
}
if (conv_left_is_constant && conv_right_is_constant) {
// Leverage regular constant folding to handle this.
return false;
}
const auto& mul_props = properties.GetOutputProperties(node->name());
const auto& conv_props = properties.GetOutputProperties(conv_node->name());
if (mul_props.empty() || conv_props.empty()) {
return false;
}
const auto& mul_shape = mul_props[0].shape();
const auto& conv_shape = conv_props[0].shape();
if (!ShapesSymbolicallyEqual(mul_shape, conv_shape)) {
return false;
}
const auto& input_props = properties.GetInputProperties(conv_node->name());
if (input_props.size() < 2) {
return false;
}
const auto& filter_shape = input_props[1].shape();
NodeDef* const_node =
left_child_is_constant ? mul_left_child : mul_right_child;
const auto& const_props = properties.GetOutputProperties(const_node->name());
if (const_props.empty()) {
return false;
}
const auto& const_shape = const_props[0].shape();
if (!IsValidConstShapeForMulConvPushDown(
conv_node->attr().at("data_format").s(), filter_shape, const_shape)) {
return false;
}
string mul_new_name = AddPrefixToNodeName("merged_input", conv_node->name());
if (node_map_->NodeExists(mul_new_name)) {
return false;
}
// Make sure we don't introduce loops in the graph by removing control
// dependencies from the conv2d node to c2.
string conv_const_input =
conv_left_is_constant ? conv_node->input(0) : conv_node->input(1);
if (MaybeRemoveControlInput(conv_node->name(), const_node, optimized_graph,
node_map_.get())) {
// Add a control dep from c1 to c2 to ensure c2 is in the right frame
MaybeAddControlInput(conv_const_input, const_node, optimized_graph,
node_map_.get());
}
conv_node->set_name(node->name());
node->set_name(mul_new_name);
if (conv_left_is_constant) {
node_map_->UpdateInput(conv_node->name(), node->input(0), mul_new_name);
conv_node->set_input(0, mul_new_name);
} else {
node_map_->UpdateInput(conv_node->name(), node->input(1), mul_new_name);
conv_node->set_input(1, mul_new_name);
}
NodeDef* conv_const_node =
conv_left_is_constant ? conv_left_child : conv_right_child;
if (left_child_is_constant) {
node->set_input(1, conv_const_node->name());
} else {
node->set_input(0, conv_const_node->name());
}
node_map_->AddNode(mul_new_name, node);
return true;
}
|
134451371039665673916173676790439576039
|
constant_folding.cc
|
221573695858123615640237954647315751120
|
CWE-476
|
CVE-2022-23589
|
Tensorflow is an Open Source Machine Learning Framework. Under certain scenarios, the Grappler component of TensorFlow can trigger a null pointer dereference. There are two places where this can occur, for the same malicious alteration of a `SavedModel` file (fixing the first one would trigger the same dereference in the second place). First, during constant folding, the `GraphDef` might not have the required nodes for the binary operation. If a node is missing, the corresponding `mul_*child` would be null, and the dereference in the subsequent line would be incorrect. We have a similar issue during `IsIdentityConsumingSwitch`. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
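The shape of the missing guard (which the paired target-0 record below adds verbatim after the node_map_->GetNode() lookups) reduces to a self-contained sketch; NodeDef, GetNode and the node names here are toy stand-ins, not the Grappler API:

#include <iostream>
#include <string>
#include <unordered_map>

struct NodeDef { std::string name; };

// A lookup may return nullptr for a malformed graph, so callers must check.
NodeDef* GetNode(std::unordered_map<std::string, NodeDef*>& m, const std::string& k) {
  auto it = m.find(k);
  return it == m.end() ? nullptr : it->second;
}

bool MulConvPushDownLike(std::unordered_map<std::string, NodeDef*>& m) {
  NodeDef* left = GetNode(m, "mul_left");
  NodeDef* right = GetNode(m, "mul_right");
  if (left == nullptr || right == nullptr) return false;  // the added guard
  std::cout << left->name << " * " << right->name << "\n";
  return true;
}

int main() {
  std::unordered_map<std::string, NodeDef*> m;  // empty map: simulates missing nodes
  std::cout << (MulConvPushDownLike(m) ? "rewrote" : "skipped") << "\n";
  return 0;
}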
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23589
|
223,728
|
tensorflow
|
0a365c029e437be0349c31f8d4c9926b69fa3fa1
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/0a365c029e437be0349c31f8d4c9926b69fa3fa1
|
Prevent null pointer dereference in constant folding.
Under certain conditions, an invalid protobuf saved model with invalid nodes could be loaded. During the optimization phase, the Grappler optimizer would then dereference a null pointer.
PiperOrigin-RevId: 409683530
Change-Id: I1f10340a7ec384bc9bc587300390f1078cf5caa0
| 0
|
bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node,
const GraphProperties& properties) {
// Push down multiplication on ConvND.
// * ConvND
// / \ / \
// ConvND C2 -- > X *
// / \ / \
// X C1 C1 C2
//
// where C1 and C2 are constants and X is non-constant.
//
// TODO(rmlarsen): Use PrepareConstantPushDown() to simplify this code.
if (!IsAnyMul(*node) || NumNonControlInputs(*node) != 2) return false;
NodeDef* mul_left_child = node_map_->GetNode(node->input(0));
NodeDef* mul_right_child = node_map_->GetNode(node->input(1));
if (mul_left_child == nullptr || mul_right_child == nullptr) {
return false;
}
// One child must be constant, and the second must be Conv op.
const bool left_child_is_constant = IsReallyConstant(*mul_left_child);
const bool right_child_is_constant = IsReallyConstant(*mul_right_child);
if (!left_child_is_constant && !right_child_is_constant) {
return false;
}
NodeDef* conv_node =
left_child_is_constant ? mul_right_child : mul_left_child;
if (!IsConv2D(*conv_node) && !IsConv3D(*conv_node)) {
return false;
}
if (node->device() != mul_left_child->device() ||
node->device() != mul_right_child->device()) {
return false;
}
// Make sure that it is safe to change the value of the convolution
// output.
if (conv_node->input_size() < 2 ||
NumNonControlOutputs(*conv_node, *node_map_) > 1 ||
nodes_to_preserve_.find(conv_node->name()) != nodes_to_preserve_.end()) {
return false;
}
// Identify the nodes to swap.
NodeDef* conv_left_child = node_map_->GetNode(conv_node->input(0));
NodeDef* conv_right_child = node_map_->GetNode(conv_node->input(1));
const bool conv_left_is_constant = IsReallyConstant(*conv_left_child);
const bool conv_right_is_constant = IsReallyConstant(*conv_right_child);
if (!conv_left_is_constant && !conv_right_is_constant) {
// At least one of the convolution inputs should be constant.
return false;
}
if (conv_left_is_constant && conv_right_is_constant) {
// Leverage regular constant folding to handle this.
return false;
}
const auto& mul_props = properties.GetOutputProperties(node->name());
const auto& conv_props = properties.GetOutputProperties(conv_node->name());
if (mul_props.empty() || conv_props.empty()) {
return false;
}
const auto& mul_shape = mul_props[0].shape();
const auto& conv_shape = conv_props[0].shape();
if (!ShapesSymbolicallyEqual(mul_shape, conv_shape)) {
return false;
}
const auto& input_props = properties.GetInputProperties(conv_node->name());
if (input_props.size() < 2) {
return false;
}
const auto& filter_shape = input_props[1].shape();
NodeDef* const_node =
left_child_is_constant ? mul_left_child : mul_right_child;
const auto& const_props = properties.GetOutputProperties(const_node->name());
if (const_props.empty()) {
return false;
}
const auto& const_shape = const_props[0].shape();
if (!IsValidConstShapeForMulConvPushDown(
conv_node->attr().at("data_format").s(), filter_shape, const_shape)) {
return false;
}
string mul_new_name = AddPrefixToNodeName("merged_input", conv_node->name());
if (node_map_->NodeExists(mul_new_name)) {
return false;
}
// Make sure we don't introduce loops in the graph by removing control
// dependencies from the conv2d node to c2.
string conv_const_input =
conv_left_is_constant ? conv_node->input(0) : conv_node->input(1);
if (MaybeRemoveControlInput(conv_node->name(), const_node, optimized_graph,
node_map_.get())) {
// Add a control dep from c1 to c2 to ensure c2 is in the right frame
MaybeAddControlInput(conv_const_input, const_node, optimized_graph,
node_map_.get());
}
conv_node->set_name(node->name());
node->set_name(mul_new_name);
if (conv_left_is_constant) {
node_map_->UpdateInput(conv_node->name(), node->input(0), mul_new_name);
conv_node->set_input(0, mul_new_name);
} else {
node_map_->UpdateInput(conv_node->name(), node->input(1), mul_new_name);
conv_node->set_input(1, mul_new_name);
}
NodeDef* conv_const_node =
conv_left_is_constant ? conv_left_child : conv_right_child;
if (left_child_is_constant) {
node->set_input(1, conv_const_node->name());
} else {
node->set_input(0, conv_const_node->name());
}
node_map_->AddNode(mul_new_name, node);
return true;
}
|
283045777051933437914385494105493059717
|
constant_folding.cc
|
149567017925135188078420823730624272905
|
CWE-476
|
CVE-2022-23589
|
Tensorflow is an Open Source Machine Learning Framework. Under certain scenarios, the Grappler component of TensorFlow can trigger a null pointer dereference. There are two places where this can occur, for the same malicious alteration of a `SavedModel` file (fixing the first one would trigger the same dereference in the second place). First, during constant folding, the `GraphDef` might not have the required nodes for the binary operation. If a node is missing, the corresponding `mul_*child` would be null, and the dereference in the subsequent line would be incorrect. We have a similar issue during `IsIdentityConsumingSwitch`. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23589
|
195,291
|
tensorflow
|
ef1d027be116f25e25bb94a60da491c2cf55bd0b
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/ef1d027be116f25e25bb94a60da491c2cf55bd0b
|
Prevent copying uninitialized data in `AssignOp`.
This prevents harder-to-debug undefined behaviors that cannot be traced back to the original tensor after assignments occur earlier in the graph execution. Several of these undefined behaviors are just reference bindings to null pointers, which are caught when running under ubsan/asan.
PiperOrigin-RevId: 408654780
Change-Id: Iad2ec40d43f5fd7ea016c20283356c12d5ddeab1
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor& rhs = context->input(1);
// We always return the input ref.
context->forward_ref_input_to_ref_output(0, 0);
// We can't always know how this value will be used downstream, so make
// conservative assumptions in specifying constraints on the memory
// allocation attributes, unless the Grappler graph analysis determined that
// it was safe not to.
AllocatorAttributes attr;
if (!relax_constraints_) {
attr.set_gpu_compatible(true);
attr.set_nic_compatible(true);
}
{
mutex_lock l(*context->input_ref_mutex(0));
const Tensor& old_lhs = context->mutable_input(0, /* lock_held */ true);
const bool same_shape = old_lhs.shape().IsSameSize(rhs.shape());
if (validate_shape_) {
OP_REQUIRES(context, same_shape,
errors::InvalidArgument(
"Assign requires shapes of both tensors to match. "
"lhs shape= ",
old_lhs.shape().DebugString(),
" rhs shape= ", rhs.shape().DebugString()));
}
// In the code below we try to minimize the amount of memory allocation
// and copying by trying the following two shortcuts:
// 1. If the lhs is initialized and has the same number of elements as
// the rhs we can avoid a memory allocation.
// 2. If we can reuse the rhs buffer we avoid both a memory allocation
// and copying.
// 1. Try to copy into an existing buffer.
if (old_lhs.IsInitialized() &&
old_lhs.shape().num_elements() == rhs.shape().num_elements()) {
// The existing lhs tensor has already been initialized and the right
// hand side can fit in the underlying buffer.
Tensor reshaped_old_lhs;
if (same_shape) {
reshaped_old_lhs = old_lhs;
} else {
CHECK(reshaped_old_lhs.CopyFrom(old_lhs, rhs.shape()));
context->replace_ref_input(0, reshaped_old_lhs,
/* lock_held */ true);
}
if (use_exclusive_lock_) {
Copy(context, &reshaped_old_lhs, rhs);
return;
}
} else {
// 2. Try to reuse the rhs.
std::unique_ptr<Tensor> input_alias = context->forward_input(
1, OpKernelContext::Params::kNoReservation /*output_index*/,
rhs.dtype(), rhs.shape(), DEVICE_MEMORY, attr);
if (input_alias != nullptr) {
// Update the ref to point to the new buffer.
context->replace_ref_input(0, *input_alias, /* lock_held */ true);
return;
}
// Otherwise, create a new tensor whose shape matches the
// right hand side, hand off to lhs and copy the rhs into it.
Tensor copy_tensor;
OP_REQUIRES_OK(context,
context->allocate_temp(old_lhs.dtype(), rhs.shape(),
©_tensor, attr));
// We track memory of variables in variable ops instead of in this
// assign op.
context->clear_recorded_memory();
context->replace_ref_input(0, copy_tensor, /* lock_held */ true);
if (use_exclusive_lock_) {
Copy(context, ©_tensor, rhs);
return;
}
}
}
// The tensor has already been initialized and the right hand side
// matches the left hand side's shape. We have been told to do the
// copy outside the lock.
Tensor old_unlocked_lhs = context->mutable_input(0, /* lock_held */ false);
Copy(context, &old_unlocked_lhs, rhs);
}
|
110563830933859876998490806365273446744
|
assign_op.h
|
69919930869774703131816695670485389180
|
CWE-908
|
CVE-2022-23573
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `AssignOp` can result in copying uninitialized data to a new tensor. This later results in undefined behavior. The implementation has a check that the left hand side of the assignment is initialized (to minimize the number of allocations), but does not check that the right hand side is also initialized. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
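A self-contained sketch of the precondition the patch introduces is shown below; Tensor here is a toy stand-in (an optional buffer), and only the error string is borrowed from the patched record that follows:

#include <iostream>
#include <optional>
#include <vector>

struct Tensor {
  std::optional<std::vector<float>> buf;  // nullopt == uninitialized
  bool IsInitialized() const { return buf.has_value(); }
};

bool Assign(Tensor& lhs, const Tensor& rhs) {
  if (!rhs.IsInitialized()) {
    std::cerr << "Right hand side of AssignOp is not initialized\n";
    return false;  // reject before any copy, as the fixed version below does
  }
  lhs.buf = rhs.buf;  // safe: rhs is known to hold data
  return true;
}

int main() {
  Tensor lhs, rhs;                        // rhs deliberately left uninitialized
  std::cout << Assign(lhs, rhs) << "\n";  // prints 0 after the error message
  return 0;
}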
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23573
|
224,153
|
tensorflow
|
ef1d027be116f25e25bb94a60da491c2cf55bd0b
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/ef1d027be116f25e25bb94a60da491c2cf55bd0b
|
Prevent copying uninitialized data in `AssignOp`.
This prevents harder-to-debug undefined behaviors that cannot be traced back to the original tensor after assignments occur earlier in the graph execution. Several of these undefined behaviors are just reference bindings to null pointers, which are caught when running under ubsan/asan.
PiperOrigin-RevId: 408654780
Change-Id: Iad2ec40d43f5fd7ea016c20283356c12d5ddeab1
| 0
|
void Compute(OpKernelContext* context) override {
const Tensor& rhs = context->input(1);
// We always return the input ref.
context->forward_ref_input_to_ref_output(0, 0);
// Prevent copying uninitialized data, to solve harder to debug undefined
// behaviors that cannot be traced back to the original tensor.
OP_REQUIRES(
context, rhs.IsInitialized(),
errors::Internal("Right hand side of AssignOp is not initialized"));
// We can't always know how this value will be used downstream, so make
// conservative assumptions in specifying constraints on the memory
// allocation attributes, unless the Grappler graph analysis determined that
// it was safe not to.
AllocatorAttributes attr;
if (!relax_constraints_) {
attr.set_gpu_compatible(true);
attr.set_nic_compatible(true);
}
{
mutex_lock l(*context->input_ref_mutex(0));
const Tensor& old_lhs = context->mutable_input(0, /* lock_held */ true);
const bool same_shape = old_lhs.shape().IsSameSize(rhs.shape());
if (validate_shape_) {
OP_REQUIRES(context, same_shape,
errors::InvalidArgument(
"Assign requires shapes of both tensors to match. "
"lhs shape= ",
old_lhs.shape().DebugString(),
" rhs shape= ", rhs.shape().DebugString()));
}
// In the code below we try to minimize the amount of memory allocation
// and copying by trying the following two shortcuts:
// 1. If the lhs is initialized and has the same number of elements as
// the rhs we can avoid a memory allocation.
// 2. If we can reuse the rhs buffer we avoid both a memory allocation
// and copying.
// 1. Try to copy into an existing buffer.
if (old_lhs.IsInitialized() &&
old_lhs.shape().num_elements() == rhs.shape().num_elements()) {
// The existing lhs tensor has already been initialized and the right
// hand side can fit in the underlying buffer.
Tensor reshaped_old_lhs;
if (same_shape) {
reshaped_old_lhs = old_lhs;
} else {
CHECK(reshaped_old_lhs.CopyFrom(old_lhs, rhs.shape()));
context->replace_ref_input(0, reshaped_old_lhs,
/* lock_held */ true);
}
if (use_exclusive_lock_) {
Copy(context, &reshaped_old_lhs, rhs);
return;
}
} else {
// 2. Try to reuse the rhs.
std::unique_ptr<Tensor> input_alias = context->forward_input(
1, OpKernelContext::Params::kNoReservation /*output_index*/,
rhs.dtype(), rhs.shape(), DEVICE_MEMORY, attr);
if (input_alias != nullptr) {
// Update the ref to point to the new buffer.
context->replace_ref_input(0, *input_alias, /* lock_held */ true);
return;
}
// Otherwise, create a new tensor whose shape matches the
// right hand side, hand off to lhs and copy the rhs into it.
Tensor copy_tensor;
OP_REQUIRES_OK(context,
context->allocate_temp(old_lhs.dtype(), rhs.shape(),
©_tensor, attr));
// We track memory of variables in variable ops instead of in this
// assign op.
context->clear_recorded_memory();
context->replace_ref_input(0, copy_tensor, /* lock_held */ true);
if (use_exclusive_lock_) {
Copy(context, ©_tensor, rhs);
return;
}
}
}
// The tensor has already been initialized and the right hand side
// matches the left hand side's shape. We have been told to do the
// copy outside the lock.
Tensor old_unlocked_lhs = context->mutable_input(0, /* lock_held */ false);
Copy(context, &old_unlocked_lhs, rhs);
}
|
305037740106398797533289727050001288809
|
assign_op.h
|
227740622376075800348272805227748641889
|
CWE-908
|
CVE-2022-23573
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `AssignOp` can result in copying uninitialized data to a new tensor. This later results in undefined behavior. The implementation has a check that the left hand side of the assignment is initialized (to minimize the number of allocations), but does not check that the right hand side is also initialized. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23573
|
195,293
|
mruby
|
ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
|
https://github.com/mruby/mruby
|
https://github.com/mruby/mruby/commit/ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
|
codegen.c: fixed a bug in hash code generation with `!val`.
| 1
|
gen_hash(codegen_scope *s, node *tree, int val, int limit)
{
int slimit = GEN_VAL_STACK_MAX;
if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX;
int len = 0;
mrb_bool update = FALSE;
while (tree) {
if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) {
if (len > 0) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
}
codegen(s, tree->car->cdr, val);
if (len > 0 || update) {
pop(); pop();
genop_1(s, OP_HASHCAT, cursp());
push();
}
update = TRUE;
len = 0;
}
else {
codegen(s, tree->car->car, val);
codegen(s, tree->car->cdr, val);
len++;
}
tree = tree->cdr;
if (val && cursp() >= slimit) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
update = TRUE;
len = 0;
}
}
if (update) {
if (val && len > 0) {
pop_n(len*2+1);
genop_2(s, OP_HASHADD, cursp(), len);
push();
}
return -1; /* variable length */
}
return len;
}
|
193019522040384116683756187518117428466
|
codegen.c
|
187346573288549092337421927147361320618
|
CWE-476
|
CVE-2022-0481
|
NULL Pointer Dereference in Homebrew mruby prior to 3.2.
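To exercise this code path from C, a minimal embedding harness can compile a short Ruby snippet; the snippet below is an assumption chosen for illustration (any program whose hash expression reaches gen_hash() with its value discarded, i.e. !val, exercises the buggy branch), not a known reproducer:

#include <mruby.h>
#include <mruby/compile.h>
#include <stdio.h>

int main(void) {
  mrb_state *mrb = mrb_open();
  if (mrb == NULL) return 1;
  /* Illustrative input only; compiling it runs gen_hash() in codegen.c. */
  mrb_load_string(mrb, "def f(**kw) nil end; f(**{a: 1}, b: 2); nil");
  if (mrb->exc) puts("exception raised during load");
  mrb_close(mrb);
  return 0;
}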
|
https://nvd.nist.gov/vuln/detail/CVE-2022-0481
|
224,154
|
mruby
|
ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
|
https://github.com/mruby/mruby
|
https://github.com/mruby/mruby/commit/ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
|
codegen.c: fixed a bug in hash code generation with `!val`.
| 0
|
gen_hash(codegen_scope *s, node *tree, int val, int limit)
{
int slimit = GEN_VAL_STACK_MAX;
if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX;
int len = 0;
mrb_bool update = FALSE;
while (tree) {
if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) {
if (val && len > 0) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
}
codegen(s, tree->car->cdr, val);
if (val && (len > 0 || update)) {
pop(); pop();
genop_1(s, OP_HASHCAT, cursp());
push();
}
update = TRUE;
len = 0;
}
else {
codegen(s, tree->car->car, val);
codegen(s, tree->car->cdr, val);
len++;
}
tree = tree->cdr;
if (val && cursp() >= slimit) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
update = TRUE;
len = 0;
}
}
if (update) {
if (val && len > 0) {
pop_n(len*2+1);
genop_2(s, OP_HASHADD, cursp(), len);
push();
}
return -1; /* variable length */
}
return len;
}
|
86249756338106133378324988596644701448
|
None
|
CWE-476
|
CVE-2022-0481
|
NULL Pointer Dereference in Homebrew mruby prior to 3.2.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-0481
|
|
195,294
|
tensorflow
|
f57315566d7094f322b784947093406c2aea0d7d
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/f57315566d7094f322b784947093406c2aea0d7d
|
Add a check for Key being scalar tensor for MapStage and OrderedMapStage ops.
According to the documentation[1][2], the key must be an int64 scalar, but this wasn't enforced, and the ops would fail with a check failure for a non-scalar key value.
[1]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/ordered-map-stage
[2]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/map-stage
PiperOrigin-RevId: 413822112
Change-Id: I9d118faf990e6361900aa32272eff486ad9f0e2e
| 1
|
void Compute(OpKernelContext* ctx) override {
StagingMap<Ordered>* map = nullptr;
OP_REQUIRES_OK(ctx, GetStagingMap(ctx, def(), &map));
core::ScopedUnref scope(map);
typename StagingMap<Ordered>::OptionalTuple tuple;
const Tensor* key_tensor;
const Tensor* indices_tensor;
OpInputList values_tensor;
OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor));
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor));
OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor));
OP_REQUIRES(ctx, key_tensor->NumElements() > 0,
errors::InvalidArgument("key must not be empty"));
// Create copy for insertion into Staging Area
Tensor key(*key_tensor);
// Create the tuple to store
for (std::size_t i = 0; i < values_tensor.size(); ++i) {
tuple.push_back(values_tensor[i]);
}
// Store the tuple in the map
OP_REQUIRES_OK(ctx, map->put(&key, indices_tensor, &tuple));
}
|
121343016950748954777477429164526353429
|
map_stage_op.cc
|
156634864064326951745718193254274952325
|
CWE-843
|
CVE-2022-21734
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `MapStage` is vulnerable to a `CHECK`-fail if the key tensor is not a scalar. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
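The added precondition reduces to a shape check that rejects both empty and multi-element keys before any copy is made; a toy, self-contained sketch (std::vector standing in for the key tensor) is:

#include <cstdint>
#include <iostream>
#include <vector>

bool ValidateKey(const std::vector<int64_t>& key) {
  if (key.empty()) { std::cerr << "key must not be empty\n"; return false; }
  if (key.size() != 1) { std::cerr << "key must be an int64 scalar\n"; return false; }
  return true;  // exactly one element: a scalar key
}

int main() {
  std::cout << ValidateKey({}) << ValidateKey({1, 2}) << ValidateKey({42}) << "\n";
  return 0;  // prints 001, after the two error messages
}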
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21734
|
224,181
|
tensorflow
|
f57315566d7094f322b784947093406c2aea0d7d
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/f57315566d7094f322b784947093406c2aea0d7d
|
Add a check for Key being scalar tensor for MapStage and OrderedMapStage ops.
According to the documentation[1][2], the key must be an int64 scalar, but this wasn't enforced, and the ops would fail with a check failure for a non-scalar key value.
[1]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/ordered-map-stage
[2]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/map-stage
PiperOrigin-RevId: 413822112
Change-Id: I9d118faf990e6361900aa32272eff486ad9f0e2e
| 0
|
void Compute(OpKernelContext* ctx) override {
StagingMap<Ordered>* map = nullptr;
OP_REQUIRES_OK(ctx, GetStagingMap(ctx, def(), &map));
core::ScopedUnref scope(map);
typename StagingMap<Ordered>::OptionalTuple tuple;
const Tensor* key_tensor;
const Tensor* indices_tensor;
OpInputList values_tensor;
OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor));
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor));
OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor));
OP_REQUIRES(ctx, key_tensor->NumElements() > 0,
errors::InvalidArgument("key must not be empty"));
OP_REQUIRES(ctx, key_tensor->NumElements() == 1,
errors::InvalidArgument(
"key must be an int64 scalar, got tensor with shape: ",
key_tensor->shape()));
// Create copy for insertion into Staging Area
Tensor key(*key_tensor);
// Create the tuple to store
for (std::size_t i = 0; i < values_tensor.size(); ++i) {
tuple.push_back(values_tensor[i]);
}
// Store the tuple in the map
OP_REQUIRES_OK(ctx, map->put(&key, indices_tensor, &tuple));
}
|
155761287933118551217083530225572870599
|
map_stage_op.cc
|
98786060535014659477894352363015597620
|
CWE-843
|
CVE-2022-21734
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `MapStage` is vulnerable to a `CHECK`-fail if the key tensor is not a scalar. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21734
|
195,295
|
mruby
|
c8c083cb750606b2da81582cd8e43b442bb143e6
|
https://github.com/mruby/mruby
|
https://github.com/mruby/mruby/commit/c8c083cb750606b2da81582cd8e43b442bb143e6
|
codegen.c: need to pack argument when `n==13` too.
Because we have 2 extra arguments coming (kw and rhs).
| 1
|
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
int idx;
int type = nint(tree->car);
switch (type) {
case NODE_GVAR:
case NODE_ARG:
case NODE_LVAR:
case NODE_IVAR:
case NODE_CVAR:
case NODE_CONST:
case NODE_NIL:
case NODE_MASGN:
if (rhs) {
codegen(s, rhs, VAL);
pop();
sp = cursp();
}
break;
case NODE_COLON2:
case NODE_CALL:
case NODE_SCALL:
/* keep evaluation order */
break;
case NODE_NVAR:
codegen_error(s, "Can't assign to numbered parameter");
break;
default:
codegen_error(s, "unknown lhs");
break;
}
tree = tree->cdr;
switch (type) {
case NODE_GVAR:
gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
break;
case NODE_ARG:
case NODE_LVAR:
idx = lv_idx(s, nsym(tree));
if (idx > 0) {
if (idx != sp) {
gen_move(s, idx, sp, val);
}
break;
}
else { /* upvar */
gen_setupvar(s, sp, nsym(tree));
}
break;
case NODE_IVAR:
gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
break;
case NODE_CVAR:
gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
break;
case NODE_CONST:
gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
break;
case NODE_COLON2:
if (sp) {
gen_move(s, cursp(), sp, 0);
}
sp = cursp();
push();
codegen(s, tree->car, VAL);
if (rhs) {
codegen(s, rhs, VAL); pop();
gen_move(s, sp, cursp(), 0);
}
pop_n(2);
idx = new_sym(s, nsym(tree->cdr));
genop_2(s, OP_SETMCNST, sp, idx);
break;
case NODE_CALL:
case NODE_SCALL:
{
int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
mrb_sym mid = nsym(tree->cdr->car);
top = cursp();
if (val || sp == cursp()) {
push(); /* room for retval */
}
call = cursp();
if (!tree->car) {
noself = 1;
push();
}
else {
codegen(s, tree->car, VAL); /* receiver */
}
if (safe) {
int recv = cursp()-1;
gen_move(s, cursp(), recv, 1);
skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
}
tree = tree->cdr->cdr->car;
if (tree) {
if (tree->car) { /* positional arguments */
n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
if (n < 0) { /* variable length */
n = 15;
push();
}
}
if (tree->cdr->car) { /* keyword arguments */
if (n == 14) {
pop_n(n);
genop_2(s, OP_ARRAY, cursp(), n);
push();
n = 15;
}
gen_hash(s, tree->cdr->car->cdr, VAL, 0);
if (n < 14) {
n++;
}
else {
pop_n(2);
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
push();
}
}
if (rhs) {
codegen(s, rhs, VAL);
pop();
}
else {
gen_move(s, cursp(), sp, 0);
}
if (val) {
gen_move(s, top, cursp(), 1);
}
if (n < 15) {
n++;
if (n == 15) {
pop_n(14);
genop_2(s, OP_ARRAY, cursp(), 15);
}
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
s->sp = call;
if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
genop_1(s, OP_SETIDX, cursp());
}
else {
genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
}
if (safe) {
dispatch(s, skip);
}
s->sp = top;
}
break;
case NODE_MASGN:
gen_massignment(s, tree->car, sp, val);
break;
/* splat without assignment */
case NODE_NIL:
break;
default:
codegen_error(s, "unknown lhs");
break;
}
if (val) push();
}
|
41954898060187653186067747247923282324
|
codegen.c
|
166306575091061367964033452033729491771
|
CWE-125
|
CVE-2022-1276
|
Out-of-bounds Read in mrb_get_args in GitHub repository mruby/mruby prior to 3.2. Possible arbitrary code execution if exploited.
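The arithmetic behind the fix is small enough to sketch: mruby packs call arguments into an array once the count reaches 15 (the "variable length" marker), and gen_assignment() can still append a keyword hash and the right-hand side after the positional arguments, so packing has to start at n == 13, not only at n == 14. A toy check, under those assumptions:

#include <iostream>

// Counting argument behind the patched "n == 13 || n == 14" condition.
bool needs_packing(int n_positional, bool has_kwargs, bool has_rhs) {
  int total = n_positional + (has_kwargs ? 1 : 0) + (has_rhs ? 1 : 0);
  return total >= 15;  // 15 is the packed "variable length" marker
}

int main() {
  std::cout << needs_packing(13, true, true) << "\n";  // 1: the case the fix adds
  std::cout << needs_packing(12, true, true) << "\n";  // 0: still fits unpacked
  return 0;
}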
|
https://nvd.nist.gov/vuln/detail/CVE-2022-1276
|
224,192
|
mruby
|
c8c083cb750606b2da81582cd8e43b442bb143e6
|
https://github.com/mruby/mruby
|
https://github.com/mruby/mruby/commit/c8c083cb750606b2da81582cd8e43b442bb143e6
|
codegen.c: need to pack argument when `n==13` too.
Because we have 2 extra arguments coming (kw and rhs).
| 0
|
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
int idx;
int type = nint(tree->car);
switch (type) {
case NODE_GVAR:
case NODE_ARG:
case NODE_LVAR:
case NODE_IVAR:
case NODE_CVAR:
case NODE_CONST:
case NODE_NIL:
case NODE_MASGN:
if (rhs) {
codegen(s, rhs, VAL);
pop();
sp = cursp();
}
break;
case NODE_COLON2:
case NODE_CALL:
case NODE_SCALL:
/* keep evaluation order */
break;
case NODE_NVAR:
codegen_error(s, "Can't assign to numbered parameter");
break;
default:
codegen_error(s, "unknown lhs");
break;
}
tree = tree->cdr;
switch (type) {
case NODE_GVAR:
gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
break;
case NODE_ARG:
case NODE_LVAR:
idx = lv_idx(s, nsym(tree));
if (idx > 0) {
if (idx != sp) {
gen_move(s, idx, sp, val);
}
break;
}
else { /* upvar */
gen_setupvar(s, sp, nsym(tree));
}
break;
case NODE_IVAR:
gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
break;
case NODE_CVAR:
gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
break;
case NODE_CONST:
gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
break;
case NODE_COLON2:
if (sp) {
gen_move(s, cursp(), sp, 0);
}
sp = cursp();
push();
codegen(s, tree->car, VAL);
if (rhs) {
codegen(s, rhs, VAL); pop();
gen_move(s, sp, cursp(), 0);
}
pop_n(2);
idx = new_sym(s, nsym(tree->cdr));
genop_2(s, OP_SETMCNST, sp, idx);
break;
case NODE_CALL:
case NODE_SCALL:
{
int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
mrb_sym mid = nsym(tree->cdr->car);
top = cursp();
if (val || sp == cursp()) {
push(); /* room for retval */
}
call = cursp();
if (!tree->car) {
noself = 1;
push();
}
else {
codegen(s, tree->car, VAL); /* receiver */
}
if (safe) {
int recv = cursp()-1;
gen_move(s, cursp(), recv, 1);
skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
}
tree = tree->cdr->cdr->car;
if (tree) {
if (tree->car) { /* positional arguments */
n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
if (n < 0) { /* variable length */
n = 15;
push();
}
}
if (tree->cdr->car) { /* keyword arguments */
if (n == 13 || n == 14) {
pop_n(n);
genop_2(s, OP_ARRAY, cursp(), n);
push();
n = 15;
}
gen_hash(s, tree->cdr->car->cdr, VAL, 0);
if (n < 14) {
n++;
}
else {
pop_n(2);
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
push();
}
}
if (rhs) {
codegen(s, rhs, VAL);
pop();
}
else {
gen_move(s, cursp(), sp, 0);
}
if (val) {
gen_move(s, top, cursp(), 1);
}
if (n < 15) {
n++;
if (n == 15) {
pop_n(14);
genop_2(s, OP_ARRAY, cursp(), 15);
}
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
s->sp = call;
if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
genop_1(s, OP_SETIDX, cursp());
}
else {
genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
}
if (safe) {
dispatch(s, skip);
}
s->sp = top;
}
break;
case NODE_MASGN:
gen_massignment(s, tree->car, sp, val);
break;
/* splat without assignment */
case NODE_NIL:
break;
default:
codegen_error(s, "unknown lhs");
break;
}
if (val) push();
}
|
72719402367551467019466616883888506134
|
codegen.c
|
21664036215001583958906059719703447460
|
CWE-125
|
CVE-2022-1276
|
Out-of-bounds Read in mrb_get_args in GitHub repository mruby/mruby prior to 3.2. Possible arbitrary code execution if exploited.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-1276
|
195,296
|
uWebSockets
|
03fca626a95130ab80f86adada54b29d27242759
|
https://github.com/uWebSockets/uWebSockets
|
https://github.com/uWebSockets/uWebSockets/commit/03fca626a95130ab80f86adada54b29d27242759
|
Fix overflow of triggered topics
| 1
|
void publish(Topic *iterator, size_t start, size_t stop, std::string_view topic, std::pair<std::string_view, std::string_view> message) {
/* If we already have 64 triggered topics make sure to drain it here */
if (numTriggeredTopics == 64) {
drain();
}
/* Iterate over all segments in given topic */
for (; stop != std::string::npos; start = stop + 1) {
stop = topic.find('/', start);
std::string_view segment = topic.substr(start, stop - start);
/* It is very important to disallow wildcards when publishing.
* We will not catch EVERY misuse this lazy way, but enough to hinder
* explosive recursion.
* Terminating wildcards MAY still get triggered along the way, if for
* instance the error is found late while iterating the topic segments. */
if (segment.length() == 1) {
if (segment[0] == '+' || segment[0] == '#') {
return;
}
}
/* Do we have a terminating wildcard child? */
if (iterator->terminatingWildcardChild) {
iterator->terminatingWildcardChild->messages[messageId] = message;
/* Add this topic to triggered */
if (!iterator->terminatingWildcardChild->triggered) {
triggeredTopics[numTriggeredTopics++] = iterator->terminatingWildcardChild;
iterator->terminatingWildcardChild->triggered = true;
}
}
/* Do we have a wildcard child? */
if (iterator->wildcardChild) {
publish(iterator->wildcardChild, stop + 1, stop, topic, message);
}
std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment);
if (it == iterator->children.end()) {
/* Stop trying to match by exact string */
return;
}
iterator = it->second;
}
/* If we went all the way we matched exactly */
iterator->messages[messageId] = message;
/* Add this topic to triggered */
if (!iterator->triggered) {
triggeredTopics[numTriggeredTopics++] = iterator;
iterator->triggered = true;
}
}
|
134169268379037550524482088626348972483
|
TopicTree.h
|
203813195164088557620454025257855576407
|
CWE-787
|
CVE-2020-36406
|
uWebSockets 18.11.0 and 18.12.0 has a stack-based buffer overflow in uWS::TopicTree::trimTree (called from uWS::TopicTree::unsubscribeAll). NOTE: the vendor's position is that this is "a minor issue or not even an issue at all" because the developer of an application (that uses uWebSockets) should not be allowing a large number of triggered topics to accumulate.
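The essence of the fix (visible in the target-0 record below) is to move the capacity check from once-per-publish to every append site, since the recursive wildcard walk can push several topics per call. A self-contained toy version of that pattern:

#include <cstddef>
#include <iostream>

constexpr std::size_t kCapacity = 64;  // mirrors the fixed-size triggeredTopics array
int triggered[kCapacity];
std::size_t numTriggered = 0;

void drain() { numTriggered = 0; /* deliver messages and reset in the real code */ }

void append(int topic) {
  if (numTriggered == kCapacity) drain();  // guard at the append site itself
  triggered[numTriggered++] = topic;       // now provably in bounds
}

int main() {
  for (int i = 0; i < 200; ++i) append(i);  // never overflows the array
  std::cout << numTriggered << "\n";        // 8 (200 mod 64)
  return 0;
}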
|
https://nvd.nist.gov/vuln/detail/CVE-2020-36406
|
224,208
|
uWebSockets
|
03fca626a95130ab80f86adada54b29d27242759
|
https://github.com/uWebSockets/uWebSockets
|
https://github.com/uWebSockets/uWebSockets/commit/03fca626a95130ab80f86adada54b29d27242759
|
Fix overflow of triggered topics
| 0
|
void publish(Topic *iterator, size_t start, size_t stop, std::string_view topic, std::pair<std::string_view, std::string_view> message) {
/* Iterate over all segments in given topic */
for (; stop != std::string::npos; start = stop + 1) {
stop = topic.find('/', start);
std::string_view segment = topic.substr(start, stop - start);
/* It is very important to disallow wildcards when publishing.
* We will not catch EVERY misuse this lazy way, but enough to hinder
* explosive recursion.
* Terminating wildcards MAY still get triggered along the way, if for
* instance the error is found late while iterating the topic segments. */
if (segment.length() == 1) {
if (segment[0] == '+' || segment[0] == '#') {
return;
}
}
/* Do we have a terminating wildcard child? */
if (iterator->terminatingWildcardChild) {
iterator->terminatingWildcardChild->messages[messageId] = message;
/* Add this topic to triggered */
if (!iterator->terminatingWildcardChild->triggered) {
/* If we already have 64 triggered topics make sure to drain it here */
if (numTriggeredTopics == 64) {
drain();
}
triggeredTopics[numTriggeredTopics++] = iterator->terminatingWildcardChild;
iterator->terminatingWildcardChild->triggered = true;
}
}
/* Do we have a wildcard child? */
if (iterator->wildcardChild) {
publish(iterator->wildcardChild, stop + 1, stop, topic, message);
}
std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment);
if (it == iterator->children.end()) {
/* Stop trying to match by exact string */
return;
}
iterator = it->second;
}
/* If we went all the way we matched exactly */
iterator->messages[messageId] = message;
/* Add this topic to triggered */
if (!iterator->triggered) {
/* If we already have 64 triggered topics make sure to drain it here */
if (numTriggeredTopics == 64) {
drain();
}
triggeredTopics[numTriggeredTopics++] = iterator;
iterator->triggered = true;
}
}
|
239871427944982498561615868059507506120
|
TopicTree.h
|
110499788872538385962916215167459222787
|
CWE-787
|
CVE-2020-36406
|
uWebSockets 18.11.0 and 18.12.0 has a stack-based buffer overflow in uWS::TopicTree::trimTree (called from uWS::TopicTree::unsubscribeAll). NOTE: the vendor's position is that this is "a minor issue or not even an issue at all" because the developer of an application (that uses uWebSockets) should not be allowing a large number of triggered topics to accumulate.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-36406
|
195,308
|
flatpak
|
462fca2c666e0cd2b60d6d2593a7216a83047aaf
|
https://github.com/flatpak/flatpak
|
https://github.com/flatpak/flatpak/commit/462fca2c666e0cd2b60d6d2593a7216a83047aaf
|
run: Don't allow chroot()
If we don't allow pivot_root() then there seems to be no reason why we should
allow chroot().
Partially fixes GHSA-67h7-w3jq-vh4q.
Signed-off-by: Simon McVittie <[email protected]>
| 1
|
setup_seccomp (FlatpakBwrap *bwrap,
const char *arch,
gulong allowed_personality,
FlatpakRunFlags run_flags,
GError **error)
{
gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;
__attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;
/**** BEGIN NOTE ON CODE SHARING
*
* There are today a number of different Linux container
* implementations. That will likely continue for long into the
* future. But we can still try to share code, and it's important
* to do so because it affects what library and application writers
* can do, and we should support code portability between different
* container tools.
*
* This syscall blocklist is copied from linux-user-chroot, which was in turn
* clearly influenced by the Sandstorm.io blocklist.
*
* If you make any changes here, I suggest sending the changes along
* to other sandbox maintainers. Using the libseccomp list is also
* an appropriate venue:
* https://groups.google.com/forum/#!forum/libseccomp
*
* A non-exhaustive list of links to container tooling that might
* want to share this blocklist:
*
* https://github.com/sandstorm-io/sandstorm
* in src/sandstorm/supervisor.c++
* https://github.com/flatpak/flatpak.git
* in common/flatpak-run.c
* https://git.gnome.org/browse/linux-user-chroot
* in src/setup-seccomp.c
*
* Other useful resources:
* https://github.com/systemd/systemd/blob/HEAD/src/shared/seccomp-util.c
* https://github.com/moby/moby/blob/HEAD/profiles/seccomp/default.json
*
**** END NOTE ON CODE SHARING
*/
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_blocklist[] = {
/* Block dmesg */
{SCMP_SYS (syslog), EPERM},
/* Useless old syscall */
{SCMP_SYS (uselib), EPERM},
/* Don't allow disabling accounting */
{SCMP_SYS (acct), EPERM},
/* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
historic source of interesting information leaks. */
{SCMP_SYS (modify_ldt), EPERM},
/* Don't allow reading current quota use */
{SCMP_SYS (quotactl), EPERM},
/* Don't allow access to the kernel keyring */
{SCMP_SYS (add_key), EPERM},
{SCMP_SYS (keyctl), EPERM},
{SCMP_SYS (request_key), EPERM},
/* Scary VM/NUMA ops */
{SCMP_SYS (move_pages), EPERM},
{SCMP_SYS (mbind), EPERM},
{SCMP_SYS (get_mempolicy), EPERM},
{SCMP_SYS (set_mempolicy), EPERM},
{SCMP_SYS (migrate_pages), EPERM},
/* Don't allow subnamespace setups: */
{SCMP_SYS (unshare), EPERM},
{SCMP_SYS (setns), EPERM},
{SCMP_SYS (mount), EPERM},
{SCMP_SYS (umount), EPERM},
{SCMP_SYS (umount2), EPERM},
{SCMP_SYS (pivot_root), EPERM},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
/* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
* and flags arguments are reversed so the flags come second */
{SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
/* Normally the flags come first */
{SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif
/* Don't allow faking input to the controlling tty (CVE-2017-5226) */
{SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
/* seccomp can't look into clone3()'s struct clone_args to check whether
* the flags are OK, so we have no choice but to block clone3().
* Return ENOSYS so user-space will fall back to clone().
* (GHSA-67h7-w3jq-vh4q; see also https://github.com/moby/moby/commit/9f6b562d) */
{SCMP_SYS (clone3), ENOSYS},
/* New mount manipulation APIs can also change our VFS. There's no
* legitimate reason to do these in the sandbox, so block all of them
* rather than thinking about which ones might be dangerous.
* (GHSA-67h7-w3jq-vh4q) */
{SCMP_SYS (open_tree), ENOSYS},
{SCMP_SYS (move_mount), ENOSYS},
{SCMP_SYS (fsopen), ENOSYS},
{SCMP_SYS (fsconfig), ENOSYS},
{SCMP_SYS (fsmount), ENOSYS},
{SCMP_SYS (fspick), ENOSYS},
{SCMP_SYS (mount_setattr), ENOSYS},
};
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_nondevel_blocklist[] = {
/* Profiling operations; we expect these to be done by tools from outside
* the sandbox. In particular perf has been the source of many CVEs.
*/
{SCMP_SYS (perf_event_open), EPERM},
/* Don't allow you to switch to bsd emulation or whatnot */
{SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
{SCMP_SYS (ptrace), EPERM}
};
/* Blocklist all but unix, inet, inet6 and netlink */
struct
{
int family;
FlatpakRunFlags flags_mask;
} socket_family_allowlist[] = {
/* NOTE: Keep in numerical order */
{ AF_UNSPEC, 0 },
{ AF_LOCAL, 0 },
{ AF_INET, 0 },
{ AF_INET6, 0 },
{ AF_NETLINK, 0 },
{ AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
{ AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
};
int last_allowed_family;
int i, r;
g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };
seccomp = seccomp_init (SCMP_ACT_ALLOW);
if (!seccomp)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));
if (arch != NULL)
{
uint32_t arch_id = 0;
const uint32_t *extra_arches = NULL;
if (strcmp (arch, "i386") == 0)
{
arch_id = SCMP_ARCH_X86;
}
else if (strcmp (arch, "x86_64") == 0)
{
arch_id = SCMP_ARCH_X86_64;
extra_arches = seccomp_x86_64_extra_arches;
}
else if (strcmp (arch, "arm") == 0)
{
arch_id = SCMP_ARCH_ARM;
}
#ifdef SCMP_ARCH_AARCH64
else if (strcmp (arch, "aarch64") == 0)
{
arch_id = SCMP_ARCH_AARCH64;
extra_arches = seccomp_aarch64_extra_arches;
}
#endif
/* We only really need to handle arches on multiarch systems.
* If only one arch is supported the default is fine */
if (arch_id != 0)
{
/* This *adds* the target arch, instead of replacing the
native one. This is not ideal, because we'd like to only
allow the target arch, but we can't really disallow the
native arch at this point, because then bubblewrap
couldn't continue running. */
r = seccomp_arch_add (seccomp, arch_id);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));
if (multiarch && extra_arches != NULL)
{
for (i = 0; extra_arches[i] != 0; i++)
{
r = seccomp_arch_add (seccomp, extra_arches[i]);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
}
}
}
}
/* TODO: Should we filter the kernel keyring syscalls in some way?
* We do want them to be used by desktop apps, but they could also perhaps
* leak system stuff or secrets from other apps.
*/
for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
{
int scall = syscall_blocklist[i].scall;
int errnum = syscall_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
{
int scall = syscall_nondevel_blocklist[i].scall;
int errnum = syscall_nondevel_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_nondevel_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
}
/* Socket filtering doesn't work on e.g. i386, so ignore failures here
* However, we need to user seccomp_rule_add_exact to avoid libseccomp doing
* something else: https://github.com/seccomp/libseccomp/issues/8 */
last_allowed_family = -1;
for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
{
int family = socket_family_allowlist[i].family;
int disallowed;
if (socket_family_allowlist[i].flags_mask != 0 &&
(socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
continue;
for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
{
/* Blocklist the in-between valid families */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
}
last_allowed_family = family;
}
/* Blocklist the rest */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));
if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
return FALSE;
if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));
lseek (seccomp_tmpf.fd, 0, SEEK_SET);
flatpak_bwrap_add_args_data_fd (bwrap,
"--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);
return TRUE;
}
|
116661486604620809625071911593237669795
|
flatpak-run.c
|
32398709380082441128978861691951488575
|
CWE-20
|
CVE-2021-41133
|
Flatpak is a system for building, distributing, and running sandboxed desktop applications on Linux. In versions prior to 1.10.4 and 1.12.0, Flatpak apps with direct access to AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can trick portals and other host-OS services into treating the Flatpak app as though it was an ordinary, non-sandboxed host-OS process. They can do this by manipulating the VFS using recent mount-related syscalls that are not blocked by Flatpak's denylist seccomp filter, in order to substitute a crafted `/.flatpak-info` or make that file disappear entirely. Flatpak apps that act as clients for AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can escalate the privileges that the corresponding services will believe the Flatpak app has. Note that protocols that operate entirely over the D-Bus session bus (user bus), system bus or accessibility bus are not affected by this. This is due to the use of a proxy process `xdg-dbus-proxy`, whose VFS cannot be manipulated by the Flatpak app, when interacting with these buses. Patches exist for versions 1.10.4 and 1.12.0, and as of the time of publication, a patch for version 1.8.2 is being planned. There are no workarounds aside from upgrading to a patched version.
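A minimal, self-contained libseccomp sketch in the same spirit as this fix, returning EPERM for chroot()/pivot_root() and ENOSYS for the new mount-manipulation syscalls so callers fall back cleanly, is shown below. The syscall choices mirror the patched blocklist; the rest (trimmed error handling, default-allow policy) is illustrative. Requires libseccomp 2.5+ for the newer syscall names; link with -lseccomp.

#include <errno.h>
#include <seccomp.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);  /* default-allow filter */
  if (ctx == NULL) return 1;
  seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chroot), 0);
  seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(pivot_root), 0);
  seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(open_tree), 0);
  seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(move_mount), 0);
  if (seccomp_load(ctx) != 0) { seccomp_release(ctx); return 1; }
  seccomp_release(ctx);
  /* Expected to fail with EPERM, from the filter (or from lacking
     CAP_SYS_CHROOT when run unprivileged). */
  if (chroot("/") != 0) perror("chroot");
  return 0;
}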
|
https://nvd.nist.gov/vuln/detail/CVE-2021-41133
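A note on the socket-family filter in the truncated function above: it relies on the allowlist being sorted in ascending numeric order. Every family that falls in a gap between two allowed entries is denied with one SCMP_CMP_EQ rule, and a single SCMP_CMP_GE rule denies everything above the last allowed family. A minimal standalone sketch of that gap-filling strategy, with hypothetical deny_eq()/deny_ge() callbacks standing in for the seccomp_rule_add_exact() calls:

```cpp
#include <cstdio>

// Hypothetical stand-ins for seccomp_rule_add_exact() with SCMP_CMP_EQ/GE.
static void deny_eq(int family) { std::printf("deny family == %d\n", family); }
static void deny_ge(int family) { std::printf("deny family >= %d\n", family); }

int main() {
    // Must be sorted ascending, as the comment in the real table insists.
    const int allowlist[] = { 0 /* AF_UNSPEC */, 1 /* AF_LOCAL */,
                              2 /* AF_INET */, 10 /* AF_INET6 */,
                              16 /* AF_NETLINK */ };
    int last_allowed = -1;
    for (int family : allowlist) {
        for (int gap = last_allowed + 1; gap < family; ++gap)
            deny_eq(gap);          // one EQ rule per family inside a gap
        last_allowed = family;
    }
    deny_ge(last_allowed + 1);     // one GE rule for everything above the list
    return 0;
}
```

If an entry were ever inserted out of order, its gap rules would silently deny an intended family, which is why the real table carries a "Keep in numerical order" comment.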
|
224,277
|
flatpak
|
462fca2c666e0cd2b60d6d2593a7216a83047aaf
|
https://github.com/flatpak/flatpak
|
https://github.com/flatpak/flatpak/commit/462fca2c666e0cd2b60d6d2593a7216a83047aaf
|
run: Don't allow chroot()
If we don't allow pivot_root() then there seems no reason why we should
allow chroot().
Partially fixes GHSA-67h7-w3jq-vh4q.
Signed-off-by: Simon McVittie <[email protected]>
| 0
|
setup_seccomp (FlatpakBwrap *bwrap,
const char *arch,
gulong allowed_personality,
FlatpakRunFlags run_flags,
GError **error)
{
gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;
__attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;
/**** BEGIN NOTE ON CODE SHARING
*
* There are today a number of different Linux container
* implementations. That will likely continue for long into the
* future. But we can still try to share code, and it's important
* to do so because it affects what library and application writers
* can do, and we should support code portability between different
* container tools.
*
* This syscall blocklist is copied from linux-user-chroot, which was in turn
* clearly influenced by the Sandstorm.io blocklist.
*
* If you make any changes here, I suggest sending the changes along
* to other sandbox maintainers. Using the libseccomp list is also
* an appropriate venue:
* https://groups.google.com/forum/#!forum/libseccomp
*
* A non-exhaustive list of links to container tooling that might
* want to share this blocklist:
*
* https://github.com/sandstorm-io/sandstorm
* in src/sandstorm/supervisor.c++
* https://github.com/flatpak/flatpak.git
* in common/flatpak-run.c
* https://git.gnome.org/browse/linux-user-chroot
* in src/setup-seccomp.c
*
* Other useful resources:
* https://github.com/systemd/systemd/blob/HEAD/src/shared/seccomp-util.c
* https://github.com/moby/moby/blob/HEAD/profiles/seccomp/default.json
*
**** END NOTE ON CODE SHARING
*/
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_blocklist[] = {
/* Block dmesg */
{SCMP_SYS (syslog), EPERM},
/* Useless old syscall */
{SCMP_SYS (uselib), EPERM},
/* Don't allow disabling accounting */
{SCMP_SYS (acct), EPERM},
/* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
historic source of interesting information leaks. */
{SCMP_SYS (modify_ldt), EPERM},
/* Don't allow reading current quota use */
{SCMP_SYS (quotactl), EPERM},
/* Don't allow access to the kernel keyring */
{SCMP_SYS (add_key), EPERM},
{SCMP_SYS (keyctl), EPERM},
{SCMP_SYS (request_key), EPERM},
/* Scary VM/NUMA ops */
{SCMP_SYS (move_pages), EPERM},
{SCMP_SYS (mbind), EPERM},
{SCMP_SYS (get_mempolicy), EPERM},
{SCMP_SYS (set_mempolicy), EPERM},
{SCMP_SYS (migrate_pages), EPERM},
/* Don't allow subnamespace setups: */
{SCMP_SYS (unshare), EPERM},
{SCMP_SYS (setns), EPERM},
{SCMP_SYS (mount), EPERM},
{SCMP_SYS (umount), EPERM},
{SCMP_SYS (umount2), EPERM},
{SCMP_SYS (pivot_root), EPERM},
{SCMP_SYS (chroot), EPERM},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
/* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
* and flags arguments are reversed so the flags come second */
{SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
/* Normally the flags come first */
{SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif
/* Don't allow faking input to the controlling tty (CVE-2017-5226) */
{SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
/* seccomp can't look into clone3()'s struct clone_args to check whether
* the flags are OK, so we have no choice but to block clone3().
* Return ENOSYS so user-space will fall back to clone().
* (GHSA-67h7-w3jq-vh4q; see also https://github.com/moby/moby/commit/9f6b562d) */
{SCMP_SYS (clone3), ENOSYS},
/* New mount manipulation APIs can also change our VFS. There's no
* legitimate reason to do these in the sandbox, so block all of them
* rather than thinking about which ones might be dangerous.
* (GHSA-67h7-w3jq-vh4q) */
{SCMP_SYS (open_tree), ENOSYS},
{SCMP_SYS (move_mount), ENOSYS},
{SCMP_SYS (fsopen), ENOSYS},
{SCMP_SYS (fsconfig), ENOSYS},
{SCMP_SYS (fsmount), ENOSYS},
{SCMP_SYS (fspick), ENOSYS},
{SCMP_SYS (mount_setattr), ENOSYS},
};
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_nondevel_blocklist[] = {
/* Profiling operations; we expect these to be done by tools from outside
* the sandbox. In particular perf has been the source of many CVEs.
*/
{SCMP_SYS (perf_event_open), EPERM},
/* Don't allow you to switch to bsd emulation or whatnot */
{SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
{SCMP_SYS (ptrace), EPERM}
};
/* Blocklist all but unix, inet, inet6 and netlink */
struct
{
int family;
FlatpakRunFlags flags_mask;
} socket_family_allowlist[] = {
/* NOTE: Keep in numerical order */
{ AF_UNSPEC, 0 },
{ AF_LOCAL, 0 },
{ AF_INET, 0 },
{ AF_INET6, 0 },
{ AF_NETLINK, 0 },
{ AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
{ AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
};
int last_allowed_family;
int i, r;
g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };
seccomp = seccomp_init (SCMP_ACT_ALLOW);
if (!seccomp)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));
if (arch != NULL)
{
uint32_t arch_id = 0;
const uint32_t *extra_arches = NULL;
if (strcmp (arch, "i386") == 0)
{
arch_id = SCMP_ARCH_X86;
}
else if (strcmp (arch, "x86_64") == 0)
{
arch_id = SCMP_ARCH_X86_64;
extra_arches = seccomp_x86_64_extra_arches;
}
else if (strcmp (arch, "arm") == 0)
{
arch_id = SCMP_ARCH_ARM;
}
#ifdef SCMP_ARCH_AARCH64
else if (strcmp (arch, "aarch64") == 0)
{
arch_id = SCMP_ARCH_AARCH64;
extra_arches = seccomp_aarch64_extra_arches;
}
#endif
/* We only really need to handle arches on multiarch systems.
* If only one arch is supported the default is fine */
if (arch_id != 0)
{
/* This *adds* the target arch, instead of replacing the
native one. This is not ideal, because we'd like to only
allow the target arch, but we can't really disallow the
native arch at this point, because then bubblewrap
couldn't continue running. */
r = seccomp_arch_add (seccomp, arch_id);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));
if (multiarch && extra_arches != NULL)
{
for (i = 0; extra_arches[i] != 0; i++)
{
r = seccomp_arch_add (seccomp, extra_arches[i]);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
}
}
}
}
/* TODO: Should we filter the kernel keyring syscalls in some way?
* We do want them to be used by desktop apps, but they could also perhaps
* leak system stuff or secrets from other apps.
*/
for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
{
int scall = syscall_blocklist[i].scall;
int errnum = syscall_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
{
int scall = syscall_nondevel_blocklist[i].scall;
int errnum = syscall_nondevel_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_nondevel_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
}
/* Socket filtering doesn't work on e.g. i386, so ignore failures here.
* However, we need to use seccomp_rule_add_exact to avoid libseccomp doing
* something else: https://github.com/seccomp/libseccomp/issues/8 */
last_allowed_family = -1;
for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
{
int family = socket_family_allowlist[i].family;
int disallowed;
if (socket_family_allowlist[i].flags_mask != 0 &&
(socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
continue;
for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
{
/* Blocklist the in-between valid families */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
}
last_allowed_family = family;
}
/* Blocklist the rest */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));
if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
return FALSE;
if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));
lseek (seccomp_tmpf.fd, 0, SEEK_SET);
flatpak_bwrap_add_args_data_fd (bwrap,
"--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);
return TRUE;
}
|
98799963199923512278205367577377591800
|
flatpak-run.c
|
147844647821640300549119412024985340412
|
CWE-20
|
CVE-2021-41133
|
Flatpak is a system for building, distributing, and running sandboxed desktop applications on Linux. In versions prior to 1.10.4 and 1.12.0, Flatpak apps with direct access to AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can trick portals and other host-OS services into treating the Flatpak app as though it was an ordinary, non-sandboxed host-OS process. They can do this by manipulating the VFS using recent mount-related syscalls that are not blocked by Flatpak's denylist seccomp filter, in order to substitute a crafted `/.flatpak-info` or make that file disappear entirely. Flatpak apps that act as clients for AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can escalate the privileges that the corresponding services will believe the Flatpak app has. Note that protocols that operate entirely over the D-Bus session bus (user bus), system bus or accessibility bus are not affected by this. This is due to the use of a proxy process `xdg-dbus-proxy`, whose VFS cannot be manipulated by the Flatpak app, when interacting with these buses. Patches exist for versions 1.10.4 and 1.12.0, and as of time of publication, a patch for version 1.8.2 is being planned. There are no workarounds aside from upgrading to a patched version.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-41133
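This record is the fixed version: relative to the vulnerable filter it adds chroot() (denied with EPERM, like pivot_root) alongside the earlier additions of clone3() and the new mount-manipulation syscalls (denied with ENOSYS so callers fall back as if the kernel lacked them). A minimal self-contained sketch of that libseccomp pattern, assuming a libseccomp new enough to define these SCMP_SYS() entries; error handling is trimmed for brevity:

```cpp
#include <cerrno>
#include <seccomp.h>

// Deny chroot() with EPERM (a plain "not permitted") and a few of the new
// mount-API syscalls with ENOSYS (so userspace treats them as unavailable).
int apply_blocklist(void) {
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    if (ctx == NULL)
        return -1;
    seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chroot), 0);
    seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(open_tree), 0);
    seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(move_mount), 0);
    seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(fsmount), 0);
    int r = seccomp_load(ctx);   // install into the current process
    seccomp_release(ctx);
    return r;
}
```

The real function differs in one respect: instead of seccomp_load() it exports the filter as BPF (seccomp_export_bpf) into a tmpfile and hands the fd to bubblewrap via --seccomp.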
|
195,309
|
squid
|
5e2ea2b13bd98f53e29964ca26bb0d602a8a12b9
|
https://github.com/squid-cache/squid
|
https://github.com/squid-cache/squid/commit/5e2ea2b13bd98f53e29964ca26bb0d602a8a12b9
|
Improve handling of Gopher responses (#1022)
| 1
|
gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
{
char *pos = inbuf;
char *lpos = NULL;
char *tline = NULL;
LOCAL_ARRAY(char, line, TEMP_BUF_SIZE);
LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE);
char *name = NULL;
char *selector = NULL;
char *host = NULL;
char *port = NULL;
char *escaped_selector = NULL;
const char *icon_url = NULL;
char gtype;
StoreEntry *entry = NULL;
memset(tmpbuf, '\0', TEMP_BUF_SIZE);
memset(line, '\0', TEMP_BUF_SIZE);
entry = gopherState->entry;
if (gopherState->conversion == GopherStateData::HTML_INDEX_PAGE) {
char *html_url = html_quote(entry->url());
gopherHTMLHeader(entry, "Gopher Index %s", html_url);
storeAppendPrintf(entry,
"<p>This is a searchable Gopher index. Use the search\n"
"function of your browser to enter search terms.\n"
"<ISINDEX>\n");
gopherHTMLFooter(entry);
/* now let's start sending stuff to the client */
entry->flush();
gopherState->HTML_header_added = 1;
return;
}
if (gopherState->conversion == GopherStateData::HTML_CSO_PAGE) {
char *html_url = html_quote(entry->url());
gopherHTMLHeader(entry, "CSO Search of %s", html_url);
storeAppendPrintf(entry,
"<P>A CSO database usually contains a phonebook or\n"
"directory. Use the search function of your browser to enter\n"
"search terms.</P><ISINDEX>\n");
gopherHTMLFooter(entry);
/* now let's start sending stuff to the client */
entry->flush();
gopherState->HTML_header_added = 1;
return;
}
String outbuf;
if (!gopherState->HTML_header_added) {
if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT)
gopherHTMLHeader(entry, "CSO Search Result", NULL);
else
gopherHTMLHeader(entry, "Gopher Menu", NULL);
outbuf.append ("<PRE>");
gopherState->HTML_header_added = 1;
gopherState->HTML_pre = 1;
}
while (pos < inbuf + len) {
int llen;
int left = len - (pos - inbuf);
lpos = (char *)memchr(pos, '\n', left);
if (lpos) {
++lpos; /* Next line is after \n */
llen = lpos - pos;
} else {
llen = left;
}
if (gopherState->len + llen >= TEMP_BUF_SIZE) {
debugs(10, DBG_IMPORTANT, "GopherHTML: Buffer overflow. Lost some data on URL: " << entry->url() );
llen = TEMP_BUF_SIZE - gopherState->len - 1;
gopherState->overflowed = true; // may already be true
}
if (!lpos) {
/* there is no complete line in inbuf */
/* copy it to temp buffer */
/* note: llen is adjusted above */
memcpy(gopherState->buf + gopherState->len, pos, llen);
gopherState->len += llen;
break;
}
if (gopherState->len != 0) {
/* there is something left from last tx. */
memcpy(line, gopherState->buf, gopherState->len);
memcpy(line + gopherState->len, pos, llen);
llen += gopherState->len;
gopherState->len = 0;
} else {
memcpy(line, pos, llen);
}
line[llen + 1] = '\0';
/* move input to next line */
pos = lpos;
/* At this point we should have one line in the buffer to process */
if (*line == '.') {
/* skip it */
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
switch (gopherState->conversion) {
case GopherStateData::HTML_INDEX_RESULT:
case GopherStateData::HTML_DIR: {
tline = line;
gtype = *tline;
++tline;
name = tline;
selector = strchr(tline, TAB);
if (selector) {
*selector = '\0';
++selector;
host = strchr(selector, TAB);
if (host) {
*host = '\0';
++host;
port = strchr(host, TAB);
if (port) {
char *junk;
port[0] = ':';
junk = strchr(host, TAB);
if (junk)
*junk++ = 0; /* Chop port */
else {
junk = strchr(host, '\r');
if (junk)
*junk++ = 0; /* Chop port */
else {
junk = strchr(host, '\n');
if (junk)
*junk++ = 0; /* Chop port */
}
}
if ((port[1] == '0') && (!port[2]))
port[0] = 0; /* 0 means none */
}
/* escape a selector here */
escaped_selector = xstrdup(rfc1738_escape_part(selector));
switch (gtype) {
case GOPHER_DIRECTORY:
icon_url = mimeGetIconURL("internal-menu");
break;
case GOPHER_HTML:
case GOPHER_FILE:
icon_url = mimeGetIconURL("internal-text");
break;
case GOPHER_INDEX:
case GOPHER_CSO:
icon_url = mimeGetIconURL("internal-index");
break;
case GOPHER_IMAGE:
case GOPHER_GIF:
case GOPHER_PLUS_IMAGE:
icon_url = mimeGetIconURL("internal-image");
break;
case GOPHER_SOUND:
case GOPHER_PLUS_SOUND:
icon_url = mimeGetIconURL("internal-sound");
break;
case GOPHER_PLUS_MOVIE:
icon_url = mimeGetIconURL("internal-movie");
break;
case GOPHER_TELNET:
case GOPHER_3270:
icon_url = mimeGetIconURL("internal-telnet");
break;
case GOPHER_BIN:
case GOPHER_MACBINHEX:
case GOPHER_DOSBIN:
case GOPHER_UUENCODED:
icon_url = mimeGetIconURL("internal-binary");
break;
case GOPHER_INFO:
icon_url = NULL;
break;
case GOPHER_WWW:
icon_url = mimeGetIconURL("internal-link");
break;
default:
icon_url = mimeGetIconURL("internal-unknown");
break;
}
memset(tmpbuf, '\0', TEMP_BUF_SIZE);
if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) {
if (strlen(escaped_selector) != 0)
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
icon_url, escaped_selector, rfc1738_escape_part(host),
*port ? ":" : "", port, html_quote(name));
else
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
icon_url, rfc1738_escape_part(host), *port ? ":" : "",
port, html_quote(name));
} else if (gtype == GOPHER_INFO) {
snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name));
} else {
if (strncmp(selector, "GET /", 5) == 0) {
/* WWW link */
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
} else if (gtype == GOPHER_WWW) {
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"%s\">%s</A>\n",
icon_url, rfc1738_escape_unescaped(selector), html_quote(name));
} else {
/* Standard link */
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
icon_url, host, gtype, escaped_selector, html_quote(name));
}
}
safe_free(escaped_selector);
outbuf.append(tmpbuf);
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
break;
} /* HTML_DIR, HTML_INDEX_RESULT */
case GopherStateData::HTML_CSO_RESULT: {
if (line[0] == '-') {
int code, recno;
char *s_code, *s_recno, *result;
s_code = strtok(line + 1, ":\n");
s_recno = strtok(NULL, ":\n");
result = strtok(NULL, "\n");
if (!result)
break;
code = atoi(s_code);
recno = atoi(s_recno);
if (code != 200)
break;
if (gopherState->cso_recno != recno) {
snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
gopherState->cso_recno = recno;
} else {
snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
}
outbuf.append(tmpbuf);
break;
} else {
int code;
char *s_code, *result;
s_code = strtok(line, ":");
result = strtok(NULL, "\n");
if (!result)
break;
code = atoi(s_code);
switch (code) {
case 200: {
/* OK */
/* Do nothing here */
break;
}
case 102: /* Number of matches */
case 501: /* No Match */
case 502: { /* Too Many Matches */
/* Print the message the server returns */
snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
outbuf.append(tmpbuf);
break;
}
}
}
break;
} /* HTML_CSO_RESULT */
default:
break; /* do nothing */
} /* switch */
} /* while loop */
if (outbuf.size() > 0) {
entry->append(outbuf.rawBuf(), outbuf.size());
/* now let's start sending stuff to the client */
entry->flush();
}
outbuf.clean();
return;
}
|
274182330078791984686066092776381139807
|
gopher.cc
|
53940162348551394934251092714566998881
|
CWE-400
|
CVE-2021-46784
|
In Squid 3.x through 3.5.28, 4.x through 4.17, and 5.x before 5.6, due to improper buffer management, a Denial of Service can occur when processing long Gopher server responses.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46784
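The improper buffer management in this version lives in the line-reassembly loop above: partial lines are staged in the fixed-size gopherState->buf and line buffers, llen is clamped (discarding data) whenever the carried-over prefix plus the new chunk would exceed TEMP_BUF_SIZE, and every rendered entry is first formatted into the stack buffer tmpbuf. For contrast, a minimal sketch of the same carry-over line splitting done with a growable buffer, which needs no clamping; emit_line() is a hypothetical consumer, not Squid code:

```cpp
#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>

// Hypothetical consumer for each completed line.
static void emit_line(const std::string &line) { std::cout << line << '\n'; }

// Carry-over line splitter: 'pending' holds the unterminated tail of the
// previous chunk, so no fixed-size staging buffer (and no clamping) is needed.
static void feed_chunk(std::string &pending, const char *buf, std::size_t len) {
    const char *pos = buf;
    const char *end = buf + len;
    while (pos < end) {
        const char *nl =
            static_cast<const char *>(std::memchr(pos, '\n', end - pos));
        if (nl == nullptr) {            // no complete line: stash and wait
            pending.append(pos, end - pos);
            return;
        }
        pending.append(pos, nl - pos);  // finish the carried-over line
        emit_line(pending);
        pending.clear();
        pos = nl + 1;
    }
}

int main() {
    std::string pending;
    feed_chunk(pending, "1Menu\tsel", 9);             // split mid-line...
    feed_chunk(pending, "ector\thost\t70\n.\n", 16);  // ...completed here
    return 0;
}
```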
|
224,281
|
squid
|
5e2ea2b13bd98f53e29964ca26bb0d602a8a12b9
|
https://github.com/squid-cache/squid
|
https://github.com/squid-cache/squid/commit/5e2ea2b13bd98f53e29964ca26bb0d602a8a12b9
|
Improve handling of Gopher responses (#1022)
| 0
|
gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
{
char *pos = inbuf;
char *lpos = NULL;
char *tline = NULL;
LOCAL_ARRAY(char, line, TEMP_BUF_SIZE);
char *name = NULL;
char *selector = NULL;
char *host = NULL;
char *port = NULL;
char *escaped_selector = NULL;
const char *icon_url = NULL;
char gtype;
StoreEntry *entry = NULL;
memset(line, '\0', TEMP_BUF_SIZE);
entry = gopherState->entry;
if (gopherState->conversion == GopherStateData::HTML_INDEX_PAGE) {
char *html_url = html_quote(entry->url());
gopherHTMLHeader(entry, "Gopher Index %s", html_url);
storeAppendPrintf(entry,
"<p>This is a searchable Gopher index. Use the search\n"
"function of your browser to enter search terms.\n"
"<ISINDEX>\n");
gopherHTMLFooter(entry);
/* now let's start sending stuff to the client */
entry->flush();
gopherState->HTML_header_added = 1;
return;
}
if (gopherState->conversion == GopherStateData::HTML_CSO_PAGE) {
char *html_url = html_quote(entry->url());
gopherHTMLHeader(entry, "CSO Search of %s", html_url);
storeAppendPrintf(entry,
"<P>A CSO database usually contains a phonebook or\n"
"directory. Use the search function of your browser to enter\n"
"search terms.</P><ISINDEX>\n");
gopherHTMLFooter(entry);
/* now let's start sending stuff to the client */
entry->flush();
gopherState->HTML_header_added = 1;
return;
}
SBuf outbuf;
if (!gopherState->HTML_header_added) {
if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT)
gopherHTMLHeader(entry, "CSO Search Result", NULL);
else
gopherHTMLHeader(entry, "Gopher Menu", NULL);
outbuf.append ("<PRE>");
gopherState->HTML_header_added = 1;
gopherState->HTML_pre = 1;
}
while (pos < inbuf + len) {
int llen;
int left = len - (pos - inbuf);
lpos = (char *)memchr(pos, '\n', left);
if (lpos) {
++lpos; /* Next line is after \n */
llen = lpos - pos;
} else {
llen = left;
}
if (gopherState->len + llen >= TEMP_BUF_SIZE) {
debugs(10, DBG_IMPORTANT, "GopherHTML: Buffer overflow. Lost some data on URL: " << entry->url() );
llen = TEMP_BUF_SIZE - gopherState->len - 1;
gopherState->overflowed = true; // may already be true
}
if (!lpos) {
/* there is no complete line in inbuf */
/* copy it to temp buffer */
/* note: llen is adjusted above */
memcpy(gopherState->buf + gopherState->len, pos, llen);
gopherState->len += llen;
break;
}
if (gopherState->len != 0) {
/* there is something left from last tx. */
memcpy(line, gopherState->buf, gopherState->len);
memcpy(line + gopherState->len, pos, llen);
llen += gopherState->len;
gopherState->len = 0;
} else {
memcpy(line, pos, llen);
}
line[llen + 1] = '\0';
/* move input to next line */
pos = lpos;
/* At this point we should have one line in the buffer to process */
if (*line == '.') {
/* skip it */
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
switch (gopherState->conversion) {
case GopherStateData::HTML_INDEX_RESULT:
case GopherStateData::HTML_DIR: {
tline = line;
gtype = *tline;
++tline;
name = tline;
selector = strchr(tline, TAB);
if (selector) {
*selector = '\0';
++selector;
host = strchr(selector, TAB);
if (host) {
*host = '\0';
++host;
port = strchr(host, TAB);
if (port) {
char *junk;
port[0] = ':';
junk = strchr(host, TAB);
if (junk)
*junk++ = 0; /* Chop port */
else {
junk = strchr(host, '\r');
if (junk)
*junk++ = 0; /* Chop port */
else {
junk = strchr(host, '\n');
if (junk)
*junk++ = 0; /* Chop port */
}
}
if ((port[1] == '0') && (!port[2]))
port[0] = 0; /* 0 means none */
}
/* escape a selector here */
escaped_selector = xstrdup(rfc1738_escape_part(selector));
switch (gtype) {
case GOPHER_DIRECTORY:
icon_url = mimeGetIconURL("internal-menu");
break;
case GOPHER_HTML:
case GOPHER_FILE:
icon_url = mimeGetIconURL("internal-text");
break;
case GOPHER_INDEX:
case GOPHER_CSO:
icon_url = mimeGetIconURL("internal-index");
break;
case GOPHER_IMAGE:
case GOPHER_GIF:
case GOPHER_PLUS_IMAGE:
icon_url = mimeGetIconURL("internal-image");
break;
case GOPHER_SOUND:
case GOPHER_PLUS_SOUND:
icon_url = mimeGetIconURL("internal-sound");
break;
case GOPHER_PLUS_MOVIE:
icon_url = mimeGetIconURL("internal-movie");
break;
case GOPHER_TELNET:
case GOPHER_3270:
icon_url = mimeGetIconURL("internal-telnet");
break;
case GOPHER_BIN:
case GOPHER_MACBINHEX:
case GOPHER_DOSBIN:
case GOPHER_UUENCODED:
icon_url = mimeGetIconURL("internal-binary");
break;
case GOPHER_INFO:
icon_url = NULL;
break;
case GOPHER_WWW:
icon_url = mimeGetIconURL("internal-link");
break;
default:
icon_url = mimeGetIconURL("internal-unknown");
break;
}
if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) {
if (strlen(escaped_selector) != 0)
outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
icon_url, escaped_selector, rfc1738_escape_part(host),
*port ? ":" : "", port, html_quote(name));
else
outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
icon_url, rfc1738_escape_part(host), *port ? ":" : "",
port, html_quote(name));
} else if (gtype == GOPHER_INFO) {
outbuf.appendf("\t%s\n", html_quote(name));
} else {
if (strncmp(selector, "GET /", 5) == 0) {
/* WWW link */
outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
} else if (gtype == GOPHER_WWW) {
outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
icon_url, rfc1738_escape_unescaped(selector), html_quote(name));
} else {
/* Standard link */
outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
icon_url, host, gtype, escaped_selector, html_quote(name));
}
}
safe_free(escaped_selector);
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
break;
} /* HTML_DIR, HTML_INDEX_RESULT */
case GopherStateData::HTML_CSO_RESULT: {
if (line[0] == '-') {
int code, recno;
char *s_code, *s_recno, *result;
s_code = strtok(line + 1, ":\n");
s_recno = strtok(NULL, ":\n");
result = strtok(NULL, "\n");
if (!result)
break;
code = atoi(s_code);
recno = atoi(s_recno);
if (code != 200)
break;
if (gopherState->cso_recno != recno) {
outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
gopherState->cso_recno = recno;
} else {
outbuf.appendf("%s\n", html_quote(result));
}
break;
} else {
int code;
char *s_code, *result;
s_code = strtok(line, ":");
result = strtok(NULL, "\n");
if (!result)
break;
code = atoi(s_code);
switch (code) {
case 200: {
/* OK */
/* Do nothing here */
break;
}
case 102: /* Number of matches */
case 501: /* No Match */
case 502: { /* Too Many Matches */
/* Print the message the server returns */
outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
break;
}
}
}
break;
} /* HTML_CSO_RESULT */
default:
break; /* do nothing */
} /* switch */
} /* while loop */
if (outbuf.length() > 0) {
entry->append(outbuf.rawContent(), outbuf.length());
/* now let's start sending stuff to the client */
entry->flush();
}
return;
}
|
21640942922320975299915998505575451737
|
gopher.cc
|
190939052563115961351574589449206812826
|
CWE-400
|
CVE-2021-46784
|
In Squid 3.x through 3.5.28, 4.x through 4.17, and 5.x before 5.6, due to improper buffer management, a Denial of Service can occur when processing long Gopher server responses.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46784
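The fixed version drops tmpbuf entirely and formats with SBuf::appendf(), which grows the output as needed instead of snprintf-ing into a TEMP_BUF_SIZE stack array. A rough sketch of printf-style appending onto a growable buffer, using std::string and a measuring vsnprintf pass; this appendf() is a hypothetical helper in the spirit of the fix, not the Squid SBuf API:

```cpp
#include <cstdarg>
#include <cstdio>
#include <iostream>
#include <string>

// printf-style formatting onto a growable buffer: one vsnprintf pass to
// measure, one to format in place, so the output is never truncated.
static void appendf(std::string &out, const char *fmt, ...) {
    va_list ap, ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    const int need = std::vsnprintf(nullptr, 0, fmt, ap);  // measure only
    va_end(ap);
    if (need > 0) {
        const size_t old = out.size();
        out.resize(old + need + 1);                  // room for the NUL
        std::vsnprintf(&out[old], need + 1, fmt, ap2);
        out.resize(old + need);                      // drop the NUL again
    }
    va_end(ap2);
}

int main() {
    std::string outbuf;
    appendf(outbuf, "<A HREF=\"gopher://%s/%c%s\">%s</A>\n",
            "example.net", '1', "/menu", "Example menu");
    std::cout << outbuf;
    return 0;
}
```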
|
195,328
|
gpac
|
30ac5e5236b790accd1f25347eebf2dc8c6c1bcb
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/30ac5e5236b790accd1f25347eebf2dc8c6c1bcb
|
fixed #1897
| 1
|
char *gf_text_get_utf8_line(char *szLine, u32 lineSize, FILE *txt_in, s32 unicode_type)
{
u32 i, j, len;
char *sOK;
char szLineConv[1024];
unsigned short *sptr;
memset(szLine, 0, sizeof(char)*lineSize);
sOK = gf_fgets(szLine, lineSize, txt_in);
if (!sOK) return NULL;
if (unicode_type<=1) {
j=0;
len = (u32) strlen(szLine);
for (i=0; i<len; i++) {
if (!unicode_type && (szLine[i] & 0x80)) {
/*non UTF8 (likely some win-CP)*/
if ((szLine[i+1] & 0xc0) != 0x80) {
szLineConv[j] = 0xc0 | ( (szLine[i] >> 6) & 0x3 );
j++;
szLine[i] &= 0xbf;
}
/*UTF8 2 bytes char*/
else if ( (szLine[i] & 0xe0) == 0xc0) {
szLineConv[j] = szLine[i];
i++;
j++;
}
/*UTF8 3 bytes char*/
else if ( (szLine[i] & 0xf0) == 0xe0) {
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
}
/*UTF8 4 bytes char*/
else if ( (szLine[i] & 0xf8) == 0xf0) {
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
} else {
i+=1;
continue;
}
}
szLineConv[j] = szLine[i];
j++;
}
szLineConv[j] = 0;
strcpy(szLine, szLineConv);
return sOK;
}
#ifdef GPAC_BIG_ENDIAN
if (unicode_type==3)
#else
if (unicode_type==2)
#endif
{
i=0;
while (1) {
char c;
if (!szLine[i] && !szLine[i+1]) break;
c = szLine[i+1];
szLine[i+1] = szLine[i];
szLine[i] = c;
i+=2;
}
}
sptr = (u16 *)szLine;
i = (u32) gf_utf8_wcstombs(szLineConv, 1024, (const unsigned short **) &sptr);
szLineConv[i] = 0;
strcpy(szLine, szLineConv);
/*this is ugly indeed: since the input is UTF16-LE, there is a good chance gf_fgets never reads the \0 after a \n*/
if (unicode_type==3) gf_fgetc(txt_in);
return sOK;
}
|
117427486429117846714647428036887377373
|
load_text.c
|
196742273187479774331460388493063544419
|
CWE-415
|
CVE-2021-40574
|
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the gf_text_get_utf8_line function in load_text.c, which allows attackers to cause a denial of service and potentially achieve code execution and privilege escalation.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40574
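The overflow window in this version: szLineConv holds 1024 bytes while szLine holds lineSize bytes, yet the "win-CP" branch above emits two output bytes for one input byte, so a long enough line of high-bit characters can run j past the end of szLineConv (the fixed record below doubles the buffer to 2048 and passes that bound to gf_utf8_wcstombs). A small sketch of the worst-case expansion when a legacy 8-bit line is re-encoded as UTF-8:

```cpp
#include <cassert>
#include <cstddef>
#include <string>

// Every byte >= 0x80 becomes a two-byte UTF-8 sequence, so the output needs
// up to 2*len + 1 bytes -- the bound the enlarged 2048-byte buffer encodes.
static std::string latin1_to_utf8(const unsigned char *in, std::size_t len) {
    std::string out;
    out.reserve(2 * len + 1);
    for (std::size_t i = 0; i < len; ++i) {
        if (in[i] < 0x80) {
            out.push_back(static_cast<char>(in[i]));
        } else {
            out.push_back(static_cast<char>(0xc0 | (in[i] >> 6)));
            out.push_back(static_cast<char>(0x80 | (in[i] & 0x3f)));
        }
    }
    return out;
}

int main() {
    const unsigned char line[] = { 0xe9, 0xe8, 'a' };  // e-acute, e-grave, 'a'
    assert(latin1_to_utf8(line, sizeof(line)).size() == 5);  // 2 + 2 + 1
    return 0;
}
```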
|
224,472
|
gpac
|
30ac5e5236b790accd1f25347eebf2dc8c6c1bcb
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/30ac5e5236b790accd1f25347eebf2dc8c6c1bcb
|
fixed #1897
| 0
|
char *gf_text_get_utf8_line(char *szLine, u32 lineSize, FILE *txt_in, s32 unicode_type)
{
u32 i, j, len;
char *sOK;
char szLineConv[2048];
unsigned short *sptr;
memset(szLine, 0, sizeof(char)*lineSize);
sOK = gf_fgets(szLine, lineSize, txt_in);
if (!sOK) return NULL;
if (unicode_type<=1) {
j=0;
len = (u32) strlen(szLine);
for (i=0; i<len; i++) {
if (!unicode_type && (szLine[i] & 0x80)) {
/*non UTF8 (likely some win-CP)*/
if ((szLine[i+1] & 0xc0) != 0x80) {
szLineConv[j] = 0xc0 | ( (szLine[i] >> 6) & 0x3 );
j++;
szLine[i] &= 0xbf;
}
/*UTF8 2 bytes char*/
else if ( (szLine[i] & 0xe0) == 0xc0) {
szLineConv[j] = szLine[i];
i++;
j++;
}
/*UTF8 3 bytes char*/
else if ( (szLine[i] & 0xf0) == 0xe0) {
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
}
/*UTF8 4 bytes char*/
else if ( (szLine[i] & 0xf8) == 0xf0) {
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
} else {
i+=1;
continue;
}
}
szLineConv[j] = szLine[i];
j++;
}
szLineConv[j] = 0;
strcpy(szLine, szLineConv);
return sOK;
}
#ifdef GPAC_BIG_ENDIAN
if (unicode_type==3)
#else
if (unicode_type==2)
#endif
{
i=0;
while (1) {
char c;
if (!szLine[i] && !szLine[i+1]) break;
c = szLine[i+1];
szLine[i+1] = szLine[i];
szLine[i] = c;
i+=2;
}
}
sptr = (u16 *)szLine;
i = (u32) gf_utf8_wcstombs(szLineConv, 2048, (const unsigned short **) &sptr);
szLineConv[i] = 0;
strcpy(szLine, szLineConv);
/*this is ugly indeed: since the input is UTF16-LE, there is a good chance gf_fgets never reads the \0 after a \n*/
if (unicode_type==3) gf_fgetc(txt_in);
return sOK;
}
|
206448432363907113546512643676658303982
|
load_text.c
|
270779576910744156479880021794579702395
|
CWE-415
|
CVE-2021-40574
|
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the gf_text_get_utf8_line function in load_text.c, which allows attackers to cause a denial of service and potentially achieve code execution and privilege escalation.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40574
|
195,331
|
tensorflow
|
08d7b00c0a5a20926363849f611729f53f3ec022
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/08d7b00c0a5a20926363849f611729f53f3ec022
|
Fix Segfault in Concat V2 shape function.
PiperOrigin-RevId: 412120654
Change-Id: I3ff915faea694f9ad8b00024e9af2de9909011be
| 1
|
Status ConcatShapeHelper(InferenceContext* c, int start_value_index,
int end_value_index, int dim_index) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused));
const Tensor* concat_dim_t = c->input_tensor(dim_index);
if (concat_dim_t == nullptr) {
// Return an unknown shape with same rank as inputs, or an unknown rank
// if no input's rank is known.
// Find rank.
int32_t rank = InferenceContext::kUnknownRank;
for (int i = start_value_index; i < end_value_index; ++i) {
if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i));
if (rank != InferenceContext::kUnknownRank) {
break;
}
}
if (rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return Status::OK();
} else if (rank == 0) {
return errors::InvalidArgument(
"Can't concatenate scalars (use tf.stack instead)");
} else {
for (int i = start_value_index; i < end_value_index; ++i) {
// Check that all the inputs are of the correct rank.
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused));
}
}
// Build result of <rank> different unknown dims.
std::vector<DimensionHandle> dims;
dims.reserve(rank);
for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim());
c->set_output(0, c->MakeShape(dims));
return Status::OK();
}
// Merge all the non-concat dims, and sum the concat dim to make an output
// shape.
int64_t concat_dim;
if (concat_dim_t->dtype() == DT_INT32) {
concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0));
} else {
concat_dim = concat_dim_t->flat<int64_t>()(0);
}
// Minimum required number of dimensions.
const int min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1;
ShapeHandle output_before;
ShapeHandle output_after;
ShapeHandle input = c->input(end_value_index - 1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before));
DimensionHandle output_middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
output_after = c->Scalar(); // no dimensions.
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after));
}
for (int i = end_value_index - 2; i >= start_value_index; --i) {
ShapeHandle before;
ShapeHandle after;
input = c->input(i);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before));
DimensionHandle middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after));
}
TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before));
TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle));
TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after));
}
ShapeHandle s;
TF_RETURN_IF_ERROR(
c->Concatenate(output_before, c->Vector(output_middle), &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s));
c->set_output(0, s);
return Status::OK();
}
|
115004012549325804010611397133680502113
|
common_shape_fns.cc
|
114394888048780454732842913577124501919
|
CWE-843
|
CVE-2022-21731
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of shape inference for `ConcatV2` can be used to trigger a denial of service attack via a segfault caused by a type confusion. The `axis` argument is translated into `concat_dim` in the `ConcatShapeHelper` helper function. Then, a value for `min_rank` is computed based on `concat_dim`. This is then used to validate that the `values` tensor has at least the required rank. However, `WithRankAtLeast` receives the lower bound as a 64-bit value and then compares it against the maximum 32-bit integer value that could be represented. Because `min_rank` is a 32-bit value and `axis` is attacker-controlled, the resulting lower bound can be negative, so the error check is bypassed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21731
|
224,570
|
tensorflow
|
08d7b00c0a5a20926363849f611729f53f3ec022
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/08d7b00c0a5a20926363849f611729f53f3ec022
|
Fix Segfault in Concat V2 shape function.
PiperOrigin-RevId: 412120654
Change-Id: I3ff915faea694f9ad8b00024e9af2de9909011be
| 0
|
Status ConcatShapeHelper(InferenceContext* c, int start_value_index,
int end_value_index, int dim_index) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused));
const Tensor* concat_dim_t = c->input_tensor(dim_index);
if (concat_dim_t == nullptr) {
// Return an unknown shape with same rank as inputs, or an unknown rank
// if no input's rank is known.
// Find rank.
int32_t rank = InferenceContext::kUnknownRank;
for (int i = start_value_index; i < end_value_index; ++i) {
if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i));
if (rank != InferenceContext::kUnknownRank) {
break;
}
}
if (rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return Status::OK();
} else if (rank == 0) {
return errors::InvalidArgument(
"Can't concatenate scalars (use tf.stack instead)");
} else {
for (int i = start_value_index; i < end_value_index; ++i) {
// Check that all the inputs are of the correct rank.
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused));
}
}
// Build result of <rank> different unknown dims.
std::vector<DimensionHandle> dims;
dims.reserve(rank);
for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim());
c->set_output(0, c->MakeShape(dims));
return Status::OK();
}
// Merge all the non-concat dims, and sum the concat dim to make an output
// shape.
int64_t concat_dim;
if (concat_dim_t->dtype() == DT_INT32) {
concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0));
} else {
concat_dim = concat_dim_t->flat<int64_t>()(0);
}
// Minimum required number of dimensions.
const int64 min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1;
ShapeHandle output_before;
ShapeHandle output_after;
ShapeHandle input = c->input(end_value_index - 1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before));
DimensionHandle output_middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
output_after = c->Scalar(); // no dimensions.
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after));
}
for (int i = end_value_index - 2; i >= start_value_index; --i) {
ShapeHandle before;
ShapeHandle after;
input = c->input(i);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before));
DimensionHandle middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after));
}
TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before));
TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle));
TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after));
}
ShapeHandle s;
TF_RETURN_IF_ERROR(
c->Concatenate(output_before, c->Vector(output_middle), &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s));
c->set_output(0, s);
return Status::OK();
}
|
224848617993634630925206364943386826300
|
common_shape_fns.cc
|
56524265073448960566855170405566896543
|
CWE-843
|
CVE-2022-21731
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of shape inference for `ConcatV2` can be used to trigger a denial of service attack via a segfault caused by a type confusion. The `axis` argument is translated into `concat_dim` in the `ConcatShapeHelper` helper function. Then, a value for `min_rank` is computed based on `concat_dim`. This is then used to validate that the `values` tensor has at least the required rank. However, `WithRankAtLeast` receives the lower bound as a 64-bit value and then compares it against the maximum 32-bit integer value that could be represented. Because `min_rank` is a 32-bit value and `axis` is attacker-controlled, the resulting lower bound can be negative, so the error check is bypassed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21731
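The entire fix is the declared type of min_rank: int in the vulnerable record, int64 here. With a 32-bit min_rank, an axis of 2^31 makes concat_dim + 1 wrap to a negative value, WithRankAtLeast() then accepts every input, and the later Subshape()/Dim() calls index far out of bounds. A tiny sketch of that narrowing (the wraparound shown is what mainstream two's-complement compilers produce; it is only fully specified from C++20 on):

```cpp
#include <cstdint>
#include <iostream>

int main() {
    const int64_t concat_dim = 2147483648LL;            // axis == 2^31
    const int     narrowed   = static_cast<int>(concat_dim + 1);
    const int64_t kept_wide  = concat_dim + 1;
    std::cout << "32-bit min_rank: " << narrowed  << '\n';  // negative
    std::cout << "64-bit min_rank: " << kept_wide << '\n';  // 2147483649
    // A check like "rank >= min_rank" passes for any tensor when min_rank
    // is negative, which is exactly how the validation was bypassed.
    return 0;
}
```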
|
195,334
|
gpac
|
b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
|
fixed #1890
| 1
|
GF_Err iloc_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 item_count, extent_count, i, j;
GF_ItemLocationBox *ptr = (GF_ItemLocationBox *)s;
ISOM_DECREASE_SIZE(ptr, 2)
ptr->offset_size = gf_bs_read_int(bs, 4);
ptr->length_size = gf_bs_read_int(bs, 4);
ptr->base_offset_size = gf_bs_read_int(bs, 4);
if (ptr->version == 1 || ptr->version == 2) {
ptr->index_size = gf_bs_read_int(bs, 4);
} else {
gf_bs_read_int(bs, 4);
}
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
item_count = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
item_count = gf_bs_read_u32(bs);
}
for (i = 0; i < item_count; i++) {
GF_ItemLocationEntry *location_entry = (GF_ItemLocationEntry *)gf_malloc(sizeof(GF_ItemLocationEntry));
if (!location_entry) return GF_OUT_OF_MEM;
gf_list_add(ptr->location_entries, location_entry);
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->item_ID = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
location_entry->item_ID = gf_bs_read_u32(bs);
}
if (ptr->version == 1 || ptr->version == 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->construction_method = gf_bs_read_u16(bs);
}
else {
location_entry->construction_method = 0;
}
ISOM_DECREASE_SIZE(ptr, (2 + ptr->base_offset_size) )
location_entry->data_reference_index = gf_bs_read_u16(bs);
location_entry->base_offset = gf_bs_read_int(bs, 8*ptr->base_offset_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
location_entry->original_base_offset = location_entry->base_offset;
#endif
ISOM_DECREASE_SIZE(ptr, 2)
extent_count = gf_bs_read_u16(bs);
location_entry->extent_entries = gf_list_new();
for (j = 0; j < extent_count; j++) {
GF_ItemExtentEntry *extent_entry = (GF_ItemExtentEntry *)gf_malloc(sizeof(GF_ItemExtentEntry));
if (!extent_entry) return GF_OUT_OF_MEM;
gf_list_add(location_entry->extent_entries, extent_entry);
if ((ptr->version == 1 || ptr->version == 2) && ptr->index_size > 0) {
ISOM_DECREASE_SIZE(ptr, ptr->index_size)
extent_entry->extent_index = gf_bs_read_int(bs, 8 * ptr->index_size);
}
else {
extent_entry->extent_index = 0;
}
ISOM_DECREASE_SIZE(ptr, (ptr->offset_size+ptr->length_size) )
extent_entry->extent_offset = gf_bs_read_int(bs, 8*ptr->offset_size);
extent_entry->extent_length = gf_bs_read_int(bs, 8*ptr->length_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
extent_entry->original_extent_offset = extent_entry->extent_offset;
#endif
}
}
return GF_OK;
}
|
85275035202223574859308673912965262169
|
box_code_meta.c
|
315220373545459860670428553876078791185
|
CWE-415
|
CVE-2021-40573
|
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the gf_list_del function in list.c, which allows attackers to cause a denial of service.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40573
|
224,728
|
gpac
|
b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
|
https://github.com/gpac/gpac
|
https://github.com/gpac/gpac/commit/b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
|
fixed #1890
| 0
|
GF_Err iloc_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 item_count, extent_count, i, j;
GF_ItemLocationBox *ptr = (GF_ItemLocationBox *)s;
ISOM_DECREASE_SIZE(ptr, 2)
ptr->offset_size = gf_bs_read_int(bs, 4);
ptr->length_size = gf_bs_read_int(bs, 4);
ptr->base_offset_size = gf_bs_read_int(bs, 4);
if (ptr->version == 1 || ptr->version == 2) {
ptr->index_size = gf_bs_read_int(bs, 4);
} else {
gf_bs_read_int(bs, 4);
}
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
item_count = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
item_count = gf_bs_read_u32(bs);
}
for (i = 0; i < item_count; i++) {
GF_ItemLocationEntry *location_entry;
GF_SAFEALLOC(location_entry, GF_ItemLocationEntry);
if (!location_entry) return GF_OUT_OF_MEM;
gf_list_add(ptr->location_entries, location_entry);
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->item_ID = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
location_entry->item_ID = gf_bs_read_u32(bs);
}
if (ptr->version == 1 || ptr->version == 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->construction_method = gf_bs_read_u16(bs);
}
else {
location_entry->construction_method = 0;
}
ISOM_DECREASE_SIZE(ptr, (2 + ptr->base_offset_size) )
location_entry->data_reference_index = gf_bs_read_u16(bs);
location_entry->base_offset = gf_bs_read_int(bs, 8*ptr->base_offset_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
location_entry->original_base_offset = location_entry->base_offset;
#endif
ISOM_DECREASE_SIZE(ptr, 2)
extent_count = gf_bs_read_u16(bs);
location_entry->extent_entries = gf_list_new();
for (j = 0; j < extent_count; j++) {
GF_ItemExtentEntry *extent_entry;
GF_SAFEALLOC(extent_entry, GF_ItemExtentEntry);
if (!extent_entry) return GF_OUT_OF_MEM;
gf_list_add(location_entry->extent_entries, extent_entry);
if ((ptr->version == 1 || ptr->version == 2) && ptr->index_size > 0) {
ISOM_DECREASE_SIZE(ptr, ptr->index_size)
extent_entry->extent_index = gf_bs_read_int(bs, 8 * ptr->index_size);
}
else {
extent_entry->extent_index = 0;
}
ISOM_DECREASE_SIZE(ptr, (ptr->offset_size+ptr->length_size) )
extent_entry->extent_offset = gf_bs_read_int(bs, 8*ptr->offset_size);
extent_entry->extent_length = gf_bs_read_int(bs, 8*ptr->length_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
extent_entry->original_extent_offset = extent_entry->extent_offset;
#endif
}
}
return GF_OK;
}
|
326603429121921341428150431158212875495
|
box_code_meta.c
|
99521214022509816524954281629288020612
|
CWE-415
|
CVE-2021-40573
|
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the gf_list_del function in list.c, which allows attackers to cause a denial of service.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-40573
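The whole fix in this pair is GF_SAFEALLOC (zero-initializing allocation) replacing gf_malloc(): if an ISOM_DECREASE_SIZE check fails after the entry has been gf_list_add()ed but before extent_entries is assigned, box cleanup otherwise walks an indeterminate pointer, which is what made the double free reachable. A minimal sketch of the pattern, with calloc() standing in for GF_SAFEALLOC:

```cpp
#include <cstdlib>

// Zero-initialise at allocation time so a cleanup path that runs before
// every field is assigned sees NULL pointers instead of heap garbage.
struct ItemLocationEntry {
    unsigned item_ID;
    void *extent_entries;   // assigned later (gf_list_new() in the real code)
};

int main() {
    ItemLocationEntry *e =
        static_cast<ItemLocationEntry *>(std::calloc(1, sizeof(*e)));
    if (e == nullptr)
        return 1;
    // Suppose parsing fails right here: e->extent_entries is NULL, so a
    // NULL-checking destructor is a no-op rather than a free() of garbage.
    std::free(e);
    return 0;
}
```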
|
195,340
|
tensorflow
|
e952a89b7026b98fe8cbe626514a93ed68b7c510
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e952a89b7026b98fe8cbe626514a93ed68b7c510
|
Prevent overflow in sparse dense cwise ops.
PiperOrigin-RevId: 415543171
Change-Id: I22dab7c41be2121ab5efe5403ca0e2f9b7cb24b8
| 1
|
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
// Validations.
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument("Input sp_shape must be a vector. Got: ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
OP_REQUIRES(
ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", shape_t->shape().dim_size(0),
" dimensions, indices shape: ", indices_t->shape().DebugString()));
OP_REQUIRES(ctx, shape_t->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec));
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims.
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
// to dims in rhs (from right to left).
auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
// Pulls relevant entries from the dense side, with reshape and broadcasting
// *of the dense side* taken into account. Use a TensorRef to avoid blowing
// up memory.
//
// We can directly use the sparse indices to look up dense side, because
// "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero," \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
|
171281295800875024521736226102204042037
|
sparse_dense_binary_op_shared.cc
|
32986558098184077015668984064800916453
|
CWE-190
|
CVE-2022-23567
|
Tensorflow is an Open Source Machine Learning Framework. The implementations of `Sparse*Cwise*` ops are vulnerable to integer overflows. These can be used to trigger large allocations (so, OOM-based denial of service) or `CHECK`-fails when building new `TensorShape` objects (so, assert-failure-based denial of service). We are missing some validation on the shapes of the input tensors, and we directly construct a large `TensorShape` with user-provided dimensions. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23567
|
224,862
|
tensorflow
|
e952a89b7026b98fe8cbe626514a93ed68b7c510
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/e952a89b7026b98fe8cbe626514a93ed68b7c510
|
Prevent overflow in sparse dense cwise ops.
PiperOrigin-RevId: 415543171
Change-Id: I22dab7c41be2121ab5efe5403ca0e2f9b7cb24b8
| 0
|
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
// Validations.
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument("Input sp_shape must be a vector. Got: ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
OP_REQUIRES(
ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", shape_t->shape().dim_size(0),
" dimensions, indices shape: ", indices_t->shape().DebugString()));
OP_REQUIRES(ctx, shape_t->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
TensorShape lhs_shape;
OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &lhs_shape));
const auto lhs_dims = BCast::FromShape(lhs_shape);
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims.
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
// to dims in rhs (from right to left).
auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
// Pulls relevant entries from the dense side, with reshape and broadcasting
// *of the dense side* taken into account. Use a TensorRef to avoid blowing
// up memory.
//
// We can directly use the sparse indices to look up dense side, because
// "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero," \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
|
278624482255408254449933942747189407257
|
sparse_dense_binary_op_shared.cc
|
247841744604256295308587005697650201397
|
CWE-190
|
CVE-2022-23567
|
Tensorflow is an Open Source Machine Learning Framework. The implementations of `Sparse*Cwise*` ops are vulnerable to integer overflows. These can be used to trigger large allocations (so, OOM-based denial of service) or `CHECK`-fails when building new `TensorShape` objects (so, assert-failure-based denial of service). We are missing some validation on the shapes of the input tensors, and we directly construct a large `TensorShape` with user-provided dimensions. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23567
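Editor's note: the description above points at two gaps in the unpatched Sparse*Cwise* kernels: shape dims are multiplied into an element count that can wrap around int64, and a TensorShape is built directly from user-controlled dims. Below is a minimal, self-contained C++ sketch of the overflow-safe validation idea; SafeNumElements is a hypothetical helper, not TensorFlow API.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper: multiply dims into an element count, refusing to
// proceed when the product would overflow int64_t instead of wrapping.
static bool SafeNumElements(const std::vector<int64_t>& dims, int64_t* out) {
  int64_t n = 1;
  for (int64_t d : dims) {
    if (d < 0) return false;                        // negative dim is invalid
    if (d != 0 && n > INT64_MAX / d) return false;  // product would overflow
    n *= d;
  }
  *out = n;
  return true;
}

int main() {
  // Two user-supplied dims whose product (2^62 * 8 = 2^65) wraps int64_t.
  std::vector<int64_t> dims = {int64_t{1} << 62, 8};
  int64_t n = 0;
  if (!SafeNumElements(dims, &n))
    std::cout << "rejected: shape element count overflows int64\n";
  return 0;
}

Running the check before constructing a TensorShape turns the large-allocation / CHECK-fail path the description mentions into an ordinary InvalidArgument error.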
|
195,343
|
tensorflow
|
002408c3696b173863228223d535f9de72a101a9
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/002408c3696b173863228223d535f9de72a101a9
|
Add negative bound check for row and column pooling_sequence in FractionalAvgPoolGrad op to avoid out of bound heap access
PiperOrigin-RevId: 413837346
Change-Id: I2b86034101df31bee161abcb781755e236c7bccd
| 1
|
void Compute(OpKernelContext* context) override {
// Here's the basic idea:
    // The batch and depth dimensions are independent of the row and col
    // dimensions. Because FractionalAvgPool currently only supports pooling
    // along row and col, we can think of this 4D tensor backpropagation as
    // an operation on a series of 2D planes.
//
// For each element of a 'slice' (2D plane) of output_backprop, we need to
// figure out its contributors when doing FractionalAvgPool operation. This
// can be done based on row_pooling_sequence, col_pooling_seq and
// overlapping.
// Once we figure out the original contributors, we just need to evenly
// divide the value of this element among these contributors.
//
    // Internally, we divide the out_backprop tensor and store it in a temporary
    // tensor of double type, then cast it to the corresponding output type.
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>
EigenDoubleMatrixMap;
// Grab the inputs.
const Tensor& orig_input_tensor_shape = context->input(0);
OP_REQUIRES(context,
orig_input_tensor_shape.dims() == 1 &&
orig_input_tensor_shape.NumElements() == 4,
errors::InvalidArgument("original input tensor shape must be"
"1-dimensional and 4 elements"));
const Tensor& out_backprop = context->input(1);
const Tensor& row_seq_tensor = context->input(2);
const Tensor& col_seq_tensor = context->input(3);
const int64_t out_batch = out_backprop.dim_size(0);
const int64_t out_rows = out_backprop.dim_size(1);
const int64_t out_cols = out_backprop.dim_size(2);
const int64_t out_depth = out_backprop.dim_size(3);
OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", row_seq_tensor must have at least ",
out_rows + 1, " elements, but got ",
row_seq_tensor.NumElements()));
OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", col_seq_tensor must have at least ",
out_cols + 1, " elements, but got ",
col_seq_tensor.NumElements()));
auto row_seq_tensor_flat = row_seq_tensor.flat<int64_t>();
auto col_seq_tensor_flat = col_seq_tensor.flat<int64_t>();
auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64_t>();
const int64_t in_batch = orig_input_tensor_shape_flat(0);
const int64_t in_rows = orig_input_tensor_shape_flat(1);
const int64_t in_cols = orig_input_tensor_shape_flat(2);
const int64_t in_depth = orig_input_tensor_shape_flat(3);
OP_REQUIRES(
context, in_batch != 0,
errors::InvalidArgument("Batch dimension of input must not be 0"));
OP_REQUIRES(
context, in_rows != 0,
errors::InvalidArgument("Rows dimension of input must not be 0"));
OP_REQUIRES(
context, in_cols != 0,
errors::InvalidArgument("Columns dimension of input must not be 0"));
OP_REQUIRES(
context, in_depth != 0,
errors::InvalidArgument("Depth dimension of input must not be 0"));
constexpr int tensor_in_and_out_dims = 4;
// Transform orig_input_tensor_shape into TensorShape
TensorShape in_shape;
for (auto i = 0; i < tensor_in_and_out_dims; ++i) {
in_shape.AddDim(orig_input_tensor_shape_flat(i));
}
// Create intermediate in_backprop.
Tensor in_backprop_tensor_temp;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp(
{0}, DataTypeToEnum<double>::v(), in_shape,
&in_backprop_tensor_temp));
in_backprop_tensor_temp.flat<double>().setZero();
// Transform 4D tensor to 2D matrix.
EigenDoubleMatrixMap in_backprop_tensor_temp_mat(
in_backprop_tensor_temp.flat<double>().data(), in_depth,
in_cols * in_rows * in_batch);
ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(),
out_depth,
out_cols * out_rows * out_batch);
// Loop through each element of out_backprop and evenly distribute the
// element to the corresponding pooling cell.
const int64_t in_max_row_index = in_rows - 1;
const int64_t in_max_col_index = in_cols - 1;
for (int64_t b = 0; b < out_batch; ++b) {
for (int64_t r = 0; r < out_rows; ++r) {
const int64_t in_row_start = row_seq_tensor_flat(r);
int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1)
: row_seq_tensor_flat(r + 1) - 1;
in_row_end = std::min(in_row_end, in_max_row_index);
for (int64_t c = 0; c < out_cols; ++c) {
const int64_t in_col_start = col_seq_tensor_flat(c);
int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1)
: col_seq_tensor_flat(c + 1) - 1;
in_col_end = std::min(in_col_end, in_max_col_index);
const int64_t num_elements_in_pooling_cell =
(in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1);
const int64_t out_index = (b * out_rows + r) * out_cols + c;
// Now we can evenly distribute out_backprop(b, h, w, *) to
// in_backprop(b, hs:he, ws:we, *).
for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) {
for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) {
const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c;
// Walk through each channel (depth).
for (int64_t d = 0; d < out_depth; ++d) {
const double out_backprop_element = static_cast<double>(
out_backprop_mat.coeffRef(d, out_index));
double& in_backprop_ref =
in_backprop_tensor_temp_mat.coeffRef(d, in_index);
in_backprop_ref +=
out_backprop_element / num_elements_in_pooling_cell;
}
}
}
}
}
}
// Depending on the type, cast double to type T.
Tensor* in_backprop_tensor = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, in_shape, &in_backprop_tensor));
auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>();
auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>();
for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) {
in_backprop_tensor_flat(i) =
static_cast<T>(in_backprop_tensor_temp_flat(i));
}
}
|
91555834572386312187860770421206034544
|
fractional_avg_pool_op.cc
|
221866619851129952189561551151828727755
|
CWE-125
|
CVE-2022-21730
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `FractionalAvgPoolGrad` does not consider cases where the input tensors are invalid allowing an attacker to read from outside of bounds of heap. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21730
|
224,891
|
tensorflow
|
002408c3696b173863228223d535f9de72a101a9
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/002408c3696b173863228223d535f9de72a101a9
|
Add negative bound check for row and column pooling_sequence in FractionalAvgPoolGrad op to avoid out of bound heap access
PiperOrigin-RevId: 413837346
Change-Id: I2b86034101df31bee161abcb781755e236c7bccd
| 0
|
void Compute(OpKernelContext* context) override {
// Here's the basic idea:
    // The batch and depth dimensions are independent of the row and col
    // dimensions. Because FractionalAvgPool currently only supports pooling
    // along row and col, we can think of this 4D tensor backpropagation as
    // an operation on a series of 2D planes.
//
// For each element of a 'slice' (2D plane) of output_backprop, we need to
// figure out its contributors when doing FractionalAvgPool operation. This
// can be done based on row_pooling_sequence, col_pooling_seq and
// overlapping.
// Once we figure out the original contributors, we just need to evenly
// divide the value of this element among these contributors.
//
    // Internally, we divide the out_backprop tensor and store it in a temporary
    // tensor of double type, then cast it to the corresponding output type.
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>
EigenDoubleMatrixMap;
// Grab the inputs.
const Tensor& orig_input_tensor_shape = context->input(0);
OP_REQUIRES(context,
orig_input_tensor_shape.dims() == 1 &&
orig_input_tensor_shape.NumElements() == 4,
errors::InvalidArgument("original input tensor shape must be"
"1-dimensional and 4 elements"));
const Tensor& out_backprop = context->input(1);
const Tensor& row_seq_tensor = context->input(2);
const Tensor& col_seq_tensor = context->input(3);
const int64_t out_batch = out_backprop.dim_size(0);
const int64_t out_rows = out_backprop.dim_size(1);
const int64_t out_cols = out_backprop.dim_size(2);
const int64_t out_depth = out_backprop.dim_size(3);
OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", row_seq_tensor must have at least ",
out_rows + 1, " elements, but got ",
row_seq_tensor.NumElements()));
OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", col_seq_tensor must have at least ",
out_cols + 1, " elements, but got ",
col_seq_tensor.NumElements()));
auto row_seq_tensor_flat = row_seq_tensor.flat<int64_t>();
auto col_seq_tensor_flat = col_seq_tensor.flat<int64_t>();
auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64_t>();
const int64_t in_batch = orig_input_tensor_shape_flat(0);
const int64_t in_rows = orig_input_tensor_shape_flat(1);
const int64_t in_cols = orig_input_tensor_shape_flat(2);
const int64_t in_depth = orig_input_tensor_shape_flat(3);
OP_REQUIRES(
context, in_batch != 0,
errors::InvalidArgument("Batch dimension of input must not be 0"));
OP_REQUIRES(
context, in_rows != 0,
errors::InvalidArgument("Rows dimension of input must not be 0"));
OP_REQUIRES(
context, in_cols != 0,
errors::InvalidArgument("Columns dimension of input must not be 0"));
OP_REQUIRES(
context, in_depth != 0,
errors::InvalidArgument("Depth dimension of input must not be 0"));
constexpr int tensor_in_and_out_dims = 4;
// Transform orig_input_tensor_shape into TensorShape
TensorShape in_shape;
for (auto i = 0; i < tensor_in_and_out_dims; ++i) {
in_shape.AddDim(orig_input_tensor_shape_flat(i));
}
// Create intermediate in_backprop.
Tensor in_backprop_tensor_temp;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp(
{0}, DataTypeToEnum<double>::v(), in_shape,
&in_backprop_tensor_temp));
in_backprop_tensor_temp.flat<double>().setZero();
// Transform 4D tensor to 2D matrix.
EigenDoubleMatrixMap in_backprop_tensor_temp_mat(
in_backprop_tensor_temp.flat<double>().data(), in_depth,
in_cols * in_rows * in_batch);
ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(),
out_depth,
out_cols * out_rows * out_batch);
// Loop through each element of out_backprop and evenly distribute the
// element to the corresponding pooling cell.
const int64_t in_max_row_index = in_rows - 1;
const int64_t in_max_col_index = in_cols - 1;
for (int64_t b = 0; b < out_batch; ++b) {
for (int64_t r = 0; r < out_rows; ++r) {
const int64_t in_row_start = row_seq_tensor_flat(r);
int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1)
: row_seq_tensor_flat(r + 1) - 1;
in_row_end = std::min(in_row_end, in_max_row_index);
OP_REQUIRES(context, in_row_start >= 0 && in_row_end >= 0,
errors::InvalidArgument(
"Row sequence tensor values must not be negative, got ",
row_seq_tensor_flat));
for (int64_t c = 0; c < out_cols; ++c) {
const int64_t in_col_start = col_seq_tensor_flat(c);
int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1)
: col_seq_tensor_flat(c + 1) - 1;
in_col_end = std::min(in_col_end, in_max_col_index);
OP_REQUIRES(
context, in_col_start >= 0 && in_col_end >= 0,
errors::InvalidArgument(
"Column sequence tensor values must not be negative, got ",
col_seq_tensor_flat));
const int64_t num_elements_in_pooling_cell =
(in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1);
const int64_t out_index = (b * out_rows + r) * out_cols + c;
// Now we can evenly distribute out_backprop(b, h, w, *) to
// in_backprop(b, hs:he, ws:we, *).
for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) {
for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) {
const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c;
// Walk through each channel (depth).
for (int64_t d = 0; d < out_depth; ++d) {
const double out_backprop_element = static_cast<double>(
out_backprop_mat.coeffRef(d, out_index));
double& in_backprop_ref =
in_backprop_tensor_temp_mat.coeffRef(d, in_index);
in_backprop_ref +=
out_backprop_element / num_elements_in_pooling_cell;
}
}
}
}
}
}
// Depending on the type, cast double to type T.
Tensor* in_backprop_tensor = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, in_shape, &in_backprop_tensor));
auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>();
auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>();
for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) {
in_backprop_tensor_flat(i) =
static_cast<T>(in_backprop_tensor_temp_flat(i));
}
}
|
7916647560171328705762237734439424087
|
fractional_avg_pool_op.cc
|
273616659727040190050886150342189772450
|
CWE-125
|
CVE-2022-21730
|
Tensorflow is an Open Source Machine Learning Framework. The implementation of `FractionalAvgPoolGrad` does not consider cases where the input tensors are invalid allowing an attacker to read from outside of bounds of heap. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21730
|
195,385
|
flatpak
|
65cbfac982cb1c83993a9e19aa424daee8e9f042
|
https://github.com/flatpak/flatpak
|
https://github.com/flatpak/flatpak/commit/65cbfac982cb1c83993a9e19aa424daee8e9f042
|
Ensure that bundles have metadata on install
If we have a bundle without metadata we wouldn't properly present
the permissions in the transaction.
| 1
|
flatpak_dir_ensure_bundle_remote (FlatpakDir *self,
GFile *file,
GBytes *extra_gpg_data,
FlatpakDecomposed **out_ref,
char **out_checksum,
char **out_metadata,
gboolean *out_created_remote,
GCancellable *cancellable,
GError **error)
{
g_autoptr(FlatpakDecomposed) ref = NULL;
gboolean created_remote = FALSE;
g_autoptr(GBytes) deploy_data = NULL;
g_autoptr(GVariant) metadata = NULL;
g_autofree char *origin = NULL;
g_autofree char *fp_metadata = NULL;
g_autofree char *basename = NULL;
g_autoptr(GBytes) included_gpg_data = NULL;
GBytes *gpg_data = NULL;
g_autofree char *to_checksum = NULL;
g_autofree char *remote = NULL;
g_autofree char *collection_id = NULL;
if (!flatpak_dir_ensure_repo (self, cancellable, error))
return NULL;
metadata = flatpak_bundle_load (file, &to_checksum,
&ref,
&origin,
NULL, &fp_metadata, NULL,
&included_gpg_data,
&collection_id,
error);
if (metadata == NULL)
return NULL;
gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data;
deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL);
if (deploy_data != NULL)
{
remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data));
/* We need to import any gpg keys because otherwise the pull will fail */
if (gpg_data != NULL)
{
g_autoptr(GKeyFile) new_config = NULL;
new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self));
if (!flatpak_dir_modify_remote (self, remote, new_config,
gpg_data, cancellable, error))
return NULL;
}
}
else
{
g_autofree char *id = flatpak_decomposed_dup_id (ref);
/* Add a remote for later updates */
basename = g_file_get_basename (file);
remote = flatpak_dir_create_origin_remote (self,
origin,
id,
basename,
flatpak_decomposed_get_ref (ref),
gpg_data,
collection_id,
&created_remote,
cancellable,
error);
if (remote == NULL)
return NULL;
}
if (out_created_remote)
*out_created_remote = created_remote;
if (out_ref)
*out_ref = g_steal_pointer (&ref);
if (out_checksum)
*out_checksum = g_steal_pointer (&to_checksum);
if (out_metadata)
*out_metadata = g_steal_pointer (&fp_metadata);
return g_steal_pointer (&remote);
}
|
117751554146896350574194025697057651898
|
flatpak-dir.c
|
41005800026546918810123079124181990480
|
CWE-276
|
CVE-2021-43860
|
Flatpak is a Linux application sandboxing and distribution framework. Prior to versions 1.12.3 and 1.10.6, Flatpak doesn't properly validate that the permissions displayed to the user for an app at install time match the actual permissions granted to the app at runtime, in the case that there's a null byte in the metadata file of an app. Therefore apps can grant themselves permissions without the consent of the user. Flatpak shows permissions to the user during install by reading them from the "xa.metadata" key in the commit metadata. This cannot contain a null terminator, because it is an untrusted GVariant. Flatpak compares these permissions to the *actual* metadata, from the "metadata" file to ensure it wasn't lied to. However, the actual metadata contents are loaded in several places where they are read as simple C-style strings. That means that, if the metadata file includes a null terminator, only the content of the file from *before* the terminator gets compared to xa.metadata. Thus, any permissions that appear in the metadata file after a null terminator are applied at runtime but not shown to the user. So maliciously crafted apps can give themselves hidden permissions. Users who have Flatpaks installed from untrusted sources are at risk in case the Flatpak has a maliciously crafted metadata file, either initially or in an update. This issue is patched in versions 1.12.3 and 1.10.6. As a workaround, users can manually check the permissions of installed apps by checking the metadata file or the xa.metadata key on the commit metadata.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-43860
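Editor's note: the description above hinges on a classic pitfall: a byte buffer with an embedded NUL compares equal under C-string semantics even though trailing bytes differ. A minimal self-contained C++ sketch (illustrative only, not flatpak code) of why a strcmp-style check misses content after the NUL while a length-aware comparison catches it:

#include <cstring>
#include <iostream>
#include <string>

int main() {
  // Permissions shown to the user vs. the actual metadata bytes, with a
  // hidden grant smuggled in after an embedded NUL terminator.
  const std::string shown = "net=none";
  const std::string actual = std::string("net=none") + '\0' + "net=all";

  // C-string comparison stops at the first NUL, so the two look equal.
  std::cout << "strcmp says: "
            << (std::strcmp(shown.c_str(), actual.c_str()) == 0
                    ? "equal" : "different") << '\n';      // "equal"

  // A length-aware comparison sees the smuggled suffix.
  const bool same = shown.size() == actual.size() &&
                    std::memcmp(shown.data(), actual.data(),
                                shown.size()) == 0;
  std::cout << "memcmp with lengths says: "
            << (same ? "equal" : "different") << '\n';     // "different"
  return 0;
}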
|
224,964
|
flatpak
|
65cbfac982cb1c83993a9e19aa424daee8e9f042
|
https://github.com/flatpak/flatpak
|
https://github.com/flatpak/flatpak/commit/65cbfac982cb1c83993a9e19aa424daee8e9f042
|
Ensure that bundles have metadata on install
If we have a bundle without metadata we wouldn't properly present
the permissions in the transaction.
| 0
|
flatpak_dir_ensure_bundle_remote (FlatpakDir *self,
GFile *file,
GBytes *extra_gpg_data,
FlatpakDecomposed **out_ref,
char **out_checksum,
char **out_metadata,
gboolean *out_created_remote,
GCancellable *cancellable,
GError **error)
{
g_autoptr(FlatpakDecomposed) ref = NULL;
gboolean created_remote = FALSE;
g_autoptr(GBytes) deploy_data = NULL;
g_autoptr(GVariant) metadata = NULL;
g_autofree char *origin = NULL;
g_autofree char *fp_metadata = NULL;
g_autofree char *basename = NULL;
g_autoptr(GBytes) included_gpg_data = NULL;
GBytes *gpg_data = NULL;
g_autofree char *to_checksum = NULL;
g_autofree char *remote = NULL;
g_autofree char *collection_id = NULL;
if (!flatpak_dir_ensure_repo (self, cancellable, error))
return NULL;
metadata = flatpak_bundle_load (file, &to_checksum,
&ref,
&origin,
NULL, &fp_metadata, NULL,
&included_gpg_data,
&collection_id,
error);
if (metadata == NULL)
return NULL;
  /* If we rely on metadata (e.g. to print permissions), check that it exists before creating the remote */
if (out_metadata && fp_metadata == NULL)
{
      flatpak_fail_error (error, FLATPAK_ERROR_INVALID_DATA, "No metadata in bundle header");
return NULL;
}
gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data;
deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL);
if (deploy_data != NULL)
{
remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data));
/* We need to import any gpg keys because otherwise the pull will fail */
if (gpg_data != NULL)
{
g_autoptr(GKeyFile) new_config = NULL;
new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self));
if (!flatpak_dir_modify_remote (self, remote, new_config,
gpg_data, cancellable, error))
return NULL;
}
}
else
{
g_autofree char *id = flatpak_decomposed_dup_id (ref);
/* Add a remote for later updates */
basename = g_file_get_basename (file);
remote = flatpak_dir_create_origin_remote (self,
origin,
id,
basename,
flatpak_decomposed_get_ref (ref),
gpg_data,
collection_id,
&created_remote,
cancellable,
error);
if (remote == NULL)
return NULL;
}
if (out_created_remote)
*out_created_remote = created_remote;
if (out_ref)
*out_ref = g_steal_pointer (&ref);
if (out_checksum)
*out_checksum = g_steal_pointer (&to_checksum);
if (out_metadata)
*out_metadata = g_steal_pointer (&fp_metadata);
return g_steal_pointer (&remote);
}
|
57199236180630525002318569009544856929
|
flatpak-dir.c
|
126847826047276531038327785325265722415
|
CWE-276
|
CVE-2021-43860
|
Flatpak is a Linux application sandboxing and distribution framework. Prior to versions 1.12.3 and 1.10.6, Flatpak doesn't properly validate that the permissions displayed to the user for an app at install time match the actual permissions granted to the app at runtime, in the case that there's a null byte in the metadata file of an app. Therefore apps can grant themselves permissions without the consent of the user. Flatpak shows permissions to the user during install by reading them from the "xa.metadata" key in the commit metadata. This cannot contain a null terminator, because it is an untrusted GVariant. Flatpak compares these permissions to the *actual* metadata, from the "metadata" file to ensure it wasn't lied to. However, the actual metadata contents are loaded in several places where they are read as simple C-style strings. That means that, if the metadata file includes a null terminator, only the content of the file from *before* the terminator gets compared to xa.metadata. Thus, any permissions that appear in the metadata file after a null terminator are applied at runtime but not shown to the user. So maliciously crafted apps can give themselves hidden permissions. Users who have Flatpaks installed from untrusted sources are at risk in case the Flatpak has a maliciously crafted metadata file, either initially or in an update. This issue is patched in versions 1.12.3 and 1.10.6. As a workaround, users can manually check the permissions of installed apps by checking the metadata file or the xa.metadata key on the commit metadata.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-43860
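Editor's note: the patched function above adds a second, simpler safeguard: a bundle whose header carries no metadata at all is rejected before a remote is created, instead of flowing a NULL downstream where the permission display would silently be skipped. A small self-contained C++ sketch of that fail-early shape; Bundle and EnsureBundleMetadata are hypothetical stand-ins (flatpak's real types are GVariant/char*):

#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in: metadata is absent when the bundle header has none.
struct Bundle {
  std::optional<std::string> metadata;
};

static bool EnsureBundleMetadata(const Bundle& b, std::string* err) {
  // If the caller relies on metadata (e.g. to show permissions in the
  // transaction), its absence must be a hard error rather than a silent
  // NULL that skips the permission display downstream.
  if (!b.metadata.has_value()) {
    *err = "No metadata in bundle header";
    return false;
  }
  return true;
}

int main() {
  Bundle bundle;  // a bundle whose header carries no metadata
  std::string err;
  if (!EnsureBundleMetadata(bundle, &err))
    std::cout << err << '\n';
  return 0;
}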
|
195,388
|
postgres
|
160c0258802d10b0600d7671b1bbea55d8e17d45
|
https://github.com/postgres/postgres
|
https://github.com/postgres/postgres/commit/160c0258802d10b0600d7671b1bbea55d8e17d45
|
libpq: reject extraneous data after SSL or GSS encryption handshake.
libpq collects up to a bufferload of data whenever it reads data from
the socket. When SSL or GSS encryption is requested during startup,
any additional data received with the server's yes-or-no reply
remained in the buffer, and would be treated as already-decrypted data
once the encryption handshake completed. Thus, a man-in-the-middle
with the ability to inject data into the TCP connection could stuff
some cleartext data into the start of a supposedly encryption-protected
database session.
This could probably be abused to inject faked responses to the
client's first few queries, although other details of libpq's behavior
make that harder than it sounds. A different line of attack is to
exfiltrate the client's password, or other sensitive data that might
be sent early in the session. That has been shown to be possible with
a server vulnerable to CVE-2021-23214.
To fix, throw a protocol-violation error if the internal buffer
is not empty after the encryption handshake.
Our thanks to Jacob Champion for reporting this problem.
Security: CVE-2021-23222
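Editor's note: the fix this message describes amounts to one invariant: once the server's one-byte yes/no reply to the SSL or GSS request is consumed, libpq's read buffer must be empty, because any leftover bytes arrived as cleartext and could have been injected by a man-in-the-middle. A minimal self-contained C++ sketch of that invariant; Conn and HandshakeBufferClean are hypothetical stand-ins for PGconn's inCursor/inEnd bookkeeping, and the error string is illustrative:

#include <iostream>

// Hypothetical stand-in for libpq's input-buffer bookkeeping; the real
// PGconn tracks inCursor/inEnd offsets into its read buffer.
struct Conn {
  int inCursor;  // next unread byte
  int inEnd;     // one past the last byte received
};

// The invariant the fix enforces: after the handshake reply byte is
// consumed, nothing may remain buffered, because any leftover bytes were
// read off the wire *before* encryption was established.
static bool HandshakeBufferClean(const Conn& conn) {
  return conn.inCursor == conn.inEnd;
}

int main() {
  Conn conn{1, 9};  // 8 cleartext bytes arrived stuffed after the 'S' reply
  if (!HandshakeBufferClean(conn))
    std::cout << "received unencrypted data after SSL response\n";
  return 0;
}

The vulnerable PQconnectPoll below (target = 1) lacks this check: after consuming the 'S' byte it proceeds straight to the SSL handshake, leaving any injected bytes in the buffer to be treated as decrypted data.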
| 1
|
PQconnectPoll(PGconn *conn)
{
bool reset_connection_state_machine = false;
bool need_new_connection = false;
PGresult *res;
char sebuf[PG_STRERROR_R_BUFLEN];
int optval;
if (conn == NULL)
return PGRES_POLLING_FAILED;
/* Get the new data */
switch (conn->status)
{
/*
* We really shouldn't have been polled in these two cases, but we
* can handle it.
*/
case CONNECTION_BAD:
return PGRES_POLLING_FAILED;
case CONNECTION_OK:
return PGRES_POLLING_OK;
/* These are reading states */
case CONNECTION_AWAITING_RESPONSE:
case CONNECTION_AUTH_OK:
case CONNECTION_CHECK_WRITABLE:
case CONNECTION_CONSUME:
case CONNECTION_CHECK_STANDBY:
{
/* Load waiting data */
int n = pqReadData(conn);
if (n < 0)
goto error_return;
if (n == 0)
return PGRES_POLLING_READING;
break;
}
/* These are writing states, so we just proceed. */
case CONNECTION_STARTED:
case CONNECTION_MADE:
break;
/* Special cases: proceed without waiting. */
case CONNECTION_SSL_STARTUP:
case CONNECTION_NEEDED:
case CONNECTION_GSS_STARTUP:
case CONNECTION_CHECK_TARGET:
break;
default:
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("invalid connection state, probably indicative of memory corruption\n"));
goto error_return;
}
keep_going: /* We will come back to here until there is
* nothing left to do. */
/* Time to advance to next address, or next host if no more addresses? */
if (conn->try_next_addr)
{
if (conn->addr_cur && conn->addr_cur->ai_next)
{
conn->addr_cur = conn->addr_cur->ai_next;
reset_connection_state_machine = true;
}
else
conn->try_next_host = true;
conn->try_next_addr = false;
}
/* Time to advance to next connhost[] entry? */
if (conn->try_next_host)
{
pg_conn_host *ch;
struct addrinfo hint;
int thisport;
int ret;
char portstr[MAXPGPATH];
if (conn->whichhost + 1 < conn->nconnhost)
conn->whichhost++;
else
{
/*
* Oops, no more hosts.
*
* If we are trying to connect in "prefer-standby" mode, then drop
* the standby requirement and start over.
*
* Otherwise, an appropriate error message is already set up, so
* we just need to set the right status.
*/
if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY &&
conn->nconnhost > 0)
{
conn->target_server_type = SERVER_TYPE_PREFER_STANDBY_PASS2;
conn->whichhost = 0;
}
else
goto error_return;
}
/* Drop any address info for previous host */
release_conn_addrinfo(conn);
/*
* Look up info for the new host. On failure, log the problem in
* conn->errorMessage, then loop around to try the next host. (Note
* we don't clear try_next_host until we've succeeded.)
*/
ch = &conn->connhost[conn->whichhost];
/* Initialize hint structure */
MemSet(&hint, 0, sizeof(hint));
hint.ai_socktype = SOCK_STREAM;
conn->addrlist_family = hint.ai_family = AF_UNSPEC;
/* Figure out the port number we're going to use. */
if (ch->port == NULL || ch->port[0] == '\0')
thisport = DEF_PGPORT;
else
{
if (!parse_int_param(ch->port, &thisport, conn, "port"))
goto error_return;
if (thisport < 1 || thisport > 65535)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid port number: \"%s\"\n"),
ch->port);
goto keep_going;
}
}
snprintf(portstr, sizeof(portstr), "%d", thisport);
/* Use pg_getaddrinfo_all() to resolve the address */
switch (ch->type)
{
case CHT_HOST_NAME:
ret = pg_getaddrinfo_all(ch->host, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not translate host name \"%s\" to address: %s\n"),
ch->host, gai_strerror(ret));
goto keep_going;
}
break;
case CHT_HOST_ADDRESS:
hint.ai_flags = AI_NUMERICHOST;
ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not parse network address \"%s\": %s\n"),
ch->hostaddr, gai_strerror(ret));
goto keep_going;
}
break;
case CHT_UNIX_SOCKET:
#ifdef HAVE_UNIX_SOCKETS
conn->addrlist_family = hint.ai_family = AF_UNIX;
UNIXSOCK_PATH(portstr, thisport, ch->host);
if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"),
portstr,
(int) (UNIXSOCK_PATH_BUFLEN - 1));
goto keep_going;
}
/*
* NULL hostname tells pg_getaddrinfo_all to parse the service
* name as a Unix-domain socket path.
*/
ret = pg_getaddrinfo_all(NULL, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"),
portstr, gai_strerror(ret));
goto keep_going;
}
#else
Assert(false);
#endif
break;
}
/* OK, scan this addrlist for a working server address */
conn->addr_cur = conn->addrlist;
reset_connection_state_machine = true;
conn->try_next_host = false;
}
/* Reset connection state machine? */
if (reset_connection_state_machine)
{
/*
* (Re) initialize our connection control variables for a set of
* connection attempts to a single server address. These variables
* must persist across individual connection attempts, but we must
* reset them when we start to consider a new server.
*/
conn->pversion = PG_PROTOCOL(3, 0);
conn->send_appname = true;
#ifdef USE_SSL
/* initialize these values based on SSL mode */
conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */
conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */
#endif
#ifdef ENABLE_GSS
conn->try_gss = (conn->gssencmode[0] != 'd'); /* "disable" */
#endif
reset_connection_state_machine = false;
need_new_connection = true;
}
/* Force a new connection (perhaps to the same server as before)? */
if (need_new_connection)
{
/* Drop any existing connection */
pqDropConnection(conn, true);
/* Reset all state obtained from old server */
pqDropServerData(conn);
/* Drop any PGresult we might have, too */
conn->asyncStatus = PGASYNC_IDLE;
conn->xactStatus = PQTRANS_IDLE;
conn->pipelineStatus = PQ_PIPELINE_OFF;
pqClearAsyncResult(conn);
/* Reset conn->status to put the state machine in the right state */
conn->status = CONNECTION_NEEDED;
need_new_connection = false;
}
/* Now try to advance the state machine for this connection */
switch (conn->status)
{
case CONNECTION_NEEDED:
{
/*
* Try to initiate a connection to one of the addresses
* returned by pg_getaddrinfo_all(). conn->addr_cur is the
* next one to try.
*
* The extra level of braces here is historical. It's not
* worth reindenting this whole switch case to remove 'em.
*/
{
struct addrinfo *addr_cur = conn->addr_cur;
char host_addr[NI_MAXHOST];
/*
* Advance to next possible host, if we've tried all of
* the addresses for the current host.
*/
if (addr_cur == NULL)
{
conn->try_next_host = true;
goto keep_going;
}
/* Remember current address for possible use later */
memcpy(&conn->raddr.addr, addr_cur->ai_addr,
addr_cur->ai_addrlen);
conn->raddr.salen = addr_cur->ai_addrlen;
/*
* Set connip, too. Note we purposely ignore strdup
* failure; not a big problem if it fails.
*/
if (conn->connip != NULL)
{
free(conn->connip);
conn->connip = NULL;
}
getHostaddr(conn, host_addr, NI_MAXHOST);
if (host_addr[0])
conn->connip = strdup(host_addr);
/* Try to create the socket */
conn->sock = socket(addr_cur->ai_family, SOCK_STREAM, 0);
if (conn->sock == PGINVALID_SOCKET)
{
int errorno = SOCK_ERRNO;
/*
* Silently ignore socket() failure if we have more
* addresses to try; this reduces useless chatter in
* cases where the address list includes both IPv4 and
* IPv6 but kernel only accepts one family.
*/
if (addr_cur->ai_next != NULL ||
conn->whichhost + 1 < conn->nconnhost)
{
conn->try_next_addr = true;
goto keep_going;
}
emitHostIdentityInfo(conn, host_addr);
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not create socket: %s\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)));
goto error_return;
}
/*
* Once we've identified a target address, all errors
* except the preceding socket()-failure case should be
* prefixed with host-identity information. (If the
* connection succeeds, the contents of conn->errorMessage
* won't matter, so this is harmless.)
*/
emitHostIdentityInfo(conn, host_addr);
/*
* Select socket options: no delay of outgoing data for
* TCP sockets, nonblock mode, close-on-exec. Try the
* next address if any of this fails.
*/
if (!IS_AF_UNIX(addr_cur->ai_family))
{
if (!connectNoDelay(conn))
{
/* error message already created */
conn->try_next_addr = true;
goto keep_going;
}
}
if (!pg_set_noblock(conn->sock))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to nonblocking mode: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
conn->try_next_addr = true;
goto keep_going;
}
#ifdef F_SETFD
if (fcntl(conn->sock, F_SETFD, FD_CLOEXEC) == -1)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to close-on-exec mode: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
conn->try_next_addr = true;
goto keep_going;
}
#endif /* F_SETFD */
if (!IS_AF_UNIX(addr_cur->ai_family))
{
#ifndef WIN32
int on = 1;
#endif
int usekeepalives = useKeepalives(conn);
int err = 0;
if (usekeepalives < 0)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("keepalives parameter must be an integer\n"));
err = 1;
}
else if (usekeepalives == 0)
{
/* Do nothing */
}
#ifndef WIN32
else if (setsockopt(conn->sock,
SOL_SOCKET, SO_KEEPALIVE,
(char *) &on, sizeof(on)) < 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("%s(%s) failed: %s\n"),
"setsockopt",
"SO_KEEPALIVE",
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
err = 1;
}
else if (!setKeepalivesIdle(conn)
|| !setKeepalivesInterval(conn)
|| !setKeepalivesCount(conn))
err = 1;
#else /* WIN32 */
#ifdef SIO_KEEPALIVE_VALS
else if (!setKeepalivesWin32(conn))
err = 1;
#endif /* SIO_KEEPALIVE_VALS */
#endif /* WIN32 */
else if (!setTCPUserTimeout(conn))
err = 1;
if (err)
{
conn->try_next_addr = true;
goto keep_going;
}
}
/*----------
* We have three methods of blocking SIGPIPE during
* send() calls to this socket:
*
* - setsockopt(sock, SO_NOSIGPIPE)
* - send(sock, ..., MSG_NOSIGNAL)
* - setting the signal mask to SIG_IGN during send()
*
* The third method requires three syscalls per send,
* so we prefer either of the first two, but they are
* less portable. The state is tracked in the following
* members of PGconn:
*
* conn->sigpipe_so - we have set up SO_NOSIGPIPE
* conn->sigpipe_flag - we're specifying MSG_NOSIGNAL
*
* If we can use SO_NOSIGPIPE, then set sigpipe_so here
* and we're done. Otherwise, set sigpipe_flag so that
* we will try MSG_NOSIGNAL on sends. If we get an error
* with MSG_NOSIGNAL, we'll clear that flag and revert to
* signal masking.
*----------
*/
conn->sigpipe_so = false;
#ifdef MSG_NOSIGNAL
conn->sigpipe_flag = true;
#else
conn->sigpipe_flag = false;
#endif /* MSG_NOSIGNAL */
#ifdef SO_NOSIGPIPE
optval = 1;
if (setsockopt(conn->sock, SOL_SOCKET, SO_NOSIGPIPE,
(char *) &optval, sizeof(optval)) == 0)
{
conn->sigpipe_so = true;
conn->sigpipe_flag = false;
}
#endif /* SO_NOSIGPIPE */
/*
* Start/make connection. This should not block, since we
* are in nonblock mode. If it does, well, too bad.
*/
if (connect(conn->sock, addr_cur->ai_addr,
addr_cur->ai_addrlen) < 0)
{
if (SOCK_ERRNO == EINPROGRESS ||
#ifdef WIN32
SOCK_ERRNO == EWOULDBLOCK ||
#endif
SOCK_ERRNO == EINTR)
{
/*
* This is fine - we're in non-blocking mode, and
* the connection is in progress. Tell caller to
* wait for write-ready on socket.
*/
conn->status = CONNECTION_STARTED;
return PGRES_POLLING_WRITING;
}
/* otherwise, trouble */
}
else
{
/*
* Hm, we're connected already --- seems the "nonblock
* connection" wasn't. Advance the state machine and
* go do the next stuff.
*/
conn->status = CONNECTION_STARTED;
goto keep_going;
}
/*
* This connection failed. Add the error report to
* conn->errorMessage, then try the next address if any.
*/
connectFailureMessage(conn, SOCK_ERRNO);
conn->try_next_addr = true;
goto keep_going;
}
}
case CONNECTION_STARTED:
{
ACCEPT_TYPE_ARG3 optlen = sizeof(optval);
/*
* Write ready, since we've made it here, so the connection
* has been made ... or has failed.
*/
/*
* Now check (using getsockopt) that there is not an error
* state waiting for us on the socket.
*/
if (getsockopt(conn->sock, SOL_SOCKET, SO_ERROR,
(char *) &optval, &optlen) == -1)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get socket error status: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
else if (optval != 0)
{
/*
* When using a nonblocking connect, we will typically see
* connect failures at this point, so provide a friendly
* error message.
*/
connectFailureMessage(conn, optval);
/*
* Try the next address if any, just as in the case where
* connect() returned failure immediately.
*/
conn->try_next_addr = true;
goto keep_going;
}
/* Fill in the client address */
conn->laddr.salen = sizeof(conn->laddr.addr);
if (getsockname(conn->sock,
(struct sockaddr *) &conn->laddr.addr,
&conn->laddr.salen) < 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get client address from socket: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/*
* Make sure we can write before advancing to next step.
*/
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
case CONNECTION_MADE:
{
char *startpacket;
int packetlen;
/*
* Implement requirepeer check, if requested and it's a
* Unix-domain socket.
*/
if (conn->requirepeer && conn->requirepeer[0] &&
IS_AF_UNIX(conn->raddr.addr.ss_family))
{
#ifndef WIN32
char pwdbuf[BUFSIZ];
struct passwd pass_buf;
struct passwd *pass;
int passerr;
#endif
uid_t uid;
gid_t gid;
errno = 0;
if (getpeereid(conn->sock, &uid, &gid) != 0)
{
/*
* Provide special error message if getpeereid is a
* stub
*/
if (errno == ENOSYS)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get peer credentials: %s\n"),
strerror_r(errno, sebuf, sizeof(sebuf)));
goto error_return;
}
#ifndef WIN32
passerr = pqGetpwuid(uid, &pass_buf, pwdbuf, sizeof(pwdbuf), &pass);
if (pass == NULL)
{
if (passerr != 0)
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not look up local user ID %d: %s\n"),
(int) uid,
strerror_r(passerr, sebuf, sizeof(sebuf)));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("local user with ID %d does not exist\n"),
(int) uid);
goto error_return;
}
if (strcmp(pass->pw_name, conn->requirepeer) != 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n"),
conn->requirepeer, pass->pw_name);
goto error_return;
}
#else /* WIN32 */
/* should have failed with ENOSYS above */
Assert(false);
#endif /* WIN32 */
}
if (IS_AF_UNIX(conn->raddr.addr.ss_family))
{
/* Don't request SSL or GSSAPI over Unix sockets */
#ifdef USE_SSL
conn->allow_ssl_try = false;
#endif
#ifdef ENABLE_GSS
conn->try_gss = false;
#endif
}
#ifdef ENABLE_GSS
/*
* If GSSAPI encryption is enabled, then call
* pg_GSS_have_cred_cache() which will return true if we can
* acquire credentials (and give us a handle to use in
* conn->gcred), and then send a packet to the server asking
* for GSSAPI Encryption (and skip past SSL negotiation and
* regular startup below).
*/
if (conn->try_gss && !conn->gctx)
conn->try_gss = pg_GSS_have_cred_cache(&conn->gcred);
if (conn->try_gss && !conn->gctx)
{
ProtocolVersion pv = pg_hton32(NEGOTIATE_GSS_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send GSSAPI negotiation packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
conn->status = CONNECTION_GSS_STARTUP;
return PGRES_POLLING_READING;
}
else if (!conn->gctx && conn->gssencmode[0] == 'r')
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("GSSAPI encryption required but was impossible (possibly no credential cache, no server support, or using a local socket)\n"));
goto error_return;
}
#endif
#ifdef USE_SSL
/*
* Enable the libcrypto callbacks before checking if SSL needs
* to be done. This is done before sending the startup packet
* as depending on the type of authentication done, like MD5
* or SCRAM that use cryptohashes, the callbacks would be
* required even without a SSL connection
*/
if (pqsecure_initialize(conn, false, true) < 0)
goto error_return;
/*
* If SSL is enabled and we haven't already got encryption of
* some sort running, request SSL instead of sending the
* startup message.
*/
if (conn->allow_ssl_try && !conn->wait_ssl_try &&
!conn->ssl_in_use
#ifdef ENABLE_GSS
&& !conn->gssenc
#endif
)
{
ProtocolVersion pv;
/*
* Send the SSL request packet.
*
* Theoretically, this could block, but it really
* shouldn't since we only got here if the socket is
* write-ready.
*/
pv = pg_hton32(NEGOTIATE_SSL_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send SSL negotiation packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
conn->status = CONNECTION_SSL_STARTUP;
return PGRES_POLLING_READING;
}
#endif /* USE_SSL */
/*
* Build the startup packet.
*/
startpacket = pqBuildStartupPacket3(conn, &packetlen,
EnvironmentOptions);
if (!startpacket)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("out of memory\n"));
goto error_return;
}
/*
* Send the startup packet.
*
* Theoretically, this could block, but it really shouldn't
* since we only got here if the socket is write-ready.
*/
if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send startup packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
free(startpacket);
goto error_return;
}
free(startpacket);
conn->status = CONNECTION_AWAITING_RESPONSE;
return PGRES_POLLING_READING;
}
/*
* Handle SSL negotiation: wait for postmaster messages and
* respond as necessary.
*/
case CONNECTION_SSL_STARTUP:
{
#ifdef USE_SSL
PostgresPollingStatusType pollres;
/*
* On first time through, get the postmaster's response to our
* SSL negotiation packet.
*/
if (!conn->ssl_in_use)
{
/*
* We use pqReadData here since it has the logic to
* distinguish no-data-yet from connection closure. Since
* conn->ssl isn't set, a plain recv() will occur.
*/
char SSLok;
int rdresult;
rdresult = pqReadData(conn);
if (rdresult < 0)
{
/* errorMessage is already filled in */
goto error_return;
}
if (rdresult == 0)
{
/* caller failed to wait for data */
return PGRES_POLLING_READING;
}
if (pqGetc(&SSLok, conn) < 0)
{
/* should not happen really */
return PGRES_POLLING_READING;
}
if (SSLok == 'S')
{
/* mark byte consumed */
conn->inStart = conn->inCursor;
/*
* Set up global SSL state if required. The crypto
* state has already been set if libpq took care of
* doing that, so there is no need to make that happen
* again.
*/
if (pqsecure_initialize(conn, true, false) != 0)
goto error_return;
}
else if (SSLok == 'N')
{
/* mark byte consumed */
conn->inStart = conn->inCursor;
/* OK to do without SSL? */
if (conn->sslmode[0] == 'r' || /* "require" */
conn->sslmode[0] == 'v') /* "verify-ca" or
* "verify-full" */
{
/* Require SSL, but server does not want it */
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server does not support SSL, but SSL was required\n"));
goto error_return;
}
/* Otherwise, proceed with normal startup */
conn->allow_ssl_try = false;
/* We can proceed using this connection */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (SSLok == 'E')
{
/*
* Server failure of some sort, such as failure to
* fork a backend process. We need to process and
* report the error message, which might be formatted
* according to either protocol 2 or protocol 3.
* Rather than duplicate the code for that, we flip
* into AWAITING_RESPONSE state and let the code there
* deal with it. Note we have *not* consumed the "E"
* byte here.
*/
conn->status = CONNECTION_AWAITING_RESPONSE;
goto keep_going;
}
else
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("received invalid response to SSL negotiation: %c\n"),
SSLok);
goto error_return;
}
}
/*
* Begin or continue the SSL negotiation process.
*/
pollres = pqsecure_open_client(conn);
if (pollres == PGRES_POLLING_OK)
{
/* SSL handshake done, ready to send startup packet */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
if (pollres == PGRES_POLLING_FAILED)
{
/*
* Failed ... if sslmode is "prefer" then do a non-SSL
* retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->allow_ssl_try /* redundant? */
&& !conn->wait_ssl_try) /* redundant? */
{
/* only retry once */
conn->allow_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
/* Else it's a hard failure */
goto error_return;
}
/* Else, return POLLING_READING or POLLING_WRITING status */
return pollres;
#else /* !USE_SSL */
/* can't get here */
goto error_return;
#endif /* USE_SSL */
}
case CONNECTION_GSS_STARTUP:
{
#ifdef ENABLE_GSS
PostgresPollingStatusType pollres;
/*
* If we haven't yet, get the postmaster's response to our
* negotiation packet
*/
if (conn->try_gss && !conn->gctx)
{
char gss_ok;
int rdresult = pqReadData(conn);
if (rdresult < 0)
/* pqReadData fills in error message */
goto error_return;
else if (rdresult == 0)
/* caller failed to wait for data */
return PGRES_POLLING_READING;
if (pqGetc(&gss_ok, conn) < 0)
/* shouldn't happen... */
return PGRES_POLLING_READING;
if (gss_ok == 'E')
{
/*
* Server failure of some sort. Assume it's a
* protocol version support failure, and let's see if
* we can't recover (if it's not, we'll get a better
* error message on retry). Server gets fussy if we
* don't hang up the socket, though.
*/
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
/* mark byte consumed */
conn->inStart = conn->inCursor;
if (gss_ok == 'N')
{
/* Server doesn't want GSSAPI; fall back if we can */
if (conn->gssencmode[0] == 'r')
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server doesn't support GSSAPI encryption, but it was required\n"));
goto error_return;
}
conn->try_gss = false;
/* We can proceed using this connection */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (gss_ok != 'G')
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("received invalid response to GSSAPI negotiation: %c\n"),
gss_ok);
goto error_return;
}
}
/* Begin or continue GSSAPI negotiation */
pollres = pqsecure_open_gss(conn);
if (pollres == PGRES_POLLING_OK)
{
/* All set for startup packet */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (pollres == PGRES_POLLING_FAILED &&
conn->gssencmode[0] == 'p')
{
/*
* We failed, but we can retry on "prefer". Have to drop
* the current connection to do so, though.
*/
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
return pollres;
#else /* !ENABLE_GSS */
/* unreachable */
goto error_return;
#endif /* ENABLE_GSS */
}
/*
* Handle authentication exchange: wait for postmaster messages
* and respond as necessary.
*/
case CONNECTION_AWAITING_RESPONSE:
{
char beresp;
int msgLength;
int avail;
AuthRequest areq;
int res;
/*
* Scan the message from current point (note that if we find
* the message is incomplete, we will return without advancing
* inStart, and resume here next time).
*/
conn->inCursor = conn->inStart;
/* Read type byte */
if (pqGetc(&beresp, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/*
* Validate message type: we expect only an authentication
* request or an error here. Anything else probably means
* it's not Postgres on the other end at all.
*/
if (!(beresp == 'R' || beresp == 'E'))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("expected authentication request from server, but received %c\n"),
beresp);
goto error_return;
}
/* Read message length word */
if (pqGetInt(&msgLength, 4, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/*
* Try to validate message length before using it.
* Authentication requests can't be very large, although GSS
* auth requests may not be that small. Errors can be a
* little larger, but not huge. If we see a large apparent
* length in an error, it means we're really talking to a
* pre-3.0-protocol server; cope. (Before version 14, the
* server also used the old protocol for errors that happened
* before processing the startup packet.)
*/
if (beresp == 'R' && (msgLength < 8 || msgLength > 2000))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("expected authentication request from server, but received %c\n"),
beresp);
goto error_return;
}
if (beresp == 'E' && (msgLength < 8 || msgLength > 30000))
{
/* Handle error from a pre-3.0 server */
conn->inCursor = conn->inStart + 1; /* reread data */
if (pqGets_append(&conn->errorMessage, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* OK, we read the message; mark data consumed */
conn->inStart = conn->inCursor;
/*
* Before 7.2, the postmaster didn't always end its
* messages with a newline, so add one if needed to
* conform to libpq conventions.
*/
if (conn->errorMessage.len == 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
{
appendPQExpBufferChar(&conn->errorMessage, '\n');
}
goto error_return;
}
/*
* Can't process if message body isn't all here yet.
*/
msgLength -= 4;
avail = conn->inEnd - conn->inCursor;
if (avail < msgLength)
{
/*
* Before returning, try to enlarge the input buffer if
* needed to hold the whole message; see notes in
* pqParseInput3.
*/
if (pqCheckInBufferSpace(conn->inCursor + (size_t) msgLength,
conn))
goto error_return;
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* Handle errors. */
if (beresp == 'E')
{
if (pqGetErrorNotice3(conn, true))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* OK, we read the message; mark data consumed */
conn->inStart = conn->inCursor;
/*
* If error is "cannot connect now", try the next host if
* any (but we don't want to consider additional addresses
* for this host, nor is there much point in changing SSL
* or GSS mode). This is helpful when dealing with
* standby servers that might not be in hot-standby state.
*/
if (strcmp(conn->last_sqlstate,
ERRCODE_CANNOT_CONNECT_NOW) == 0)
{
conn->try_next_host = true;
goto keep_going;
}
/* Check to see if we should mention pgpassfile */
pgpassfileWarning(conn);
#ifdef ENABLE_GSS
/*
* If gssencmode is "prefer" and we're using GSSAPI, retry
* without it.
*/
if (conn->gssenc && conn->gssencmode[0] == 'p')
{
/* only retry once */
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
#endif
#ifdef USE_SSL
/*
* if sslmode is "allow" and we haven't tried an SSL
* connection already, then retry with an SSL connection
*/
if (conn->sslmode[0] == 'a' /* "allow" */
&& !conn->ssl_in_use
&& conn->allow_ssl_try
&& conn->wait_ssl_try)
{
/* only retry once */
conn->wait_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
/*
* if sslmode is "prefer" and we're in an SSL connection,
* then do a non-SSL retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->ssl_in_use
&& conn->allow_ssl_try /* redundant? */
&& !conn->wait_ssl_try) /* redundant? */
{
/* only retry once */
conn->allow_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
#endif
goto error_return;
}
/* It is an authentication request. */
conn->auth_req_received = true;
/* Get the type of request. */
if (pqGetInt((int *) &areq, 4, conn))
{
				/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
msgLength -= 4;
/*
* Process the rest of the authentication request message, and
* respond to it if necessary.
*
* Note that conn->pghost must be non-NULL if we are going to
* avoid the Kerberos code doing a hostname look-up.
*/
res = pg_fe_sendauth(areq, msgLength, conn);
/* OK, we have processed the message; mark data consumed */
conn->inStart = conn->inCursor;
if (res != STATUS_OK)
goto error_return;
/*
* Just make sure that any data sent by pg_fe_sendauth is
* flushed out. Although this theoretically could block, it
* really shouldn't since we don't send large auth responses.
*/
if (pqFlush(conn))
goto error_return;
if (areq == AUTH_REQ_OK)
{
/* We are done with authentication exchange */
conn->status = CONNECTION_AUTH_OK;
/*
* Set asyncStatus so that PQgetResult will think that
* what comes back next is the result of a query. See
* below.
*/
conn->asyncStatus = PGASYNC_BUSY;
}
/* Look to see if we have more data yet. */
goto keep_going;
}
case CONNECTION_AUTH_OK:
{
/*
* Now we expect to hear from the backend. A ReadyForQuery
* message indicates that startup is successful, but we might
* also get an Error message indicating failure. (Notice
* messages indicating nonfatal warnings are also allowed by
* the protocol, as are ParameterStatus and BackendKeyData
* messages.) Easiest way to handle this is to let
* PQgetResult() read the messages. We just have to fake it
* out about the state of the connection, by setting
* asyncStatus = PGASYNC_BUSY (done above).
*/
if (PQisBusy(conn))
return PGRES_POLLING_READING;
res = PQgetResult(conn);
/*
* NULL return indicating we have gone to IDLE state is
* expected
*/
if (res)
{
if (res->resultStatus != PGRES_FATAL_ERROR)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("unexpected message from server during startup\n"));
else if (conn->send_appname &&
(conn->appname || conn->fbappname))
{
/*
* If we tried to send application_name, check to see
* if the error is about that --- pre-9.0 servers will
* reject it at this stage of the process. If so,
* close the connection and retry without sending
* application_name. We could possibly get a false
* SQLSTATE match here and retry uselessly, but there
* seems no great harm in that; we'll just get the
* same error again if it's unrelated.
*/
const char *sqlstate;
sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
if (sqlstate &&
strcmp(sqlstate, ERRCODE_APPNAME_UNKNOWN) == 0)
{
PQclear(res);
conn->send_appname = false;
need_new_connection = true;
goto keep_going;
}
}
/*
* if the resultStatus is FATAL, then conn->errorMessage
* already has a copy of the error; needn't copy it back.
* But add a newline if it's not there already, since
* postmaster error messages may not have one.
*/
if (conn->errorMessage.len <= 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
appendPQExpBufferChar(&conn->errorMessage, '\n');
PQclear(res);
goto error_return;
}
/* Almost there now ... */
conn->status = CONNECTION_CHECK_TARGET;
goto keep_going;
}
case CONNECTION_CHECK_TARGET:
{
/*
* If a read-write, read-only, primary, or standby connection
* is required, see if we have one.
*/
if (conn->target_server_type == SERVER_TYPE_READ_WRITE ||
conn->target_server_type == SERVER_TYPE_READ_ONLY)
{
bool read_only_server;
/*
* If the server didn't report
* "default_transaction_read_only" or "in_hot_standby" at
* startup, we must determine its state by sending the
* query "SHOW transaction_read_only". This GUC exists in
* all server versions that support 3.0 protocol.
*/
if (conn->default_transaction_read_only == PG_BOOL_UNKNOWN ||
conn->in_hot_standby == PG_BOOL_UNKNOWN)
{
/*
* We use PQsendQueryContinue so that
* conn->errorMessage does not get cleared. We need
* to preserve any error messages related to previous
* hosts we have tried and failed to connect to.
*/
conn->status = CONNECTION_OK;
if (!PQsendQueryContinue(conn,
"SHOW transaction_read_only"))
goto error_return;
/* We'll return to this state when we have the answer */
conn->status = CONNECTION_CHECK_WRITABLE;
return PGRES_POLLING_READING;
}
/* OK, we can make the test */
read_only_server =
(conn->default_transaction_read_only == PG_BOOL_YES ||
conn->in_hot_standby == PG_BOOL_YES);
if ((conn->target_server_type == SERVER_TYPE_READ_WRITE) ?
read_only_server : !read_only_server)
{
/* Wrong server state, reject and try the next host */
if (conn->target_server_type == SERVER_TYPE_READ_WRITE)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("session is read-only\n"));
else
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("session is not read-only\n"));
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/*
* Try next host if any, but we don't want to consider
* additional addresses for this host.
*/
conn->try_next_host = true;
goto keep_going;
}
}
else if (conn->target_server_type == SERVER_TYPE_PRIMARY ||
conn->target_server_type == SERVER_TYPE_STANDBY ||
conn->target_server_type == SERVER_TYPE_PREFER_STANDBY)
{
/*
* If the server didn't report "in_hot_standby" at
* startup, we must determine its state by sending the
* query "SELECT pg_catalog.pg_is_in_recovery()". Servers
* before 9.0 don't have that function, but by the same
* token they don't have any standby mode, so we may just
* assume the result.
*/
if (conn->sversion < 90000)
conn->in_hot_standby = PG_BOOL_NO;
if (conn->in_hot_standby == PG_BOOL_UNKNOWN)
{
/*
* We use PQsendQueryContinue so that
* conn->errorMessage does not get cleared. We need
* to preserve any error messages related to previous
* hosts we have tried and failed to connect to.
*/
conn->status = CONNECTION_OK;
if (!PQsendQueryContinue(conn,
"SELECT pg_catalog.pg_is_in_recovery()"))
goto error_return;
/* We'll return to this state when we have the answer */
conn->status = CONNECTION_CHECK_STANDBY;
return PGRES_POLLING_READING;
}
/* OK, we can make the test */
if ((conn->target_server_type == SERVER_TYPE_PRIMARY) ?
(conn->in_hot_standby == PG_BOOL_YES) :
(conn->in_hot_standby == PG_BOOL_NO))
{
/* Wrong server state, reject and try the next host */
if (conn->target_server_type == SERVER_TYPE_PRIMARY)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server is in hot standby mode\n"));
else
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server is not in hot standby mode\n"));
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/*
* Try next host if any, but we don't want to consider
* additional addresses for this host.
*/
conn->try_next_host = true;
goto keep_going;
}
}
/* We can release the address list now. */
release_conn_addrinfo(conn);
/*
* Contents of conn->errorMessage are no longer interesting
* (and it seems some clients expect it to be empty after a
* successful connection).
*/
resetPQExpBuffer(&conn->errorMessage);
/* We are open for business! */
conn->status = CONNECTION_OK;
return PGRES_POLLING_OK;
}
case CONNECTION_CONSUME:
{
/*
* This state just makes sure the connection is idle after
* we've obtained the result of a SHOW or SELECT query. Once
* we're clear, return to CONNECTION_CHECK_TARGET state to
* decide what to do next. We must transiently set status =
* CONNECTION_OK in order to use the result-consuming
* subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CONSUME;
return PGRES_POLLING_READING;
}
/* Call PQgetResult() again until we get a NULL result */
res = PQgetResult(conn);
if (res != NULL)
{
PQclear(res);
conn->status = CONNECTION_CONSUME;
return PGRES_POLLING_READING;
}
conn->status = CONNECTION_CHECK_TARGET;
goto keep_going;
}
case CONNECTION_CHECK_WRITABLE:
{
/*
* Waiting for result of "SHOW transaction_read_only". We
* must transiently set status = CONNECTION_OK in order to use
* the result-consuming subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CHECK_WRITABLE;
return PGRES_POLLING_READING;
}
res = PQgetResult(conn);
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
PQntuples(res) == 1)
{
char *val = PQgetvalue(res, 0, 0);
/*
* "transaction_read_only = on" proves that at least one
* of default_transaction_read_only and in_hot_standby is
* on, but we don't actually know which. We don't care
* though for the purpose of identifying a read-only
* session, so satisfy the CONNECTION_CHECK_TARGET code by
* claiming they are both on. On the other hand, if it's
* a read-write session, they are certainly both off.
*/
if (strncmp(val, "on", 2) == 0)
{
conn->default_transaction_read_only = PG_BOOL_YES;
conn->in_hot_standby = PG_BOOL_YES;
}
else
{
conn->default_transaction_read_only = PG_BOOL_NO;
conn->in_hot_standby = PG_BOOL_NO;
}
PQclear(res);
/* Finish reading messages before continuing */
conn->status = CONNECTION_CONSUME;
goto keep_going;
}
/* Something went wrong with "SHOW transaction_read_only". */
if (res)
PQclear(res);
/* Append error report to conn->errorMessage. */
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("\"%s\" failed\n"),
"SHOW transaction_read_only");
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/* Try next host. */
conn->try_next_host = true;
goto keep_going;
}
case CONNECTION_CHECK_STANDBY:
{
/*
* Waiting for result of "SELECT pg_is_in_recovery()". We
* must transiently set status = CONNECTION_OK in order to use
* the result-consuming subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CHECK_STANDBY;
return PGRES_POLLING_READING;
}
res = PQgetResult(conn);
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
PQntuples(res) == 1)
{
char *val = PQgetvalue(res, 0, 0);
if (strncmp(val, "t", 1) == 0)
conn->in_hot_standby = PG_BOOL_YES;
else
conn->in_hot_standby = PG_BOOL_NO;
PQclear(res);
/* Finish reading messages before continuing */
conn->status = CONNECTION_CONSUME;
goto keep_going;
}
/* Something went wrong with "SELECT pg_is_in_recovery()". */
if (res)
PQclear(res);
/* Append error report to conn->errorMessage. */
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("\"%s\" failed\n"),
"SELECT pg_is_in_recovery()");
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/* Try next host. */
conn->try_next_host = true;
goto keep_going;
}
default:
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid connection state %d, "
"probably indicative of memory corruption\n"),
conn->status);
goto error_return;
}
/* Unreachable */
error_return:
/*
* We used to close the socket at this point, but that makes it awkward
* for those above us if they wish to remove this socket from their own
* records (an fd_set for example). We'll just have this socket closed
* when PQfinish is called (which is compulsory even after an error, since
* the connection structure must be freed).
*/
conn->status = CONNECTION_BAD;
return PGRES_POLLING_FAILED;
}
|
145605680477709719969453491383463044279
|
fe-connect.c
|
157537093140020562394962573075779914657
|
CWE-522
|
CVE-2021-23222
|
A man-in-the-middle attacker can inject false responses to the client's first few queries, despite the use of SSL certificate verification and encryption.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-23222
|
225,062
|
postgres
|
160c0258802d10b0600d7671b1bbea55d8e17d45
|
https://github.com/postgres/postgres
|
https://github.com/postgres/postgres/commit/160c0258802d10b0600d7671b1bbea55d8e17d45
|
libpq: reject extraneous data after SSL or GSS encryption handshake.
libpq collects up to a bufferload of data whenever it reads data from
the socket. When SSL or GSS encryption is requested during startup,
any additional data received with the server's yes-or-no reply
remained in the buffer, and would be treated as already-decrypted data
once the encryption handshake completed. Thus, a man-in-the-middle
with the ability to inject data into the TCP connection could stuff
some cleartext data into the start of a supposedly encryption-protected
database session.
This could probably be abused to inject faked responses to the
client's first few queries, although other details of libpq's behavior
make that harder than it sounds. A different line of attack is to
exfiltrate the client's password, or other sensitive data that might
be sent early in the session. That has been shown to be possible with
a server vulnerable to CVE-2021-23214.
To fix, throw a protocol-violation error if the internal buffer
is not empty after the encryption handshake.
Our thanks to Jacob Champion for reporting this problem.
Security: CVE-2021-23222
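A minimal, self-contained sketch of the check this commit adds follows. The buffer-field names mirror libpq's PGconn members (inCursor, inEnd); the struct and driver code are hypothetical illustration, not the real libpq API.

// Sketch: reject a connection when unencrypted bytes are still buffered
// after the encryption handshake. Field names mirror libpq's PGconn;
// everything else is made up for illustration.
#include <cstdio>
#include <cstring>

struct FakeConn
{
	char		inBuffer[256];	/* raw bytes read from the socket */
	int			inCursor;		/* next byte the protocol parser will inspect */
	int			inEnd;			/* one past the last byte received so far */
};

/* True when it is safe to treat subsequent reads as encrypted. */
static bool handshake_buffer_is_clean(const FakeConn *conn)
{
	/*
	 * After the server's one-byte yes/no reply to the SSL/GSS request has
	 * been consumed, any bytes still in the buffer arrived before encryption
	 * was established and may have been injected by a man-in-the-middle, so
	 * the connection must be rejected.
	 */
	return conn->inCursor == conn->inEnd;
}

int main()
{
	FakeConn	conn{};

	std::memcpy(conn.inBuffer, "S<injected>", 11);	/* 'S' reply + stuffed data */
	conn.inCursor = 1;			/* the 'S' has been consumed */
	conn.inEnd = 11;			/* ...but ten unencrypted bytes remain */
	if (!handshake_buffer_is_clean(&conn))
		std::fprintf(stderr, "received unencrypted data after SSL response\n");
	return 0;
}

The patched function below performs this exact emptiness test (conn->inCursor != conn->inEnd) immediately after pqsecure_open_client() and pqsecure_open_gss() report PGRES_POLLING_OK.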
| 0
|
PQconnectPoll(PGconn *conn)
{
bool reset_connection_state_machine = false;
bool need_new_connection = false;
PGresult *res;
char sebuf[PG_STRERROR_R_BUFLEN];
int optval;
if (conn == NULL)
return PGRES_POLLING_FAILED;
/* Get the new data */
switch (conn->status)
{
/*
* We really shouldn't have been polled in these two cases, but we
* can handle it.
*/
case CONNECTION_BAD:
return PGRES_POLLING_FAILED;
case CONNECTION_OK:
return PGRES_POLLING_OK;
/* These are reading states */
case CONNECTION_AWAITING_RESPONSE:
case CONNECTION_AUTH_OK:
case CONNECTION_CHECK_WRITABLE:
case CONNECTION_CONSUME:
case CONNECTION_CHECK_STANDBY:
{
/* Load waiting data */
int n = pqReadData(conn);
if (n < 0)
goto error_return;
if (n == 0)
return PGRES_POLLING_READING;
break;
}
/* These are writing states, so we just proceed. */
case CONNECTION_STARTED:
case CONNECTION_MADE:
break;
/* Special cases: proceed without waiting. */
case CONNECTION_SSL_STARTUP:
case CONNECTION_NEEDED:
case CONNECTION_GSS_STARTUP:
case CONNECTION_CHECK_TARGET:
break;
default:
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("invalid connection state, probably indicative of memory corruption\n"));
goto error_return;
}
keep_going: /* We will come back to here until there is
* nothing left to do. */
/* Time to advance to next address, or next host if no more addresses? */
if (conn->try_next_addr)
{
if (conn->addr_cur && conn->addr_cur->ai_next)
{
conn->addr_cur = conn->addr_cur->ai_next;
reset_connection_state_machine = true;
}
else
conn->try_next_host = true;
conn->try_next_addr = false;
}
/* Time to advance to next connhost[] entry? */
if (conn->try_next_host)
{
pg_conn_host *ch;
struct addrinfo hint;
int thisport;
int ret;
char portstr[MAXPGPATH];
if (conn->whichhost + 1 < conn->nconnhost)
conn->whichhost++;
else
{
/*
* Oops, no more hosts.
*
* If we are trying to connect in "prefer-standby" mode, then drop
* the standby requirement and start over.
*
* Otherwise, an appropriate error message is already set up, so
* we just need to set the right status.
*/
if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY &&
conn->nconnhost > 0)
{
conn->target_server_type = SERVER_TYPE_PREFER_STANDBY_PASS2;
conn->whichhost = 0;
}
else
goto error_return;
}
/* Drop any address info for previous host */
release_conn_addrinfo(conn);
/*
* Look up info for the new host. On failure, log the problem in
* conn->errorMessage, then loop around to try the next host. (Note
* we don't clear try_next_host until we've succeeded.)
*/
ch = &conn->connhost[conn->whichhost];
/* Initialize hint structure */
MemSet(&hint, 0, sizeof(hint));
hint.ai_socktype = SOCK_STREAM;
conn->addrlist_family = hint.ai_family = AF_UNSPEC;
/* Figure out the port number we're going to use. */
if (ch->port == NULL || ch->port[0] == '\0')
thisport = DEF_PGPORT;
else
{
if (!parse_int_param(ch->port, &thisport, conn, "port"))
goto error_return;
if (thisport < 1 || thisport > 65535)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid port number: \"%s\"\n"),
ch->port);
goto keep_going;
}
}
snprintf(portstr, sizeof(portstr), "%d", thisport);
/* Use pg_getaddrinfo_all() to resolve the address */
switch (ch->type)
{
case CHT_HOST_NAME:
ret = pg_getaddrinfo_all(ch->host, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not translate host name \"%s\" to address: %s\n"),
ch->host, gai_strerror(ret));
goto keep_going;
}
break;
case CHT_HOST_ADDRESS:
hint.ai_flags = AI_NUMERICHOST;
ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not parse network address \"%s\": %s\n"),
ch->hostaddr, gai_strerror(ret));
goto keep_going;
}
break;
case CHT_UNIX_SOCKET:
#ifdef HAVE_UNIX_SOCKETS
conn->addrlist_family = hint.ai_family = AF_UNIX;
UNIXSOCK_PATH(portstr, thisport, ch->host);
if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"),
portstr,
(int) (UNIXSOCK_PATH_BUFLEN - 1));
goto keep_going;
}
/*
* NULL hostname tells pg_getaddrinfo_all to parse the service
* name as a Unix-domain socket path.
*/
ret = pg_getaddrinfo_all(NULL, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"),
portstr, gai_strerror(ret));
goto keep_going;
}
#else
Assert(false);
#endif
break;
}
/* OK, scan this addrlist for a working server address */
conn->addr_cur = conn->addrlist;
reset_connection_state_machine = true;
conn->try_next_host = false;
}
/* Reset connection state machine? */
if (reset_connection_state_machine)
{
/*
* (Re) initialize our connection control variables for a set of
* connection attempts to a single server address. These variables
* must persist across individual connection attempts, but we must
* reset them when we start to consider a new server.
*/
conn->pversion = PG_PROTOCOL(3, 0);
conn->send_appname = true;
#ifdef USE_SSL
/* initialize these values based on SSL mode */
conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */
conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */
#endif
#ifdef ENABLE_GSS
conn->try_gss = (conn->gssencmode[0] != 'd'); /* "disable" */
#endif
reset_connection_state_machine = false;
need_new_connection = true;
}
/* Force a new connection (perhaps to the same server as before)? */
if (need_new_connection)
{
/* Drop any existing connection */
pqDropConnection(conn, true);
/* Reset all state obtained from old server */
pqDropServerData(conn);
/* Drop any PGresult we might have, too */
conn->asyncStatus = PGASYNC_IDLE;
conn->xactStatus = PQTRANS_IDLE;
conn->pipelineStatus = PQ_PIPELINE_OFF;
pqClearAsyncResult(conn);
/* Reset conn->status to put the state machine in the right state */
conn->status = CONNECTION_NEEDED;
need_new_connection = false;
}
/* Now try to advance the state machine for this connection */
switch (conn->status)
{
case CONNECTION_NEEDED:
{
/*
* Try to initiate a connection to one of the addresses
* returned by pg_getaddrinfo_all(). conn->addr_cur is the
* next one to try.
*
* The extra level of braces here is historical. It's not
* worth reindenting this whole switch case to remove 'em.
*/
{
struct addrinfo *addr_cur = conn->addr_cur;
char host_addr[NI_MAXHOST];
/*
* Advance to next possible host, if we've tried all of
* the addresses for the current host.
*/
if (addr_cur == NULL)
{
conn->try_next_host = true;
goto keep_going;
}
/* Remember current address for possible use later */
memcpy(&conn->raddr.addr, addr_cur->ai_addr,
addr_cur->ai_addrlen);
conn->raddr.salen = addr_cur->ai_addrlen;
/*
* Set connip, too. Note we purposely ignore strdup
* failure; not a big problem if it fails.
*/
if (conn->connip != NULL)
{
free(conn->connip);
conn->connip = NULL;
}
getHostaddr(conn, host_addr, NI_MAXHOST);
if (host_addr[0])
conn->connip = strdup(host_addr);
/* Try to create the socket */
conn->sock = socket(addr_cur->ai_family, SOCK_STREAM, 0);
if (conn->sock == PGINVALID_SOCKET)
{
int errorno = SOCK_ERRNO;
/*
* Silently ignore socket() failure if we have more
* addresses to try; this reduces useless chatter in
* cases where the address list includes both IPv4 and
* IPv6 but kernel only accepts one family.
*/
if (addr_cur->ai_next != NULL ||
conn->whichhost + 1 < conn->nconnhost)
{
conn->try_next_addr = true;
goto keep_going;
}
emitHostIdentityInfo(conn, host_addr);
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not create socket: %s\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)));
goto error_return;
}
/*
* Once we've identified a target address, all errors
* except the preceding socket()-failure case should be
* prefixed with host-identity information. (If the
* connection succeeds, the contents of conn->errorMessage
* won't matter, so this is harmless.)
*/
emitHostIdentityInfo(conn, host_addr);
/*
* Select socket options: no delay of outgoing data for
* TCP sockets, nonblock mode, close-on-exec. Try the
* next address if any of this fails.
*/
if (!IS_AF_UNIX(addr_cur->ai_family))
{
if (!connectNoDelay(conn))
{
/* error message already created */
conn->try_next_addr = true;
goto keep_going;
}
}
if (!pg_set_noblock(conn->sock))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to nonblocking mode: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
conn->try_next_addr = true;
goto keep_going;
}
#ifdef F_SETFD
if (fcntl(conn->sock, F_SETFD, FD_CLOEXEC) == -1)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to close-on-exec mode: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
conn->try_next_addr = true;
goto keep_going;
}
#endif /* F_SETFD */
if (!IS_AF_UNIX(addr_cur->ai_family))
{
#ifndef WIN32
int on = 1;
#endif
int usekeepalives = useKeepalives(conn);
int err = 0;
if (usekeepalives < 0)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("keepalives parameter must be an integer\n"));
err = 1;
}
else if (usekeepalives == 0)
{
/* Do nothing */
}
#ifndef WIN32
else if (setsockopt(conn->sock,
SOL_SOCKET, SO_KEEPALIVE,
(char *) &on, sizeof(on)) < 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("%s(%s) failed: %s\n"),
"setsockopt",
"SO_KEEPALIVE",
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
err = 1;
}
else if (!setKeepalivesIdle(conn)
|| !setKeepalivesInterval(conn)
|| !setKeepalivesCount(conn))
err = 1;
#else /* WIN32 */
#ifdef SIO_KEEPALIVE_VALS
else if (!setKeepalivesWin32(conn))
err = 1;
#endif /* SIO_KEEPALIVE_VALS */
#endif /* WIN32 */
else if (!setTCPUserTimeout(conn))
err = 1;
if (err)
{
conn->try_next_addr = true;
goto keep_going;
}
}
/*----------
* We have three methods of blocking SIGPIPE during
* send() calls to this socket:
*
* - setsockopt(sock, SO_NOSIGPIPE)
* - send(sock, ..., MSG_NOSIGNAL)
* - setting the signal mask to SIG_IGN during send()
*
* The third method requires three syscalls per send,
* so we prefer either of the first two, but they are
* less portable. The state is tracked in the following
* members of PGconn:
*
* conn->sigpipe_so - we have set up SO_NOSIGPIPE
* conn->sigpipe_flag - we're specifying MSG_NOSIGNAL
*
* If we can use SO_NOSIGPIPE, then set sigpipe_so here
* and we're done. Otherwise, set sigpipe_flag so that
* we will try MSG_NOSIGNAL on sends. If we get an error
* with MSG_NOSIGNAL, we'll clear that flag and revert to
* signal masking.
*----------
*/
conn->sigpipe_so = false;
#ifdef MSG_NOSIGNAL
conn->sigpipe_flag = true;
#else
conn->sigpipe_flag = false;
#endif /* MSG_NOSIGNAL */
#ifdef SO_NOSIGPIPE
optval = 1;
if (setsockopt(conn->sock, SOL_SOCKET, SO_NOSIGPIPE,
(char *) &optval, sizeof(optval)) == 0)
{
conn->sigpipe_so = true;
conn->sigpipe_flag = false;
}
#endif /* SO_NOSIGPIPE */
/*
* Start/make connection. This should not block, since we
* are in nonblock mode. If it does, well, too bad.
*/
if (connect(conn->sock, addr_cur->ai_addr,
addr_cur->ai_addrlen) < 0)
{
if (SOCK_ERRNO == EINPROGRESS ||
#ifdef WIN32
SOCK_ERRNO == EWOULDBLOCK ||
#endif
SOCK_ERRNO == EINTR)
{
/*
* This is fine - we're in non-blocking mode, and
* the connection is in progress. Tell caller to
* wait for write-ready on socket.
*/
conn->status = CONNECTION_STARTED;
return PGRES_POLLING_WRITING;
}
/* otherwise, trouble */
}
else
{
/*
* Hm, we're connected already --- seems the "nonblock
* connection" wasn't. Advance the state machine and
* go do the next stuff.
*/
conn->status = CONNECTION_STARTED;
goto keep_going;
}
/*
* This connection failed. Add the error report to
* conn->errorMessage, then try the next address if any.
*/
connectFailureMessage(conn, SOCK_ERRNO);
conn->try_next_addr = true;
goto keep_going;
}
}
case CONNECTION_STARTED:
{
ACCEPT_TYPE_ARG3 optlen = sizeof(optval);
/*
* Write ready, since we've made it here, so the connection
* has been made ... or has failed.
*/
/*
* Now check (using getsockopt) that there is not an error
* state waiting for us on the socket.
*/
if (getsockopt(conn->sock, SOL_SOCKET, SO_ERROR,
(char *) &optval, &optlen) == -1)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get socket error status: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
else if (optval != 0)
{
/*
* When using a nonblocking connect, we will typically see
* connect failures at this point, so provide a friendly
* error message.
*/
connectFailureMessage(conn, optval);
/*
* Try the next address if any, just as in the case where
* connect() returned failure immediately.
*/
conn->try_next_addr = true;
goto keep_going;
}
/* Fill in the client address */
conn->laddr.salen = sizeof(conn->laddr.addr);
if (getsockname(conn->sock,
(struct sockaddr *) &conn->laddr.addr,
&conn->laddr.salen) < 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get client address from socket: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/*
* Make sure we can write before advancing to next step.
*/
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
case CONNECTION_MADE:
{
char *startpacket;
int packetlen;
/*
* Implement requirepeer check, if requested and it's a
* Unix-domain socket.
*/
if (conn->requirepeer && conn->requirepeer[0] &&
IS_AF_UNIX(conn->raddr.addr.ss_family))
{
#ifndef WIN32
char pwdbuf[BUFSIZ];
struct passwd pass_buf;
struct passwd *pass;
int passerr;
#endif
uid_t uid;
gid_t gid;
errno = 0;
if (getpeereid(conn->sock, &uid, &gid) != 0)
{
/*
* Provide special error message if getpeereid is a
* stub
*/
if (errno == ENOSYS)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get peer credentials: %s\n"),
strerror_r(errno, sebuf, sizeof(sebuf)));
goto error_return;
}
#ifndef WIN32
passerr = pqGetpwuid(uid, &pass_buf, pwdbuf, sizeof(pwdbuf), &pass);
if (pass == NULL)
{
if (passerr != 0)
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not look up local user ID %d: %s\n"),
(int) uid,
strerror_r(passerr, sebuf, sizeof(sebuf)));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("local user with ID %d does not exist\n"),
(int) uid);
goto error_return;
}
if (strcmp(pass->pw_name, conn->requirepeer) != 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n"),
conn->requirepeer, pass->pw_name);
goto error_return;
}
#else /* WIN32 */
/* should have failed with ENOSYS above */
Assert(false);
#endif /* WIN32 */
}
if (IS_AF_UNIX(conn->raddr.addr.ss_family))
{
/* Don't request SSL or GSSAPI over Unix sockets */
#ifdef USE_SSL
conn->allow_ssl_try = false;
#endif
#ifdef ENABLE_GSS
conn->try_gss = false;
#endif
}
#ifdef ENABLE_GSS
/*
* If GSSAPI encryption is enabled, then call
* pg_GSS_have_cred_cache() which will return true if we can
* acquire credentials (and give us a handle to use in
* conn->gcred), and then send a packet to the server asking
* for GSSAPI Encryption (and skip past SSL negotiation and
* regular startup below).
*/
if (conn->try_gss && !conn->gctx)
conn->try_gss = pg_GSS_have_cred_cache(&conn->gcred);
if (conn->try_gss && !conn->gctx)
{
ProtocolVersion pv = pg_hton32(NEGOTIATE_GSS_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send GSSAPI negotiation packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
conn->status = CONNECTION_GSS_STARTUP;
return PGRES_POLLING_READING;
}
else if (!conn->gctx && conn->gssencmode[0] == 'r')
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("GSSAPI encryption required but was impossible (possibly no credential cache, no server support, or using a local socket)\n"));
goto error_return;
}
#endif
#ifdef USE_SSL
/*
* Enable the libcrypto callbacks before checking if SSL needs
* to be done. This is done before sending the startup packet
* as depending on the type of authentication done, like MD5
* or SCRAM that use cryptohashes, the callbacks would be
* required even without a SSL connection
*/
if (pqsecure_initialize(conn, false, true) < 0)
goto error_return;
/*
* If SSL is enabled and we haven't already got encryption of
* some sort running, request SSL instead of sending the
* startup message.
*/
if (conn->allow_ssl_try && !conn->wait_ssl_try &&
!conn->ssl_in_use
#ifdef ENABLE_GSS
&& !conn->gssenc
#endif
)
{
ProtocolVersion pv;
/*
* Send the SSL request packet.
*
* Theoretically, this could block, but it really
* shouldn't since we only got here if the socket is
* write-ready.
*/
pv = pg_hton32(NEGOTIATE_SSL_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send SSL negotiation packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
conn->status = CONNECTION_SSL_STARTUP;
return PGRES_POLLING_READING;
}
#endif /* USE_SSL */
/*
* Build the startup packet.
*/
startpacket = pqBuildStartupPacket3(conn, &packetlen,
EnvironmentOptions);
if (!startpacket)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("out of memory\n"));
goto error_return;
}
/*
* Send the startup packet.
*
* Theoretically, this could block, but it really shouldn't
* since we only got here if the socket is write-ready.
*/
if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send startup packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
free(startpacket);
goto error_return;
}
free(startpacket);
conn->status = CONNECTION_AWAITING_RESPONSE;
return PGRES_POLLING_READING;
}
/*
* Handle SSL negotiation: wait for postmaster messages and
* respond as necessary.
*/
case CONNECTION_SSL_STARTUP:
{
#ifdef USE_SSL
PostgresPollingStatusType pollres;
/*
* On first time through, get the postmaster's response to our
* SSL negotiation packet.
*/
if (!conn->ssl_in_use)
{
/*
* We use pqReadData here since it has the logic to
* distinguish no-data-yet from connection closure. Since
* conn->ssl isn't set, a plain recv() will occur.
*/
char SSLok;
int rdresult;
rdresult = pqReadData(conn);
if (rdresult < 0)
{
/* errorMessage is already filled in */
goto error_return;
}
if (rdresult == 0)
{
/* caller failed to wait for data */
return PGRES_POLLING_READING;
}
if (pqGetc(&SSLok, conn) < 0)
{
/* should not happen really */
return PGRES_POLLING_READING;
}
if (SSLok == 'S')
{
/* mark byte consumed */
conn->inStart = conn->inCursor;
/*
* Set up global SSL state if required. The crypto
* state has already been set if libpq took care of
* doing that, so there is no need to make that happen
* again.
*/
if (pqsecure_initialize(conn, true, false) != 0)
goto error_return;
}
else if (SSLok == 'N')
{
/* mark byte consumed */
conn->inStart = conn->inCursor;
/* OK to do without SSL? */
if (conn->sslmode[0] == 'r' || /* "require" */
conn->sslmode[0] == 'v') /* "verify-ca" or
* "verify-full" */
{
/* Require SSL, but server does not want it */
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server does not support SSL, but SSL was required\n"));
goto error_return;
}
/* Otherwise, proceed with normal startup */
conn->allow_ssl_try = false;
/* We can proceed using this connection */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (SSLok == 'E')
{
/*
* Server failure of some sort, such as failure to
* fork a backend process. We need to process and
* report the error message, which might be formatted
* according to either protocol 2 or protocol 3.
* Rather than duplicate the code for that, we flip
* into AWAITING_RESPONSE state and let the code there
* deal with it. Note we have *not* consumed the "E"
* byte here.
*/
conn->status = CONNECTION_AWAITING_RESPONSE;
goto keep_going;
}
else
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("received invalid response to SSL negotiation: %c\n"),
SSLok);
goto error_return;
}
}
/*
* Begin or continue the SSL negotiation process.
*/
pollres = pqsecure_open_client(conn);
if (pollres == PGRES_POLLING_OK)
{
/*
* At this point we should have no data already buffered.
* If we do, it was received before we performed the SSL
* handshake, so it wasn't encrypted and indeed may have
* been injected by a man-in-the-middle.
*/
if (conn->inCursor != conn->inEnd)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("received unencrypted data after SSL response\n"));
goto error_return;
}
/* SSL handshake done, ready to send startup packet */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
if (pollres == PGRES_POLLING_FAILED)
{
/*
* Failed ... if sslmode is "prefer" then do a non-SSL
* retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->allow_ssl_try /* redundant? */
&& !conn->wait_ssl_try) /* redundant? */
{
/* only retry once */
conn->allow_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
/* Else it's a hard failure */
goto error_return;
}
/* Else, return POLLING_READING or POLLING_WRITING status */
return pollres;
#else /* !USE_SSL */
/* can't get here */
goto error_return;
#endif /* USE_SSL */
}
case CONNECTION_GSS_STARTUP:
{
#ifdef ENABLE_GSS
PostgresPollingStatusType pollres;
/*
* If we haven't yet, get the postmaster's response to our
* negotiation packet
*/
if (conn->try_gss && !conn->gctx)
{
char gss_ok;
int rdresult = pqReadData(conn);
if (rdresult < 0)
/* pqReadData fills in error message */
goto error_return;
else if (rdresult == 0)
/* caller failed to wait for data */
return PGRES_POLLING_READING;
if (pqGetc(&gss_ok, conn) < 0)
/* shouldn't happen... */
return PGRES_POLLING_READING;
if (gss_ok == 'E')
{
/*
* Server failure of some sort. Assume it's a
* protocol version support failure, and let's see if
* we can't recover (if it's not, we'll get a better
* error message on retry). Server gets fussy if we
* don't hang up the socket, though.
*/
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
/* mark byte consumed */
conn->inStart = conn->inCursor;
if (gss_ok == 'N')
{
/* Server doesn't want GSSAPI; fall back if we can */
if (conn->gssencmode[0] == 'r')
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server doesn't support GSSAPI encryption, but it was required\n"));
goto error_return;
}
conn->try_gss = false;
/* We can proceed using this connection */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (gss_ok != 'G')
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("received invalid response to GSSAPI negotiation: %c\n"),
gss_ok);
goto error_return;
}
}
/* Begin or continue GSSAPI negotiation */
pollres = pqsecure_open_gss(conn);
if (pollres == PGRES_POLLING_OK)
{
/*
* At this point we should have no data already buffered.
* If we do, it was received before we performed the GSS
* handshake, so it wasn't encrypted and indeed may have
* been injected by a man-in-the-middle.
*/
if (conn->inCursor != conn->inEnd)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("received unencrypted data after GSSAPI encryption response\n"));
goto error_return;
}
/* All set for startup packet */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (pollres == PGRES_POLLING_FAILED &&
conn->gssencmode[0] == 'p')
{
/*
* We failed, but we can retry on "prefer". Have to drop
* the current connection to do so, though.
*/
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
return pollres;
#else /* !ENABLE_GSS */
/* unreachable */
goto error_return;
#endif /* ENABLE_GSS */
}
/*
* Handle authentication exchange: wait for postmaster messages
* and respond as necessary.
*/
case CONNECTION_AWAITING_RESPONSE:
{
char beresp;
int msgLength;
int avail;
AuthRequest areq;
int res;
/*
* Scan the message from current point (note that if we find
* the message is incomplete, we will return without advancing
* inStart, and resume here next time).
*/
conn->inCursor = conn->inStart;
/* Read type byte */
if (pqGetc(&beresp, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/*
* Validate message type: we expect only an authentication
* request or an error here. Anything else probably means
* it's not Postgres on the other end at all.
*/
if (!(beresp == 'R' || beresp == 'E'))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("expected authentication request from server, but received %c\n"),
beresp);
goto error_return;
}
/* Read message length word */
if (pqGetInt(&msgLength, 4, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/*
* Try to validate message length before using it.
* Authentication requests can't be very large, although GSS
* auth requests may not be that small. Errors can be a
* little larger, but not huge. If we see a large apparent
* length in an error, it means we're really talking to a
* pre-3.0-protocol server; cope. (Before version 14, the
* server also used the old protocol for errors that happened
* before processing the startup packet.)
*/
if (beresp == 'R' && (msgLength < 8 || msgLength > 2000))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("expected authentication request from server, but received %c\n"),
beresp);
goto error_return;
}
if (beresp == 'E' && (msgLength < 8 || msgLength > 30000))
{
/* Handle error from a pre-3.0 server */
conn->inCursor = conn->inStart + 1; /* reread data */
if (pqGets_append(&conn->errorMessage, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* OK, we read the message; mark data consumed */
conn->inStart = conn->inCursor;
/*
* Before 7.2, the postmaster didn't always end its
* messages with a newline, so add one if needed to
* conform to libpq conventions.
*/
if (conn->errorMessage.len == 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
{
appendPQExpBufferChar(&conn->errorMessage, '\n');
}
goto error_return;
}
/*
* Can't process if message body isn't all here yet.
*/
msgLength -= 4;
avail = conn->inEnd - conn->inCursor;
if (avail < msgLength)
{
/*
* Before returning, try to enlarge the input buffer if
* needed to hold the whole message; see notes in
* pqParseInput3.
*/
if (pqCheckInBufferSpace(conn->inCursor + (size_t) msgLength,
conn))
goto error_return;
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* Handle errors. */
if (beresp == 'E')
{
if (pqGetErrorNotice3(conn, true))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* OK, we read the message; mark data consumed */
conn->inStart = conn->inCursor;
/*
* If error is "cannot connect now", try the next host if
* any (but we don't want to consider additional addresses
* for this host, nor is there much point in changing SSL
* or GSS mode). This is helpful when dealing with
* standby servers that might not be in hot-standby state.
*/
if (strcmp(conn->last_sqlstate,
ERRCODE_CANNOT_CONNECT_NOW) == 0)
{
conn->try_next_host = true;
goto keep_going;
}
/* Check to see if we should mention pgpassfile */
pgpassfileWarning(conn);
#ifdef ENABLE_GSS
/*
* If gssencmode is "prefer" and we're using GSSAPI, retry
* without it.
*/
if (conn->gssenc && conn->gssencmode[0] == 'p')
{
/* only retry once */
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
#endif
#ifdef USE_SSL
/*
* if sslmode is "allow" and we haven't tried an SSL
* connection already, then retry with an SSL connection
*/
if (conn->sslmode[0] == 'a' /* "allow" */
&& !conn->ssl_in_use
&& conn->allow_ssl_try
&& conn->wait_ssl_try)
{
/* only retry once */
conn->wait_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
/*
* if sslmode is "prefer" and we're in an SSL connection,
* then do a non-SSL retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->ssl_in_use
&& conn->allow_ssl_try /* redundant? */
&& !conn->wait_ssl_try) /* redundant? */
{
/* only retry once */
conn->allow_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
#endif
goto error_return;
}
/* It is an authentication request. */
conn->auth_req_received = true;
/* Get the type of request. */
if (pqGetInt((int *) &areq, 4, conn))
{
                    /* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
msgLength -= 4;
/*
* Process the rest of the authentication request message, and
* respond to it if necessary.
*
* Note that conn->pghost must be non-NULL if we are going to
* avoid the Kerberos code doing a hostname look-up.
*/
res = pg_fe_sendauth(areq, msgLength, conn);
/* OK, we have processed the message; mark data consumed */
conn->inStart = conn->inCursor;
if (res != STATUS_OK)
goto error_return;
/*
* Just make sure that any data sent by pg_fe_sendauth is
* flushed out. Although this theoretically could block, it
* really shouldn't since we don't send large auth responses.
*/
if (pqFlush(conn))
goto error_return;
if (areq == AUTH_REQ_OK)
{
/* We are done with authentication exchange */
conn->status = CONNECTION_AUTH_OK;
/*
* Set asyncStatus so that PQgetResult will think that
* what comes back next is the result of a query. See
* below.
*/
conn->asyncStatus = PGASYNC_BUSY;
}
/* Look to see if we have more data yet. */
goto keep_going;
}
case CONNECTION_AUTH_OK:
{
/*
* Now we expect to hear from the backend. A ReadyForQuery
* message indicates that startup is successful, but we might
* also get an Error message indicating failure. (Notice
* messages indicating nonfatal warnings are also allowed by
* the protocol, as are ParameterStatus and BackendKeyData
* messages.) Easiest way to handle this is to let
* PQgetResult() read the messages. We just have to fake it
* out about the state of the connection, by setting
* asyncStatus = PGASYNC_BUSY (done above).
*/
if (PQisBusy(conn))
return PGRES_POLLING_READING;
res = PQgetResult(conn);
/*
* NULL return indicating we have gone to IDLE state is
* expected
*/
if (res)
{
if (res->resultStatus != PGRES_FATAL_ERROR)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("unexpected message from server during startup\n"));
else if (conn->send_appname &&
(conn->appname || conn->fbappname))
{
/*
* If we tried to send application_name, check to see
* if the error is about that --- pre-9.0 servers will
* reject it at this stage of the process. If so,
* close the connection and retry without sending
* application_name. We could possibly get a false
* SQLSTATE match here and retry uselessly, but there
* seems no great harm in that; we'll just get the
* same error again if it's unrelated.
*/
const char *sqlstate;
sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
if (sqlstate &&
strcmp(sqlstate, ERRCODE_APPNAME_UNKNOWN) == 0)
{
PQclear(res);
conn->send_appname = false;
need_new_connection = true;
goto keep_going;
}
}
/*
* if the resultStatus is FATAL, then conn->errorMessage
* already has a copy of the error; needn't copy it back.
* But add a newline if it's not there already, since
* postmaster error messages may not have one.
*/
if (conn->errorMessage.len <= 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
appendPQExpBufferChar(&conn->errorMessage, '\n');
PQclear(res);
goto error_return;
}
/* Almost there now ... */
conn->status = CONNECTION_CHECK_TARGET;
goto keep_going;
}
case CONNECTION_CHECK_TARGET:
{
/*
* If a read-write, read-only, primary, or standby connection
* is required, see if we have one.
*/
if (conn->target_server_type == SERVER_TYPE_READ_WRITE ||
conn->target_server_type == SERVER_TYPE_READ_ONLY)
{
bool read_only_server;
/*
* If the server didn't report
* "default_transaction_read_only" or "in_hot_standby" at
* startup, we must determine its state by sending the
* query "SHOW transaction_read_only". This GUC exists in
* all server versions that support 3.0 protocol.
*/
if (conn->default_transaction_read_only == PG_BOOL_UNKNOWN ||
conn->in_hot_standby == PG_BOOL_UNKNOWN)
{
/*
* We use PQsendQueryContinue so that
* conn->errorMessage does not get cleared. We need
* to preserve any error messages related to previous
* hosts we have tried and failed to connect to.
*/
conn->status = CONNECTION_OK;
if (!PQsendQueryContinue(conn,
"SHOW transaction_read_only"))
goto error_return;
/* We'll return to this state when we have the answer */
conn->status = CONNECTION_CHECK_WRITABLE;
return PGRES_POLLING_READING;
}
/* OK, we can make the test */
read_only_server =
(conn->default_transaction_read_only == PG_BOOL_YES ||
conn->in_hot_standby == PG_BOOL_YES);
if ((conn->target_server_type == SERVER_TYPE_READ_WRITE) ?
read_only_server : !read_only_server)
{
/* Wrong server state, reject and try the next host */
if (conn->target_server_type == SERVER_TYPE_READ_WRITE)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("session is read-only\n"));
else
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("session is not read-only\n"));
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/*
* Try next host if any, but we don't want to consider
* additional addresses for this host.
*/
conn->try_next_host = true;
goto keep_going;
}
}
else if (conn->target_server_type == SERVER_TYPE_PRIMARY ||
conn->target_server_type == SERVER_TYPE_STANDBY ||
conn->target_server_type == SERVER_TYPE_PREFER_STANDBY)
{
/*
* If the server didn't report "in_hot_standby" at
* startup, we must determine its state by sending the
* query "SELECT pg_catalog.pg_is_in_recovery()". Servers
* before 9.0 don't have that function, but by the same
* token they don't have any standby mode, so we may just
* assume the result.
*/
if (conn->sversion < 90000)
conn->in_hot_standby = PG_BOOL_NO;
if (conn->in_hot_standby == PG_BOOL_UNKNOWN)
{
/*
* We use PQsendQueryContinue so that
* conn->errorMessage does not get cleared. We need
* to preserve any error messages related to previous
* hosts we have tried and failed to connect to.
*/
conn->status = CONNECTION_OK;
if (!PQsendQueryContinue(conn,
"SELECT pg_catalog.pg_is_in_recovery()"))
goto error_return;
/* We'll return to this state when we have the answer */
conn->status = CONNECTION_CHECK_STANDBY;
return PGRES_POLLING_READING;
}
/* OK, we can make the test */
if ((conn->target_server_type == SERVER_TYPE_PRIMARY) ?
(conn->in_hot_standby == PG_BOOL_YES) :
(conn->in_hot_standby == PG_BOOL_NO))
{
/* Wrong server state, reject and try the next host */
if (conn->target_server_type == SERVER_TYPE_PRIMARY)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server is in hot standby mode\n"));
else
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server is not in hot standby mode\n"));
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/*
* Try next host if any, but we don't want to consider
* additional addresses for this host.
*/
conn->try_next_host = true;
goto keep_going;
}
}
/* We can release the address list now. */
release_conn_addrinfo(conn);
/*
* Contents of conn->errorMessage are no longer interesting
* (and it seems some clients expect it to be empty after a
* successful connection).
*/
resetPQExpBuffer(&conn->errorMessage);
/* We are open for business! */
conn->status = CONNECTION_OK;
return PGRES_POLLING_OK;
}
case CONNECTION_CONSUME:
{
/*
* This state just makes sure the connection is idle after
* we've obtained the result of a SHOW or SELECT query. Once
* we're clear, return to CONNECTION_CHECK_TARGET state to
* decide what to do next. We must transiently set status =
* CONNECTION_OK in order to use the result-consuming
* subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CONSUME;
return PGRES_POLLING_READING;
}
/* Call PQgetResult() again until we get a NULL result */
res = PQgetResult(conn);
if (res != NULL)
{
PQclear(res);
conn->status = CONNECTION_CONSUME;
return PGRES_POLLING_READING;
}
conn->status = CONNECTION_CHECK_TARGET;
goto keep_going;
}
case CONNECTION_CHECK_WRITABLE:
{
/*
* Waiting for result of "SHOW transaction_read_only". We
* must transiently set status = CONNECTION_OK in order to use
* the result-consuming subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CHECK_WRITABLE;
return PGRES_POLLING_READING;
}
res = PQgetResult(conn);
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
PQntuples(res) == 1)
{
char *val = PQgetvalue(res, 0, 0);
/*
* "transaction_read_only = on" proves that at least one
* of default_transaction_read_only and in_hot_standby is
* on, but we don't actually know which. We don't care
* though for the purpose of identifying a read-only
* session, so satisfy the CONNECTION_CHECK_TARGET code by
* claiming they are both on. On the other hand, if it's
* a read-write session, they are certainly both off.
*/
if (strncmp(val, "on", 2) == 0)
{
conn->default_transaction_read_only = PG_BOOL_YES;
conn->in_hot_standby = PG_BOOL_YES;
}
else
{
conn->default_transaction_read_only = PG_BOOL_NO;
conn->in_hot_standby = PG_BOOL_NO;
}
PQclear(res);
/* Finish reading messages before continuing */
conn->status = CONNECTION_CONSUME;
goto keep_going;
}
/* Something went wrong with "SHOW transaction_read_only". */
if (res)
PQclear(res);
/* Append error report to conn->errorMessage. */
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("\"%s\" failed\n"),
"SHOW transaction_read_only");
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/* Try next host. */
conn->try_next_host = true;
goto keep_going;
}
case CONNECTION_CHECK_STANDBY:
{
/*
* Waiting for result of "SELECT pg_is_in_recovery()". We
* must transiently set status = CONNECTION_OK in order to use
* the result-consuming subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CHECK_STANDBY;
return PGRES_POLLING_READING;
}
res = PQgetResult(conn);
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
PQntuples(res) == 1)
{
char *val = PQgetvalue(res, 0, 0);
if (strncmp(val, "t", 1) == 0)
conn->in_hot_standby = PG_BOOL_YES;
else
conn->in_hot_standby = PG_BOOL_NO;
PQclear(res);
/* Finish reading messages before continuing */
conn->status = CONNECTION_CONSUME;
goto keep_going;
}
/* Something went wrong with "SELECT pg_is_in_recovery()". */
if (res)
PQclear(res);
/* Append error report to conn->errorMessage. */
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("\"%s\" failed\n"),
"SELECT pg_is_in_recovery()");
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/* Try next host. */
conn->try_next_host = true;
goto keep_going;
}
default:
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid connection state %d, "
"probably indicative of memory corruption\n"),
conn->status);
goto error_return;
}
/* Unreachable */
error_return:
/*
* We used to close the socket at this point, but that makes it awkward
* for those above us if they wish to remove this socket from their own
* records (an fd_set for example). We'll just have this socket closed
* when PQfinish is called (which is compulsory even after an error, since
* the connection structure must be freed).
*/
conn->status = CONNECTION_BAD;
return PGRES_POLLING_FAILED;
}
|
278272236215939964516135269002509515902
|
fe-connect.c
|
18951971958069127999109334280610413251
|
CWE-522
|
CVE-2021-23222
|
A man-in-the-middle attacker can inject false responses to the client's first few queries, despite the use of SSL certificate verification and encryption.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-23222
|
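For context, here is a hedged sketch of how an application typically drives the PQconnectPoll state machine above, using libpq's public asynchronous API (PQconnectStart, PQconnectPoll, PQsocket); the select() scaffolding and connection string are illustrative only.

// Standard nonblocking connection loop (compile with -lpq).
#include <libpq-fe.h>
#include <sys/select.h>
#include <cstdio>

int main()
{
	PGconn	   *conn = PQconnectStart("host=localhost dbname=postgres");

	if (conn == nullptr || PQstatus(conn) == CONNECTION_BAD)
	{
		std::fprintf(stderr, "startup failed: %s\n",
					 conn ? PQerrorMessage(conn) : "out of memory");
		if (conn)
			PQfinish(conn);
		return 1;
	}

	/* Per the libpq docs, behave as if the last result were POLLING_WRITING. */
	PostgresPollingStatusType poll = PGRES_POLLING_WRITING;

	while (poll != PGRES_POLLING_OK && poll != PGRES_POLLING_FAILED)
	{
		int			sock = PQsocket(conn);
		fd_set		fds;

		FD_ZERO(&fds);
		FD_SET(sock, &fds);
		/* Wait for the readiness condition the state machine asked for. */
		if (poll == PGRES_POLLING_READING)
			select(sock + 1, &fds, nullptr, nullptr, nullptr);
		else
			select(sock + 1, nullptr, &fds, nullptr, nullptr);
		poll = PQconnectPoll(conn);	/* advance the state machine */
	}

	if (poll == PGRES_POLLING_FAILED)
		std::fprintf(stderr, "connection failed: %s\n", PQerrorMessage(conn));
	else
		std::printf("connected\n");
	PQfinish(conn);
	return 0;
}

Each PGRES_POLLING_READING or PGRES_POLLING_WRITING return value tells the loop which socket condition to wait for before calling PQconnectPoll again.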
195,389
|
tensorflow
|
c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
|
Remove a `DCHECK`-fail, log an error instead.
`DCHECK` in debug mode results in crashes. TensorFlow has had multiple vulnerabilities due to this.
Outside of debug mode, `DCHECK` is a no-op.
A better alternative is to report an error to the log buffer and continue. This should happen both in debug mode and in prod mode.
PiperOrigin-RevId: 408375925
Change-Id: Id5b3e19c73f3fbe0cc4bba26ca44ff9607bb6356
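A hedged sketch of the hardening pattern this commit applies, using stand-in macros rather than TensorFlow's real logging headers: an assertion that aborts debug builds and vanishes in release builds is replaced by an error log that fires in both.

// Illustrative only; TensorFlow's actual DCHECK/LOG come from its logging
// library, but the semantics shown here are the relevant ones.
#include <cstdio>
#include <cstdlib>

#ifndef NDEBUG
#define DCHECK(cond)                                            \
  do {                                                          \
    if (!(cond)) {                                              \
      std::fprintf(stderr, "DCHECK failed: %s\n", #cond);       \
      std::abort();                                             \
    }                                                           \
  } while (0)
#else
#define DCHECK(cond) ((void)0)  // no-op in release builds
#endif

#define LOG_ERROR(msg) std::fprintf(stderr, "ERROR: %s\n", msg)

// Before: attacker-controlled input reaching this aborts a debug build
// (denial of service) and is silently ignored in a release build.
void check_unique_before(bool is_duplicate) {
  DCHECK(!is_duplicate);
}

// After: report the malformed input and keep going, in every build mode.
void check_unique_after(bool is_duplicate) {
  if (is_duplicate) LOG_ERROR("AttrDef names must be unique");
}

int main() {
  check_unique_before(false);  // passes; `true` would abort a debug build
  check_unique_after(true);    // logs an error instead of aborting
  return 0;
}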
| 1
|
bool RepeatedAttrDefEqual(
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
std::unordered_map<string, const OpDef::AttrDef*> a1_set;
for (const OpDef::AttrDef& def : a1) {
DCHECK(a1_set.find(def.name()) == a1_set.end())
<< "AttrDef names must be unique, but '" << def.name()
<< "' appears more than once";
a1_set[def.name()] = &def;
}
for (const OpDef::AttrDef& def : a2) {
auto iter = a1_set.find(def.name());
if (iter == a1_set.end()) return false;
if (!AttrDefEqual(*iter->second, def)) return false;
a1_set.erase(iter);
}
if (!a1_set.empty()) return false;
return true;
}
|
228350956694349821922378909162368693155
|
op_def_util.cc
|
43202597261631718571985626227626810269
|
CWE-617
|
CVE-2022-23565
|
Tensorflow is an Open Source Machine Learning Framework. An attacker can trigger denial of service via assertion failure by altering a `SavedModel` on disk such that `AttrDef`s of some operation are duplicated. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23565
|
225,086
|
tensorflow
|
c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
|
https://github.com/tensorflow/tensorflow
|
https://github.com/tensorflow/tensorflow/commit/c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
|
Remove a `DCHECK`-fail, log an error instead.
`DCHECK` in debug mode results in crashes. TensorFlow has had multiple vulnerabilities due to this.
Outside of debug mode, `DCHECK` is a no-op.
A better alternative is to report an error to the log buffer and continue. This should happen both in debug mode and in prod mode.
PiperOrigin-RevId: 408375925
Change-Id: Id5b3e19c73f3fbe0cc4bba26ca44ff9607bb6356
| 0
|
bool RepeatedAttrDefEqual(
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
std::unordered_map<string, const OpDef::AttrDef*> a1_set;
for (const OpDef::AttrDef& def : a1) {
if (a1_set.find(def.name()) != a1_set.end()) {
LOG(ERROR) << "AttrDef names must be unique, but '" << def.name()
<< "' appears more than once";
}
a1_set[def.name()] = &def;
}
for (const OpDef::AttrDef& def : a2) {
auto iter = a1_set.find(def.name());
if (iter == a1_set.end()) return false;
if (!AttrDefEqual(*iter->second, def)) return false;
a1_set.erase(iter);
}
if (!a1_set.empty()) return false;
return true;
}
|
7221108948147885063916901261116103162
|
op_def_util.cc
|
99670691263177784698689977477403265008
|
CWE-617
|
CVE-2022-23565
|
Tensorflow is an Open Source Machine Learning Framework. An attacker can trigger denial of service via assertion failure by altering a `SavedModel` on disk such that `AttrDef`s of some operation are duplicated. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-23565
|
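The fixed function above relies on std::unordered_map's overwrite-on-duplicate behavior to continue safely after logging: operator[] on an existing key replaces the value rather than failing. A standalone sketch (std::string stands in for TensorFlow's string alias; the data is made up):

#include <cstdio>
#include <string>
#include <unordered_map>

int main() {
  std::unordered_map<std::string, int> a1_set;
  const std::pair<const char*, int> defs[] = {{"T", 1}, {"N", 2}, {"T", 3}};

  for (const auto& def : defs) {
    if (a1_set.find(def.first) != a1_set.end())
      std::fprintf(stderr,
                   "ERROR: AttrDef names must be unique, but '%s' "
                   "appears more than once\n",
                   def.first);
    a1_set[def.first] = def.second;  // operator[] overwrites: last one wins
  }
  // a1_set now holds {"T": 3, "N": 2}; the comparison that follows in the
  // real function proceeds deterministically instead of aborting.
  std::printf("T -> %d, N -> %d\n", a1_set["T"], a1_set["N"]);
  return 0;
}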