Apply pylint to build/soong/bloaty
1. Run black --line-length 80 -S build/soong/bloaty to fix formatting.
2. Annotate # pylint: disable=import-error to skip checks for imports.
   The imports are provided by Soong during m <target>.

Test: m bloaty_merger_test
Test: pylint --rcfile tools/repohooks/tools/pylintrc \
      build/soong/bloaty/bloaty_merger.py \
      build/soong/bloaty/bloaty_merger_test.py
Bug: 195738175
Change-Id: I4579a80203de41d48992424f264dd1cdbafc854c
parent eaf5e1b3ec · commit 16c2b8c3d3 · 2 changed files with 78 additions and 72 deletions
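The suppression in step 2 is a module-level pylint pragma: placed on a line of its own, it disables import-error from that point to the end of the file, which covers exactly the imports that only resolve inside the build. The pattern as it appears in both files below (the trailing comment is illustrative, not part of the change):

    # pylint: disable=import-error
    import ninja_rsp  # provided by Soong during m <target>; unresolvable for a bare pylint run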
build/soong/bloaty/bloaty_merger.py (resulting code after the change)
@@ -24,58 +24,63 @@ import argparse

import csv
import gzip

# pylint: disable=import-error
import ninja_rsp

import file_sections_pb2

BLOATY_EXTENSION = ".bloaty.csv"


def parse_csv(path):
    """Parses a Bloaty-generated CSV file into a protobuf.

    Args:
      path: The filepath to the CSV file, relative to $ANDROID_TOP.

    Returns:
      A file_sections_pb2.File if the file was found; None otherwise.
    """
    file_proto = None
    with open(path, newline='') as csv_file:
        file_proto = file_sections_pb2.File()
        if path.endswith(BLOATY_EXTENSION):
            file_proto.path = path[: -len(BLOATY_EXTENSION)]
        section_reader = csv.DictReader(csv_file)
        for row in section_reader:
            section = file_proto.sections.add()
            section.name = row["sections"]
            section.vm_size = int(row["vmsize"])
            section.file_size = int(row["filesize"])
    return file_proto


def create_file_size_metrics(input_list, output_proto):
    """Creates a FileSizeMetrics proto from a list of CSV files.

    Args:
      input_list: The path to the file which contains the list of CSV files.
        Each filepath is separated by a space.
      output_proto: The path for the output protobuf. It will be compressed
        using gzip.
    """
    metrics = file_sections_pb2.FileSizeMetrics()
    reader = ninja_rsp.NinjaRspFileReader(input_list)
    for csv_path in reader:
        file_proto = parse_csv(csv_path)
        if file_proto:
            metrics.files.append(file_proto)
    with gzip.open(output_proto, "wb") as output:
        output.write(metrics.SerializeToString())


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input_list_file", help="List of bloaty csv files.")
    parser.add_argument("output_proto", help="Output proto.")
    args = parser.parse_args()
    create_file_size_metrics(args.input_list_file, args.output_proto)


if __name__ == '__main__':
    main()
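Two of these imports never resolve outside the build: ninja_rsp (Soong's reader for Ninja response files, which is why input_list is a space-separated file list) and file_sections_pb2 (protobuf bindings; by protobuf convention a *_pb2 module is generated from a matching .proto file). Note also that, despite the docstring, a missing CSV surfaces as a FileNotFoundError raised by open() rather than a None return; the tests below rely on exactly that. To exercise parse_csv locally, a minimal, hypothetical stand-in for the generated module could look like the sketch below; it mimics only the fields and the repeated-field add() API that parse_csv touches.

    # Hypothetical stand-in for the Soong-generated file_sections_pb2 module.
    # The real classes are protobuf messages; this is a local-testing sketch only.
    import dataclasses


    @dataclasses.dataclass
    class Section:
        name: str = ""
        vm_size: int = 0
        file_size: int = 0


    class _RepeatedSection(list):
        """Mimics the add() API of a protobuf repeated message field."""

        def add(self):
            section = Section()
            self.append(section)
            return section


    class File:
        def __init__(self):
            self.path = ""
            self.sections = _RepeatedSection()

Dropped onto the import path as file_sections_pb2.py, this is enough for the parse_csv path; the serialization calls (SerializeToString, ParseFromString) still require the real generated code.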
build/soong/bloaty/bloaty_merger_test.py (resulting code after the change)
@@ -14,6 +14,7 @@

import gzip
import unittest

# pylint: disable=import-error
from pyfakefs import fake_filesystem_unittest

import bloaty_merger

@@ -21,46 +22,46 @@ import file_sections_pb2


class BloatyMergerTestCase(fake_filesystem_unittest.TestCase):
    def setUp(self):
        self.setUpPyfakefs()

    def test_parse_csv(self):
        csv_content = "sections,vmsize,filesize\nsection1,2,3\n"
        self.fs.create_file("file1.bloaty.csv", contents=csv_content)
        pb = bloaty_merger.parse_csv("file1.bloaty.csv")
        self.assertEqual(pb.path, "file1")
        self.assertEqual(len(pb.sections), 1)
        s = pb.sections[0]
        self.assertEqual(s.name, "section1")
        self.assertEqual(s.vm_size, 2)
        self.assertEqual(s.file_size, 3)

    def test_missing_file(self):
        with self.assertRaises(FileNotFoundError):
            bloaty_merger.parse_csv("missing.bloaty.csv")

    def test_malformed_csv(self):
        csv_content = "header1,heaVder2,header3\n4,5,6\n"
        self.fs.create_file("file1.bloaty.csv", contents=csv_content)
        with self.assertRaises(KeyError):
            bloaty_merger.parse_csv("file1.bloaty.csv")

    def test_create_file_metrics(self):
        file_list = "file1.bloaty.csv file2.bloaty.csv"
        file1_content = "sections,vmsize,filesize\nsection1,2,3\nsection2,7,8"
        file2_content = "sections,vmsize,filesize\nsection1,4,5\n"

        self.fs.create_file("files.lst", contents=file_list)
        self.fs.create_file("file1.bloaty.csv", contents=file1_content)
        self.fs.create_file("file2.bloaty.csv", contents=file2_content)

        bloaty_merger.create_file_size_metrics("files.lst", "output.pb.gz")

        metrics = file_sections_pb2.FileSizeMetrics()
        with gzip.open("output.pb.gz", "rb") as output:
            metrics.ParseFromString(output.read())


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(BloatyMergerTestCase)
    unittest.TextTestRunner(verbosity=2).run(suite)
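Once a build has produced the compressed metrics file, it can be read back the same way test_create_file_metrics does. A minimal sketch, assuming the generated file_sections_pb2 module is importable and that output.pb.gz is whatever path was handed to create_file_size_metrics:

    import gzip

    import file_sections_pb2  # generated bindings; assumed importable here

    metrics = file_sections_pb2.FileSizeMetrics()
    with gzip.open("output.pb.gz", "rb") as f:
        metrics.ParseFromString(f.read())

    # Summarize each file's sections, mirroring the fields parse_csv populates.
    for file_proto in metrics.files:
        on_disk = sum(s.file_size for s in file_proto.sections)
        print(f"{file_proto.path}: {len(file_proto.sections)} sections, {on_disk} bytes")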