Skip to content

Commit

Permalink
Parse and extract saved Chrome Credentials (#1566)
Browse files Browse the repository at this point in the history
* Don't force password hash algo

* Initial checkin of stored Chrome Credentials analyser

* Report & summary

* Add to jobs

* Use dfimagetools for extraction

* Don't actually need the package

* YAPF

* Update dfimagetools version

* Quote fix

* Return what we have so far if exception

* Give a strategy for divergent branches

* Attempt fast-forward
  • Loading branch information
Fryyyyy authored Nov 19, 2024
1 parent c778de7 commit 54b2a0b
Show file tree
Hide file tree
Showing 11 changed files with 1,220 additions and 914 deletions.
1,834 changes: 922 additions & 912 deletions poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ acstore = { version = "20240128" }
backoff = { version = ">=2.2.1" }
celery = { version = "^5.2.2" }
dfDewey = { version = "^20231016", optional = true }
dfimagetools = { version = "^20240301", optional = true }
dfimagetools = { version = "^20241116", optional = true }
docker = { version = "^6.1.3" }
fastapi = {extras = ["all"], version = ">=0.75.0,<0.99.0"}
filelock = { version = "*" }
Expand Down
Binary file added test_data/test_login_data.sqlite
Binary file not shown.
1 change: 1 addition & 0 deletions turbinia/config/recipes/all.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ globals:
jobs_allowlist:
- BinaryExtractorJob
- BulkExtractorJob
- ChromeCredsAnalysisTask
- FileSystemTimelineJob
- FsstatJob
- GrepJob
Expand Down
1 change: 1 addition & 0 deletions turbinia/jobs/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
"""Turbinia jobs."""
from turbinia.jobs import binary_extractor
from turbinia.jobs import bulk_extractor
from turbinia.jobs import chromecreds
from turbinia.jobs import containerd
from turbinia.jobs import dfdewey
from turbinia.jobs import docker
Expand Down
53 changes: 53 additions & 0 deletions turbinia/jobs/chromecreds.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job to execute chromecreds analysis task."""

from turbinia.evidence import ContainerdContainer
from turbinia.evidence import CompressedDirectory
from turbinia.evidence import Directory
from turbinia.evidence import DockerContainer
from turbinia.evidence import EwfDisk
from turbinia.evidence import GoogleCloudDisk
from turbinia.evidence import GoogleCloudDiskRawEmbedded
from turbinia.evidence import RawDisk
from turbinia.evidence import ReportText
from turbinia.jobs import interface
from turbinia.jobs import manager
from turbinia.workers.analysis import chromecreds


class ChromeCredsAnalysisJob(interface.TurbiniaJob):
  """Job that schedules analysis of saved Chrome credentials."""

  # Evidence types this job accepts as input.
  evidence_input = [
      CompressedDirectory, ContainerdContainer, Directory, DockerContainer,
      EwfDisk, GoogleCloudDisk, GoogleCloudDiskRawEmbedded, RawDisk
  ]
  # Evidence types this job produces.
  evidence_output = [ReportText]

  NAME = 'ChromeCredsAnalysisJob'

  def create_tasks(self, evidence):
    """Creates one analysis task per evidence object.

    Args:
      evidence: List of evidence objects to process

    Returns:
      A list of tasks to schedule.
    """
    return [chromecreds.ChromeCredsAnalysisTask() for _ in evidence]


manager.JobsManager.RegisterJob(ChromeCredsAnalysisJob)
32 changes: 32 additions & 0 deletions turbinia/lib/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,38 @@ def extract_artifacts(artifact_names, disk_path, output_dir, credentials=[]):
return _image_export(image_export_cmd, output_dir, disk_path)


def extract_data_stream(artifact_names, disk_path, output_dir, credentials=None):
  """Extract artifacts using extract_data_streams from dfImageTools.

  Args:
    artifact_names (list[str]): List of artifact definition names.
    disk_path (str): Path to either a raw disk image or a block device.
    output_dir (str): Path to directory to store the extracted files.
    credentials (list[tuple[str, str]]): Optional list of
        (credential_type, credential_data) pairs to use for decryption.
        Defaults to None (no credentials) rather than a mutable ``[]`` to
        avoid the shared mutable default argument pitfall.

  Returns:
    list: paths to extracted files.

  Raises:
    TurbiniaException: If an error occurs when running image_export.
  """
  # extract_data_streams expects artifact names as a comma separated string.
  artifacts = ','.join(artifact_names)
  export_data_stream_cmd = [
      'extract_data_streams', '--artifact_filters', artifacts, '-t', output_dir,
      '--partitions', 'all', '--volumes', 'all'
  ]

  if credentials:
    for credential_type, credential_data in credentials:
      export_data_stream_cmd.extend(
          ['--credential', f'{credential_type:s}:{credential_data:s}'])

  # The disk path is the final positional argument.
  export_data_stream_cmd.append(disk_path)

  return _image_export(export_data_stream_cmd, output_dir, disk_path)


def extract_files(file_name, disk_path, output_dir, credentials=[]):
"""Extract files using image_export from Plaso.
Expand Down
2 changes: 2 additions & 0 deletions turbinia/task_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ class TaskLoader():
'BinaryExtractorTask',
'BulkExtractorTask',
'ContainerdEnumerationTask',
'ChromeCredsAnalysisTask',
'DfdeweyTask',
'DockerContainersEnumerationTask',
'FileArtifactExtractionTask',
Expand Down Expand Up @@ -100,6 +101,7 @@ def get_task(self, task_name):
#
# Late imports to minimize what loads all Tasks
from turbinia.workers.abort import AbortTask
from turbinia.workers.analysis.chromecreds import ChromeCredsAnalysisTask
from turbinia.workers.analysis.jenkins import JenkinsAnalysisTask
from turbinia.workers.analysis.jupyter import JupyterAnalysisTask
from turbinia.workers.analysis.linux_acct import LinuxAccountAnalysisTask
Expand Down
146 changes: 146 additions & 0 deletions turbinia/workers/analysis/chromecreds.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
# Copyright 2024 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for analysing saved Chrome Credentials."""

import os
import sqlite3

from turbinia import TurbiniaException

from turbinia.evidence import EvidenceState as state
from turbinia.evidence import ReportText
from turbinia.lib import text_formatter as fmt
from turbinia.lib.utils import extract_data_stream
from turbinia.workers import Priority
from turbinia.workers import TurbiniaTask


class ChromeCredsAnalysisTask(TurbiniaTask):
  """Task to analyze a Chrome Login Data file for saved credentials."""

  # Does not need to be MOUNTED as this Task uses extract_data_stream()
  REQUIRED_STATES = [state.ATTACHED, state.CONTAINER_MOUNTED]

  def run(self, evidence, result):
    """Run the ChromeCreds worker.

    Args:
      evidence (Evidence object): The evidence to process
      result (TurbiniaTaskResult): The object to place task results into.

    Returns:
      TurbiniaTaskResult object.
    """
    # Where to store the resulting output file.
    output_file_name = 'chrome_creds_analysis.txt'
    output_file_path = os.path.join(self.output_dir, output_file_name)

    # What type of evidence we should output.
    output_evidence = ReportText(source_path=output_file_path)

    try:
      collected_artifacts = extract_data_stream(
          artifact_names=['ChromiumBasedBrowsersLoginDataDatabaseFile'],
          disk_path=evidence.local_path, output_dir=self.output_dir,
          credentials=evidence.credentials)
    except TurbiniaException as exception:
      result.close(self, success=False, status=str(exception))
      return result

    extracted_creds = {}
    for collected_artifact in collected_artifacts:
      # Merge per-site username lists rather than dict.update(), so that
      # credentials for the same site found in multiple profile databases
      # are accumulated instead of overwritten.
      for site, users in self._extract_chrome_creds(collected_artifact).items():
        extracted_creds.setdefault(site, []).extend(users)

    # De-duplicate usernames per site.
    for key in extracted_creds:
      extracted_creds[key] = list(set(extracted_creds[key]))

    (report, priority, summary) = self.summarise_creds(extracted_creds)

    output_evidence.text_data = report
    result.report_priority = priority
    result.report_data = report

    # Write the report to the output file.
    with open(output_file_path, 'wb') as fh:
      fh.write(output_evidence.text_data.encode('utf-8'))

    # Add the resulting evidence to the result object.
    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)
    return result

  @staticmethod
  def summarise_creds(creds):
    """Summarise the sum total of extracted credentials.

    Args:
      creds (dict[List[str]]): dict mapping domain to a list of usernames.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
    report = []
    summary = 'No saved credentials found'
    priority = Priority.LOW

    if creds:
      priority = Priority.MEDIUM
      summary = f'{len(creds)} saved credentials found in Chrome Login Data'
      report.insert(0, fmt.heading4(fmt.bold(summary)))
      report.append(fmt.bullet(fmt.bold('Credentials:')))

    for k, v in creds.items():
      line = f"Site '{k}' with users '{v}'"
      report.append(fmt.bullet(line, level=2))

    report = '\n'.join(report)
    return report, priority, summary

  @staticmethod
  def _extract_chrome_creds(filepath):
    """Extract saved credentials from a Chrome Login Database file.

    Args:
      filepath (str): path to Login Database file.

    Returns:
      dict: mapping origin URL to the list of saved usernames for it.
    """
    ret = {}

    con = sqlite3.connect(filepath)
    try:
      cur = con.cursor()
      for row in cur.execute('SELECT origin_url, username_value FROM logins'):
        # Skip rows with an empty username.
        if not row[1]:
          continue
        ret.setdefault(row[0], []).append(row[1])
    except sqlite3.DatabaseError:
      # Covers both sqlite3.OperationalError (e.g. missing 'logins' table)
      # and corrupt / non-SQLite files; return what we have so far.
      return ret
    finally:
      # Always release the connection; the original code leaked it on the
      # exception paths above.
      con.close()
    return ret
61 changes: 61 additions & 0 deletions turbinia/workers/analysis/chromecreds_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
# Copyright 2024 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Chrome Credentials analysis task."""

import os
import unittest

from turbinia import config
from turbinia.workers.analysis import chromecreds


class ChromeCredsAnalysisTaskTest(unittest.TestCase):
  """Tests for ChromeCredsAnalysisTask."""

  # Credentials expected from the checked-in test_login_data.sqlite fixture.
  EXPECTED_CREDENTIALS = {'http://test.com': ['testuser']}
  # Synthetic input for the report test: two sites, three usernames total.
  TWO_CREDENTIALS = {
      'http://test.com': ['testuser'],
      'http://example.com': ['exampleuser', 'admin']
  }
  # Absolute path to the SQLite fixture; resolved in setUp().
  TEST_SQL = None
  # Exact report text expected for TWO_CREDENTIALS.
  CREDS_REPORT = """#### **2 saved credentials found in Chrome Login Data**
* **Credentials:**
* Site 'http://test.com' with users '['testuser']'
* Site 'http://example.com' with users '['exampleuser', 'admin']'"""

  def setUp(self):
    """Resolves the path to the SQLite test fixture."""
    super(ChromeCredsAnalysisTaskTest, self).setUp()
    filedir = os.path.dirname(os.path.realpath(__file__))
    # Fixture lives in <repo_root>/test_data, three levels above this file.
    self.TEST_SQL = os.path.join(
        filedir, '..', '..', '..', 'test_data', 'test_login_data.sqlite')

  def test_extract_chrome_creds(self):
    """Tests the _extract_chrome_creds method against the fixture database."""
    config.LoadConfig()
    task = chromecreds.ChromeCredsAnalysisTask()

    # pylint: disable=protected-access
    credentials = task._extract_chrome_creds(self.TEST_SQL)
    self.assertEqual(credentials, self.EXPECTED_CREDENTIALS)

  def test_summarise_creds(self):
    """Tests the summarise_creds method's report, priority and summary."""
    config.LoadConfig()
    task = chromecreds.ChromeCredsAnalysisTask()

    (report, priority, summary) = task.summarise_creds(self.TWO_CREDENTIALS)
    self.assertEqual(report, self.CREDS_REPORT)
    # Priority.MEDIUM (returned by summarise_creds for non-empty input) == 50.
    self.assertEqual(priority, 50)
    self.assertEqual(summary, '2 saved credentials found in Chrome Login Data')
2 changes: 1 addition & 1 deletion turbinia/workers/analysis/yara.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def update_rules(self, rules):
try:
repository = git.Repo(path)
origin = repository.remotes.origin
origin.pull(depth=1)
origin.pull(ff=True, depth=1)
log.info('Successfully updated rules from %s in %s', repo, path)
except git.exc.InvalidGitRepositoryError as e:
log.error(
Expand Down

0 comments on commit 54b2a0b

Please sign in to comment.