Compare commits

...

5 commits

Author SHA1 Message Date
Andrew Eikum
265c5ffaed proton: Better handle broken symlinks 2022-06-22 23:00:45 +03:00
Andrew Eikum
54c2347e15 proton: Add file_exists helper function
To make desired handling of symlinks more clear at the callsite.
2022-06-22 23:00:45 +03:00
Andrew Eikum
43068facc4 media-converter: Add MEDIACONV_DONT_DISCARD 2022-06-22 23:00:45 +03:00
Andrew Eikum
9bd80565b3 media-converter: Don't panic on unknown tags for read-only DBs
CW-Bug-Id: #19516
2022-06-22 23:00:41 +03:00
Andrew Eikum
cecfa8c9fa media-converter: Clean up already-converted entries
CW-Bug-Id: #19614
2022-02-14 09:50:45 -06:00
5 changed files with 395 additions and 75 deletions

View file

@ -34,6 +34,7 @@ use glib::subclass::prelude::*;
use crate::format_hash;
use crate::HASH_SEED;
use crate::discarding_disabled;
use gst;
use gst::prelude::*;
@ -47,6 +48,7 @@ use std::io;
use std::io::Read;
use std::fs;
use std::fs::OpenOptions;
use std::collections::HashSet;
#[cfg(target_arch = "x86")]
use crate::murmur3_x86_128::murmur3_x86_128_full as murmur3_128_full;
@ -176,8 +178,8 @@ const _AUDIOCONV_FLAG_RESERVED2: u32 = 0x40000000; /* not yet used */
const _AUDIOCONV_FLAG_V2: u32 = 0x80000000; /* indicates a "version 2" header, process somehow differently (TBD) */
/* properties of the "blank" audio file */
const BLANK_AUDIO_FILE_LENGTH_MS: f32 = 10.0;
const BLANK_AUDIO_FILE_RATE: f32 = 48000.0;
const _BLANK_AUDIO_FILE_LENGTH_MS: f32 = 10.0;
const _BLANK_AUDIO_FILE_RATE: f32 = 48000.0;
static CAT: Lazy<gst::DebugCategory> = Lazy::new(|| {
gst::DebugCategory::new(
@ -186,22 +188,116 @@ static CAT: Lazy<gst::DebugCategory> = Lazy::new(|| {
Some("Proton audio converter"))
});
static DUMP_FOZDB: Lazy<Mutex<Option<fossilize::StreamArchive>>> = Lazy::new(|| {
let dump_file_path = match std::env::var("MEDIACONV_AUDIO_DUMP_FILE") {
Err(_) => { return Mutex::new(None); },
Ok(c) => c,
};
/* Lazily-opened wrapper around the audio dump fossilize database,
 * remembering whether the one-time cleanup of already-transcoded
 * entries has been performed. */
struct AudioConverterDumpFozdb {
fozdb: Option<fossilize::StreamArchive>, /* None until open() succeeds */
already_cleaned: bool, /* set once discard_transcoded() has run */
}
let dump_file_path = std::path::Path::new(&dump_file_path);
if fs::create_dir_all(&dump_file_path.parent().unwrap()).is_err() {
return Mutex::new(None);
impl AudioConverterDumpFozdb {
fn new() -> Self {
Self {
fozdb: None,
already_cleaned: false,
}
}
match fossilize::StreamArchive::new(&dump_file_path, OpenOptions::new().write(true).read(true).create(true), AUDIOCONV_FOZ_NUM_TAGS) {
Ok(newdb) => Mutex::new(Some(newdb)),
Err(_) => Mutex::new(None),
/* Opens the dump database named by MEDIACONV_AUDIO_DUMP_FILE if it is not
 * already open. On any failure (variable unset, parent-directory creation
 * or archive open failing) self.fozdb stays None; callers inspect
 * self.fozdb after the call. Returns &mut self to allow chaining. */
fn open(&mut self, create: bool) -> &mut Self {
if self.fozdb.is_none() {
let dump_file_path = match std::env::var("MEDIACONV_AUDIO_DUMP_FILE") {
Err(_) => { return self; },
Ok(c) => c,
};
let dump_file_path = std::path::Path::new(&dump_file_path);
if fs::create_dir_all(&dump_file_path.parent().unwrap()).is_err() {
return self;
}
/* 'create' controls whether a missing dump file is created
 * (true when dumping, false when only cleaning up) */
match fossilize::StreamArchive::new(&dump_file_path, OpenOptions::new().write(true).read(true).create(create), false /* read-only? */, AUDIOCONV_FOZ_NUM_TAGS) {
Ok(newdb) => {
self.fozdb = Some(newdb);
},
Err(_) => {
return self;
},
}
}
self
}
/* Drops the archive handle, closing the underlying file. */
fn close(&mut self) {
self.fozdb = None
}
/* One-time cleanup of the dump database: streams whose chunks all have
 * transcoded output in the read-only MEDIACONV_AUDIO_TRANSCODED_FILE
 * database are discarded, together with their audio-data and codec-info
 * chunks, unless a chunk is also referenced by a stream that is not yet
 * fully transcoded. Runs at most once per process. */
fn discard_transcoded(&mut self) {
if self.already_cleaned {
return;
}
/* MEDIACONV_DONT_DISCARD set: skip the cleanup but don't retry later */
if discarding_disabled() {
self.already_cleaned = true;
return;
}
/* open without creating: if there is no dump db, nothing to clean */
if let Some(fozdb) = &mut self.open(false).fozdb {
if let Ok(read_fozdb_path) = std::env::var("MEDIACONV_AUDIO_TRANSCODED_FILE") {
if let Ok(read_fozdb) = fossilize::StreamArchive::new(&read_fozdb_path, OpenOptions::new().read(true), true /* read-only? */, AUDIOCONV_FOZ_NUM_TAGS) {
let mut chunks_to_discard = HashSet::<(u32, u128)>::new();
let mut chunks_to_keep = HashSet::<(u32, u128)>::new();
/* a stream entry is a sequence of 16-byte chunk ids */
for stream_id in fozdb.iter_tag(AUDIOCONV_FOZ_TAG_STREAM).cloned().collect::<Vec<u128>>() {
if let Ok(chunks_size) = fozdb.entry_size(AUDIOCONV_FOZ_TAG_STREAM, stream_id) {
let mut buf = vec![0u8; chunks_size].into_boxed_slice();
if fozdb.read_entry(AUDIOCONV_FOZ_TAG_STREAM, stream_id, 0, &mut buf, fossilize::CRCCheck::WithCRC).is_ok() {
/* discardable only if every chunk has transcoded output */
let mut has_all = true;
let mut stream_chunks = Vec::<(u32, u128)>::new();
for i in 0..(chunks_size / 16) {
let offs = i * 16;
let chunk_id = u128::from_le_bytes(copy_into_array(&buf[offs..offs + 16]));
if !read_fozdb.has_entry(AUDIOCONV_FOZ_TAG_PTNADATA, chunk_id) {
has_all = false;
break;
}
stream_chunks.push((AUDIOCONV_FOZ_TAG_AUDIODATA, chunk_id));
}
for x in stream_chunks {
if has_all {
chunks_to_discard.insert(x);
chunks_to_discard.insert((AUDIOCONV_FOZ_TAG_CODECINFO, x.1));
} else {
chunks_to_keep.insert(x);
chunks_to_keep.insert((AUDIOCONV_FOZ_TAG_CODECINFO, x.1));
}
}
if has_all {
chunks_to_discard.insert((AUDIOCONV_FOZ_TAG_STREAM, stream_id));
}
}
}
}
/* never discard a chunk some other stream still needs */
let mut chunks = Vec::<(u32, u128)>::new();
for x in chunks_to_discard.difference(&chunks_to_keep) {
chunks.push(*x);
}
/* a failed rewrite may leave the db inconsistent: drop the handle */
if fozdb.discard_entries(&chunks).is_err() {
self.close();
}
}
}
}
self.already_cleaned = true;
}
}
static DUMP_FOZDB: Lazy<Mutex<AudioConverterDumpFozdb>> = Lazy::new(|| {
Mutex::new(AudioConverterDumpFozdb::new())
});
static DUMPING_DISABLED: Lazy<bool> = Lazy::new(|| {
@ -333,8 +429,9 @@ impl StreamState {
fn write_to_foz(&self) -> Result<(), gst::LoggableError> {
if self.needs_dump && !self.buffers.is_empty() {
let mut db = (*DUMP_FOZDB).lock().unwrap();
let db = match &mut *db {
let db = &mut (*DUMP_FOZDB).lock().unwrap();
let mut db = &mut db.open(true).fozdb;
let db = match &mut db {
Some(d) => d,
None => { return Err(gst_loggable_error!(CAT, "Failed to open fossilize db!")) },
};
@ -466,7 +563,7 @@ impl AudioConvState {
gst_loggable_error!(CAT, "MEDIACONV_AUDIO_TRANSCODED_FILE is not set!")
})?;
let read_fozdb = match fossilize::StreamArchive::new(&read_fozdb_path, OpenOptions::new().read(true), AUDIOCONV_FOZ_NUM_TAGS) {
let read_fozdb = match fossilize::StreamArchive::new(&read_fozdb_path, OpenOptions::new().read(true), true /* read-only? */, AUDIOCONV_FOZ_NUM_TAGS) {
Ok(s) => Some(s),
Err(_) => None,
};
@ -660,8 +757,11 @@ impl ElementImpl for AudioConv {
{
/* open fozdb here; this is the right place to fail and opening may be
* expensive */
let db = (*DUMP_FOZDB).lock().unwrap();
if (*db).is_none() {
(*DUMP_FOZDB).lock().unwrap().discard_transcoded();
let db = &mut (*DUMP_FOZDB).lock().unwrap();
let db = &mut db.open(true).fozdb;
if db.is_none() {
gst_error!(CAT, "Failed to open fossilize db!");
return Err(gst::StateChangeError);
}

View file

@ -197,6 +197,7 @@ impl PayloadEntry {
pub struct StreamArchive {
file: fs::File,
read_only: bool,
seen_blobs: Vec<HashMap<FossilizeHash, PayloadEntry>>,
@ -210,7 +211,7 @@ pub enum CRCCheck {
impl StreamArchive {
pub fn new<P: AsRef<std::path::Path>>(filename: P, fileopts: &OpenOptions, num_tags: usize) -> Result<Self, Error> {
pub fn new<P: AsRef<std::path::Path>>(filename: P, fileopts: &OpenOptions, read_only: bool, num_tags: usize) -> Result<Self, Error> {
let file = fileopts.open(filename)?;
@ -221,6 +222,7 @@ impl StreamArchive {
let mut ret = Self {
file,
read_only,
seen_blobs,
write_pos: 0,
};
@ -272,6 +274,10 @@ impl StreamArchive {
match res {
Ok(p) => {
self.write_pos = p;
if tag >= self.seen_blobs.len() && self.read_only {
/* ignore unknown tags for read-only DBs, otherwise panic */
continue;
}
self.seen_blobs[tag].insert(hash, payload_entry);
},
@ -427,4 +433,127 @@ impl StreamArchive {
Ok(())
}
/* rewrites the database, discarding entries listed in 'to_discard' */
/* Compacts the archive in place: scans every payload from the start of
 * the file, skipping discarded entries and copying kept entries back to
 * 'write_pos'. As long as nothing has been dropped yet, kept entries are
 * merely seeked over instead of rewritten. Finally truncates the file to
 * the new length and re-runs prepare() to rebuild the in-memory index. */
pub fn discard_entries(&mut self, to_discard: &Vec<(FossilizeTag, FossilizeHash)>) -> Result<(), Error> {
self.write_pos = self.file.seek(io::SeekFrom::Start(0))?;
/* invalidate the in-memory index; prepare() rebuilds it at the end */
for v in self.seen_blobs.iter_mut() {
v.clear();
}
/* validate the magic/version header before touching anything */
let mut magic_and_version = [0 as u8; MAGIC_LEN_BYTES];
self.file.read_exact(&mut magic_and_version)?;
let version = magic_and_version[15];
if magic_and_version[0..12] != FOSSILIZE_MAGIC ||
version < FOSSILIZE_MIN_COMPAT_VERSION ||
version > FOSSILIZE_VERSION {
return Err(Error::CorruptDatabase);
}
self.write_pos = MAGIC_LEN_BYTES as u64;
loop {
/* each entry: ASCII name (tag + hash) followed by a payload header */
let mut name_and_header = [0u8; PAYLOAD_NAME_LEN_BYTES + PAYLOAD_HEADER_LEN_BYTES];
let res = self.file.read_exact(&mut name_and_header);
if let Err(fail) = res {
/* EOF here just means the whole database has been processed */
if fail.kind() == io::ErrorKind::UnexpectedEof {
break;
}
return Err(Error::IOError(fail));
}
let name = &name_and_header[0..PAYLOAD_NAME_LEN_BYTES];
let tag = FossilizeTag::from_ascii_bytes(&name[0..FOSSILIZETAG_ASCII_LEN])?;
let hash = FossilizeHash::from_ascii_bytes(&name[FOSSILIZETAG_ASCII_LEN..])?;
let payload_entry = PayloadEntry::new_from_slice(
self.file.seek(io::SeekFrom::Current(0))?,
&name_and_header[PAYLOAD_NAME_LEN_BYTES..]
);
if to_discard.contains(&(tag, hash)) {
/* skip over this entry */
let res = self.file.seek(io::SeekFrom::Current(payload_entry.payload_info.size as i64));
match res {
Ok(_) => {
},
Err(e) => {
/* truncated chunk is not fatal */
if e.kind() != io::ErrorKind::UnexpectedEof {
return Err(Error::IOError(e));
}
},
}
} else {
let mut read_pos = self.file.seek(io::SeekFrom::Current(0))?;
if self.write_pos == read_pos - name_and_header.len() as u64 {
/* if we haven't dropped any chunks, we can just skip it rather than rewrite it */
let res = self.file.seek(io::SeekFrom::Current(payload_entry.payload_info.size as i64));
match res {
Ok(p) => {
self.write_pos = p;
},
Err(e) => {
/* truncated chunk is not fatal */
if e.kind() != io::ErrorKind::UnexpectedEof {
return Err(Error::IOError(e));
}
},
}
} else {
/* we're offset, so we have to rewrite */
self.file.seek(io::SeekFrom::Start(self.write_pos))?;
{
/* write header */
let mut name = [0u8; PAYLOAD_NAME_LEN_BYTES];
name[0..FOSSILIZETAG_ASCII_LEN].copy_from_slice(&tag.to_ascii_bytes());
name[FOSSILIZETAG_ASCII_LEN..].copy_from_slice(&hash.to_ascii_bytes());
self.file.write_all(&name)?;
self.write_pos += name.len() as u64;
let buf = payload_entry.payload_info.to_slice();
self.file.write_all(&buf)?;
self.write_pos += buf.len() as u64;
}
/* copy contents */
/* read and write cursors alternate over the same file handle,
 * so each iteration seeks explicitly to its own position */
const BUFFER_COPY_BYTES: usize = 8 * 1024 * 1024; /* tuneable */
let mut buf = box_array![0u8; BUFFER_COPY_BYTES];
let end_read = read_pos + payload_entry.payload_info.size as u64;
loop {
let to_read = std::cmp::min((end_read - read_pos) as usize, BUFFER_COPY_BYTES);
if to_read == 0 {
break;
}
self.file.seek(io::SeekFrom::Start(read_pos))?;
let readed = self.file.read(&mut (*buf)[0..to_read])?;
if readed == 0 {
break;
}
read_pos += readed as u64;
self.file.seek(io::SeekFrom::Start(self.write_pos))?;
self.file.write_all(&buf[0..readed])?;
self.write_pos += readed as u64;
}
/* leave the read cursor after the copied payload for the next entry */
self.file.seek(io::SeekFrom::Start(read_pos))?;
}
}
}
/* drop the tail left over from discarded entries and rebuild the index */
self.file.set_len(self.write_pos)?;
self.prepare()
}
}

View file

@ -141,6 +141,14 @@ impl<'a> Read for BufferedReader<'a> {
}
}
/// Returns true when MEDIACONV_DONT_DISCARD is set to any value other
/// than "0". An unset variable means discarding stays enabled.
fn discarding_disabled() -> bool {
    match std::env::var("MEDIACONV_DONT_DISCARD") {
        Ok(val) => val != "0",
        Err(_) => false,
    }
}
fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> {
videoconv::register(plugin)?;
audioconvbin::register(plugin)?;

View file

@ -35,7 +35,9 @@ use glib::subclass::prelude::*;
use crate::format_hash;
use crate::HASH_SEED;
use crate::box_array;
use crate::copy_into_array;
use crate::BufferedReader;
use crate::discarding_disabled;
use gst;
use gst::prelude::*;
@ -112,22 +114,89 @@ const VIDEOCONV_FOZ_TAG_OGVDATA: u32 = 1;
const VIDEOCONV_FOZ_TAG_STREAM: u32 = 2;
const VIDEOCONV_FOZ_NUM_TAGS: usize = 3;
static DUMP_FOZDB: Lazy<Mutex<Option<fossilize::StreamArchive>>> = Lazy::new(|| {
let dump_file_path = match std::env::var("MEDIACONV_VIDEO_DUMP_FILE") {
Err(_) => { return Mutex::new(None); },
Ok(c) => c,
};
/* Lazily-opened wrapper around the video dump fossilize database,
 * remembering whether the one-time cleanup of already-transcoded
 * entries has been performed. */
struct VideoConverterDumpFozdb {
fozdb: Option<fossilize::StreamArchive>, /* None until open() succeeds */
already_cleaned: bool, /* set once discard_transcoded() has run */
}
let dump_file_path = std::path::Path::new(&dump_file_path);
if fs::create_dir_all(&dump_file_path.parent().unwrap()).is_err() {
return Mutex::new(None);
impl VideoConverterDumpFozdb {
fn new() -> Self {
Self {
fozdb: None,
already_cleaned: false,
}
}
match fossilize::StreamArchive::new(&dump_file_path, OpenOptions::new().write(true).read(true).create(true), VIDEOCONV_FOZ_NUM_TAGS) {
Ok(newdb) => Mutex::new(Some(newdb)),
Err(_) => Mutex::new(None),
/* Opens the dump database named by MEDIACONV_VIDEO_DUMP_FILE if it is not
 * already open. On any failure (variable unset, parent-directory creation
 * or archive open failing) self.fozdb stays None; callers inspect
 * self.fozdb after the call. Returns &mut self to allow chaining. */
fn open(&mut self, create: bool) -> &mut Self {
if self.fozdb.is_none() {
let dump_file_path = match std::env::var("MEDIACONV_VIDEO_DUMP_FILE") {
Err(_) => { return self; },
Ok(c) => c,
};
let dump_file_path = std::path::Path::new(&dump_file_path);
if fs::create_dir_all(&dump_file_path.parent().unwrap()).is_err() {
return self;
}
/* 'create' controls whether a missing dump file is created
 * (true when dumping, false when only cleaning up) */
match fossilize::StreamArchive::new(&dump_file_path, OpenOptions::new().write(true).read(true).create(create), false /* read-only? */, VIDEOCONV_FOZ_NUM_TAGS) {
Ok(newdb) => {
self.fozdb = Some(newdb);
},
Err(_) => {
return self;
},
}
}
self
}
/* Drops the archive handle, closing the underlying file. */
fn close(&mut self) {
self.fozdb = None
}
/* One-time cleanup of the dump database: a stream whose transcoded OGV
 * output already exists in the read-only MEDIACONV_VIDEO_TRANSCODED_FILE
 * database is discarded together with its video-data chunks. Runs at
 * most once per process. */
fn discard_transcoded(&mut self) {
if self.already_cleaned {
return;
}
/* MEDIACONV_DONT_DISCARD set: skip the cleanup but don't retry later */
if discarding_disabled() {
self.already_cleaned = true;
return;
}
/* open without creating: if there is no dump db, nothing to clean */
if let Some(fozdb) = &mut self.open(false).fozdb {
if let Ok(read_fozdb_path) = std::env::var("MEDIACONV_VIDEO_TRANSCODED_FILE") {
if let Ok(read_fozdb) = fossilize::StreamArchive::new(&read_fozdb_path, OpenOptions::new().read(true), true /* read-only? */, VIDEOCONV_FOZ_NUM_TAGS) {
let mut chunks = Vec::<(u32, u128)>::new();
/* a stream entry is a sequence of 16-byte chunk ids */
for stream_id in fozdb.iter_tag(VIDEOCONV_FOZ_TAG_STREAM).cloned().collect::<Vec<u128>>() {
if read_fozdb.has_entry(VIDEOCONV_FOZ_TAG_OGVDATA, stream_id) {
if let Ok(chunks_size) = fozdb.entry_size(VIDEOCONV_FOZ_TAG_STREAM, stream_id) {
let mut buf = vec![0u8; chunks_size].into_boxed_slice();
if fozdb.read_entry(VIDEOCONV_FOZ_TAG_STREAM, stream_id, 0, &mut buf, fossilize::CRCCheck::WithCRC).is_ok() {
for i in 0..(chunks_size / 16) {
let offs = i * 16;
let chunk_id = u128::from_le_bytes(copy_into_array(&buf[offs..offs + 16]));
chunks.push((VIDEOCONV_FOZ_TAG_VIDEODATA, chunk_id));
}
}
}
chunks.push((VIDEOCONV_FOZ_TAG_STREAM, stream_id));
}
}
/* a failed rewrite may leave the db inconsistent: drop the handle */
if fozdb.discard_entries(&chunks).is_err() {
self.close();
}
}
}
}
self.already_cleaned = true;
}
}
static DUMP_FOZDB: Lazy<Mutex<VideoConverterDumpFozdb>> = Lazy::new(|| {
Mutex::new(VideoConverterDumpFozdb::new())
});
struct PadReader<'a> {
@ -237,7 +306,7 @@ impl VideoConvState {
gst_loggable_error!(CAT, "MEDIACONV_VIDEO_TRANSCODED_FILE is not set!")
})?;
let read_fozdb = match fossilize::StreamArchive::new(&read_fozdb_path, OpenOptions::new().read(true), VIDEOCONV_FOZ_NUM_TAGS) {
let read_fozdb = match fossilize::StreamArchive::new(&read_fozdb_path, OpenOptions::new().read(true), true /* read-only? */, VIDEOCONV_FOZ_NUM_TAGS) {
Ok(s) => Some(s),
Err(_) => None,
};
@ -611,8 +680,10 @@ impl VideoConv {
}
fn dump_upstream_data(&self, hash: u128) -> io::Result<()> {
let mut db = (*DUMP_FOZDB).lock().unwrap();
let db = match &mut *db {
let db = &mut (*DUMP_FOZDB).lock().unwrap();
let mut db = &mut db.open(true).fozdb;
let db = match &mut db {
Some(d) => d,
None => { gst_error!(CAT, "Unable to open fozdb!"); return Err(io::Error::new(io::ErrorKind::Other, "unable to open fozdb")); },
};
@ -646,6 +717,8 @@ impl VideoConv {
state: &mut VideoConvState
) -> Result<(), gst::LoggableError> {
(*DUMP_FOZDB).lock().unwrap().discard_transcoded();
let hash = self.hash_upstream_data();
if let Ok(hash) = hash {

90
proton
View file

@ -35,6 +35,13 @@ CURRENT_PREFIX_VERSION="6.3-3"
PFX="Proton: "
ld_path_var = "LD_LIBRARY_PATH"
def file_exists(s, *, follow_symlinks):
    """Return True if path s exists.

    With follow_symlinks=True a broken symlink counts as missing
    (os.path.exists); with follow_symlinks=False it counts as present
    (os.path.lexists).
    """
    check = os.path.exists if follow_symlinks else os.path.lexists
    return check(s)
def nonzero(s):
    """Return True for a non-empty string that is not exactly "0"."""
    return bool(s) and s != "0"
@ -60,7 +67,7 @@ def file_is_wine_builtin_dll(path):
if os.path.dirname(contents).endswith(('/lib/wine', '/lib/wine/fakedlls', '/lib64/wine', '/lib64/wine/fakedlls')):
# This may be a broken link to a dll in a removed Proton install
return True
if not os.path.exists(path):
if not file_exists(path, follow_symlinks=True):
return False
try:
sfile = open(path, "rb")
@ -72,6 +79,9 @@ def file_is_wine_builtin_dll(path):
def makedirs(path):
try:
#replace broken symlinks with a new directory
if os.path.islink(path) and not file_exists(path, follow_symlinks=True):
os.remove(path)
os.makedirs(path)
except OSError:
#already exists
@ -93,17 +103,17 @@ def merge_user_dir(src, dst):
#we only want to copy into directories which don't already exist. games
#may not react well to two save directory instances being merged.
if not os.path.exists(dst_dir) or os.path.samefile(dst_dir, dst):
if not file_exists(dst_dir, follow_symlinks=True) or os.path.samefile(dst_dir, dst):
makedirs(dst_dir)
for dir_ in dirs:
src_file = os.path.join(src_dir, dir_)
dst_file = os.path.join(dst_dir, dir_)
if os.path.islink(src_file) and not os.path.exists(dst_file):
if os.path.islink(src_file) and not file_exists(dst_file, follow_symlinks=True):
try_copy(src_file, dst_file, copy_metadata=True, follow_symlinks=False)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if not os.path.exists(dst_file):
if not file_exists(dst_file, follow_symlinks=True):
try_copy(src_file, dst_file, copy_metadata=True, follow_symlinks=False)
else:
extant_dirs += dst_dir
@ -112,11 +122,11 @@ def try_copy(src, dst, add_write_perm=True, copy_metadata=False, optional=False,
try:
if os.path.isdir(dst):
dstfile = dst + "/" + os.path.basename(src)
if os.path.lexists(dstfile):
if file_exists(dstfile, follow_symlinks=False):
os.remove(dstfile)
else:
dstfile = dst
if os.path.lexists(dst):
if file_exists(dst, follow_symlinks=False):
os.remove(dst)
if copy_metadata:
@ -145,9 +155,9 @@ def try_copyfile(src, dst):
try:
if os.path.isdir(dst):
dstfile = dst + "/" + os.path.basename(src)
if os.path.lexists(dstfile):
if file_exists(dstfile, follow_symlinks=False):
os.remove(dstfile)
elif os.path.lexists(dst):
elif file_exists(dst, follow_symlinks=False):
os.remove(dst)
shutil.copyfile(src, dst)
except PermissionError as e:
@ -253,7 +263,7 @@ def find_nvidia_wine_dll_dir():
nvidia_wine_dir = os.path.join(os.path.dirname(libglx_nvidia_realpath), "nvidia", "wine")
# Check that nvngx.dll exists here, or fail
if os.path.exists(os.path.join(nvidia_wine_dir, "nvngx.dll")):
if file_exists(os.path.join(nvidia_wine_dir, "nvngx.dll"), follow_symlinks=True):
return nvidia_wine_dir
return None
@ -299,18 +309,18 @@ class Proton:
def need_tarball_extraction(self):
'''Checks if the proton_dist tarball archive must be extracted. Returns true if extraction is needed, false otherwise'''
return not os.path.exists(self.dist_dir) or \
not os.path.exists(self.path("dist/version")) or \
return not file_exists(self.dist_dir, follow_symlinks=True) or \
not file_exists(self.path("dist/version"), follow_symlinks=True) or \
not filecmp.cmp(self.version_file, self.path("dist/version"))
def extract_tarball(self):
with self.dist_lock:
if self.need_tarball_extraction():
if os.path.exists(self.dist_dir):
if file_exists(self.dist_dir, follow_symlinks=True):
shutil.rmtree(self.dist_dir)
tar = None
for sf in ["", ".xz", ".bz2", ".gz"]:
if os.path.exists(self.path("proton_dist.tar" + sf)):
if file_exists(self.path("proton_dist.tar" + sf), follow_symlinks=True):
tar = tarfile.open(self.path("proton_dist.tar" + sf), mode="r:*")
break
if not tar:
@ -347,7 +357,7 @@ class CompatData:
return self.base_dir + d
def remove_tracked_files(self):
if not os.path.exists(self.tracked_files_file):
if not file_exists(self.tracked_files_file, follow_symlinks=True):
log("Prefix has no tracked_files??")
return
@ -355,7 +365,7 @@ class CompatData:
dirs = []
for f in tracked_files:
path = self.prefix_dir + f.strip()
if os.path.exists(path):
if file_exists(path, follow_symlinks=False):
if os.path.isfile(path) or os.path.islink(path):
os.remove(path)
else:
@ -395,21 +405,21 @@ class CompatData:
(int(new_proton_maj) == int(old_proton_maj) and \
int(new_proton_min) < int(old_proton_min)):
log("Removing newer prefix")
if old_proton_ver == "3.7" and not os.path.exists(self.tracked_files_file):
if old_proton_ver == "3.7" and not file_exists(self.tracked_files_file, follow_symlinks=True):
#proton 3.7 did not generate tracked_files, so copy it into place first
try_copy(g_proton.path("proton_3.7_tracked_files"), self.tracked_files_file)
self.remove_tracked_files()
return
if old_proton_ver == "3.7" and old_prefix_ver == "1":
if not os.path.exists(self.prefix_dir + "/drive_c/windows/syswow64/kernel32.dll"):
if not file_exists(self.prefix_dir + "/drive_c/windows/syswow64/kernel32.dll", follow_symlinks=True):
#shipped a busted 64-bit-only installation on 20180822. detect and wipe clean
log("Detected broken 64-bit-only installation, re-creating prefix.")
shutil.rmtree(self.prefix_dir)
return
#replace broken .NET installations with wine-mono support
if os.path.exists(self.prefix_dir + "/drive_c/windows/Microsoft.NET/NETFXRepair.exe") and \
if file_exists(self.prefix_dir + "/drive_c/windows/Microsoft.NET/NETFXRepair.exe", follow_symlinks=True) and \
file_is_wine_builtin_dll(self.prefix_dir + "/drive_c/windows/system32/mscoree.dll"):
log("Broken .NET installation detected, switching to wine-mono.")
#deleting this directory allows wine-mono to work
@ -490,7 +500,7 @@ class CompatData:
stale_builtins = [self.prefix_dir + "/drive_c/windows/system32/amd_ags_x64.dll",
self.prefix_dir + "/drive_c/windows/syswow64/amd_ags_x64.dll" ]
for builtin in stale_builtins:
if os.path.lexists(builtin) and file_is_wine_builtin_dll(builtin):
if file_exists(builtin, follow_symlinks=False) and file_is_wine_builtin_dll(builtin):
log("Removing stale builtin " + builtin)
os.remove(builtin)
@ -520,18 +530,18 @@ class CompatData:
if len(rel_dir) > 0:
rel_dir = rel_dir + "/"
dst_dir = src_dir.replace(g_proton.default_pfx_dir, self.prefix_dir, 1)
if not os.path.lexists(dst_dir):
os.makedirs(dst_dir)
if not file_exists(dst_dir, follow_symlinks=True):
makedirs(dst_dir)
tracked_files.write(rel_dir + "\n")
for dir_ in dirs:
src_file = os.path.join(src_dir, dir_)
dst_file = os.path.join(dst_dir, dir_)
if os.path.islink(src_file) and not os.path.exists(dst_file):
if os.path.islink(src_file) and not file_exists(dst_file, follow_symlinks=True):
self.pfx_copy(src_file, dst_file)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if not os.path.exists(dst_file):
if not file_exists(dst_file, follow_symlinks=True):
self.pfx_copy(src_file, dst_file)
tracked_files.write(rel_dir + file_ + "\n")
@ -547,8 +557,8 @@ class CompatData:
if len(rel_dir) > 0:
rel_dir = rel_dir + "/"
dst_dir = src_dir.replace(g_proton.default_pfx_dir, self.prefix_dir, 1)
if not os.path.lexists(dst_dir):
os.makedirs(dst_dir)
if not file_exists(dst_dir, follow_symlinks=True):
makedirs(dst_dir)
tracked_files.write(rel_dir + "\n")
for file_ in files:
src_file = os.path.join(src_dir, file_)
@ -558,7 +568,7 @@ class CompatData:
continue
if file_is_wine_builtin_dll(dst_file):
os.unlink(dst_file)
elif os.path.lexists(dst_file):
elif file_exists(dst_file, follow_symlinks=False):
# builtin library was replaced
continue
else:
@ -591,7 +601,7 @@ class CompatData:
for p in fontsmap:
lname = os.path.join(windowsfonts, p[2])
fname = os.path.join(p[0], p[1])
if os.path.lexists(lname):
if file_exists(lname, follow_symlinks=False):
if os.path.islink(lname):
os.remove(lname)
os.symlink(fname, lname)
@ -616,15 +626,15 @@ class CompatData:
#running unofficial Proton/Wine builds against a Proton prefix could
#create an infinite symlink loop. detect this and clean it up.
if os.path.lexists(new) and os.path.islink(new) and os.readlink(new).endswith(old):
if file_exists(new, follow_symlinks=False) and os.path.islink(new) and os.readlink(new).endswith(old):
os.remove(new)
old = self.prefix_dir + old
if os.path.lexists(old) and not os.path.islink(old):
if file_exists(old, follow_symlinks=False) and not os.path.islink(old):
merge_user_dir(src=old, dst=new)
os.rename(old, old + " BACKUP")
if not os.path.lexists(old):
if not file_exists(old, follow_symlinks=False):
makedirs(os.path.dirname(old))
os.symlink(src=link, dst=old)
elif os.path.islink(old) and not (os.readlink(old) == link):
@ -633,7 +643,7 @@ class CompatData:
def setup_prefix(self):
with self.prefix_lock:
if os.path.exists(self.version_file):
if file_exists(self.version_file, follow_symlinks=True):
with open(self.version_file, "r") as f:
old_ver = f.readline().strip()
else:
@ -641,11 +651,11 @@ class CompatData:
self.upgrade_pfx(old_ver)
if not os.path.exists(self.prefix_dir):
if not file_exists(self.prefix_dir, follow_symlinks=True):
makedirs(self.prefix_dir + "/drive_c")
set_dir_casefold_bit(self.prefix_dir + "/drive_c")
if not os.path.exists(self.prefix_dir + "/user.reg"):
if not file_exists(self.prefix_dir + "/user.reg", follow_symlinks=True):
self.copy_pfx()
self.migrate_user_paths()
@ -813,9 +823,9 @@ class CompatData:
else:
nvapi64_dll = self.prefix_dir + "drive_c/windows/system32/nvapi64.dll"
nvapi32_dll = self.prefix_dir + "drive_c/windows/syswow64/nvapi.dll"
if os.path.exists(nvapi64_dll):
if file_exists(nvapi64_dll, follow_symlinks=False):
os.unlink(nvapi64_dll)
if os.path.exists(nvapi32_dll):
if file_exists(nvapi32_dll, follow_symlinks=False):
os.unlink(nvapi32_dll)
# Try to detect known DLLs that ship with the NVIDIA Linux Driver
@ -836,17 +846,17 @@ class CompatData:
if "gamedrive" in g_session.compat_config:
library_dir = try_get_game_library_dir()
if not library_dir:
if os.path.lexists(gamedrive_path):
if file_exists(gamedrive_path, follow_symlinks=False):
os.remove(gamedrive_path)
else:
if os.path.lexists(gamedrive_path):
if file_exists(gamedrive_path, follow_symlinks=False):
cur_tgt = os.readlink(gamedrive_path)
if cur_tgt != library_dir:
os.remove(gamedrive_path)
os.symlink(library_dir, gamedrive_path)
else:
os.symlink(library_dir, gamedrive_path)
elif os.path.lexists(gamedrive_path):
elif file_exists(gamedrive_path, follow_symlinks=False):
os.remove(gamedrive_path)
def comma_escaped(s):
@ -955,7 +965,7 @@ class Session:
lfile_path = basedir + "/steam-" + os.environ["SteamGameId"] + ".log"
if os.path.exists(lfile_path):
if file_exists(lfile_path, follow_symlinks=False):
os.remove(lfile_path)
makedirs(basedir)
@ -967,7 +977,7 @@ class Session:
#load environment overrides
used_user_settings = {}
if os.path.exists(g_proton.user_settings_file):
if file_exists(g_proton.user_settings_file, follow_symlinks=True):
try:
import user_settings
for key, value in user_settings.user_settings.items():