NonResidentAttributeRecord.cs

/* Copyright (C) 2014 Tal Aloni <tal.aloni.il@gmail.com>. All rights reserved.
 *
 * You can redistribute this program and/or modify it under the terms of
 * the GNU Lesser Public License as published by the Free Software Foundation,
 * either version 3 of the License, or (at your option) any later version.
 */
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using Utilities;

namespace DiskAccessLibrary.FileSystems.NTFS
{
    public class NonResidentAttributeRecord : AttributeRecord
    {
        public const int HeaderLength = 0x40;

        // The first and last VCNs of the attribute:
        // Note: the maximum NTFS file size is 2^64 bytes, so the total number of file clusters can be represented using a long.
        public long LowestVCN;  // The lowest VCN covered by this attribute record, stored as unsigned but within the range of long, see note above. (a.k.a. LowVCN)
        public long HighestVCN; // The highest VCN covered by this attribute record, stored as unsigned but within the range of long, see note above. (a.k.a. HighVCN)
        // private ushort mappingPairsOffset;
        public ushort CompressionUnitSize;
        // 4 reserved bytes
        // ulong AllocatedLength; // An even multiple of the cluster size (not valid if the LowestVCN member is nonzero*)
        public ulong FileSize; // The real size of the file with all of its runs combined (not valid if the LowestVCN member is nonzero*)
        public ulong ValidDataLength; // Actual data written so far, always less than or equal to FileSize.
                                      // Data beyond ValidDataLength should be treated as 0. (not valid if the LowestVCN member is nonzero*)
        // * See: http://msdn.microsoft.com/en-us/library/bb470039%28v=vs.85%29.aspx

        private DataRunSequence m_dataRunSequence = new DataRunSequence();
        // A data run NULL terminator follows the mapping pairs.
        // I've noticed that Windows Server 2003 puts 0x00 0x01 here for the $MFT FileRecord, which seems to have no effect.
        // (I've set it to 0 for the $MFT FileRecord in the MFT and the MFT mirror, and chkdsk did not report a problem.)
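
        // For reference, a worked example of the mapping pairs (data run) encoding parsed below,
        // assuming the standard NTFS on-disk format (the actual decoding lives in DataRun.Read):
        // bytes 21 18 34 56 00 -> header 0x21: low nibble 1 = one length byte, high nibble 2 = two offset bytes;
        // run length = 0x18 (24 clusters); run offset = 0x5634 (22068 clusters, relative to the previous run's start LCN);
        // the trailing 0x00 header byte terminates the sequence.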
        public NonResidentAttributeRecord(byte[] buffer, int offset) : base(buffer, offset)
        {
            LowestVCN = (long)LittleEndianConverter.ToUInt64(buffer, offset + 0x10);
            HighestVCN = (long)LittleEndianConverter.ToUInt64(buffer, offset + 0x18);
            ushort mappingPairsOffset = LittleEndianConverter.ToUInt16(buffer, offset + 0x20);
            CompressionUnitSize = LittleEndianConverter.ToUInt16(buffer, offset + 0x22);
            // AllocatedLength (offset 0x28) is not kept as a field; it is recalculated from the data runs when writing.
            ulong allocatedLength = LittleEndianConverter.ToUInt64(buffer, offset + 0x28);
            FileSize = LittleEndianConverter.ToUInt64(buffer, offset + 0x30);
            ValidDataLength = LittleEndianConverter.ToUInt64(buffer, offset + 0x38);

            int position = offset + mappingPairsOffset;
            while (position < offset + this.StoredRecordLength)
            {
                DataRun run = new DataRun();
                int length = run.Read(buffer, position);
                position += length;

                // Length 1 means there was only a header byte (i.e. terminator)
                if (length == 1)
                {
                    break;
                }
                m_dataRunSequence.Add(run);
            }

            if ((HighestVCN - LowestVCN + 1) != m_dataRunSequence.DataClusterCount)
            {
                throw new InvalidDataException("Invalid non-resident attribute record");
            }
        }

        public override byte[] GetBytes(int bytesPerCluster)
        {
            ushort mappingPairsOffset = (ushort)(HeaderLength + Name.Length * 2);
            uint length = this.RecordLength;
            byte[] buffer = new byte[length];
            WriteHeader(buffer, HeaderLength);

            ulong allocatedLength = (ulong)(m_dataRunSequence.DataClusterCount * bytesPerCluster);
            LittleEndianWriter.WriteInt64(buffer, 0x10, LowestVCN);
            LittleEndianWriter.WriteInt64(buffer, 0x18, HighestVCN);
            LittleEndianWriter.WriteUInt16(buffer, 0x20, mappingPairsOffset);
            LittleEndianWriter.WriteUInt16(buffer, 0x22, CompressionUnitSize);
            LittleEndianWriter.WriteUInt64(buffer, 0x28, allocatedLength);
            LittleEndianWriter.WriteUInt64(buffer, 0x30, FileSize);
            // Offset 0x38 holds ValidDataLength on disk; FileSize is written here, treating all of the data as valid.
            LittleEndianWriter.WriteUInt64(buffer, 0x38, FileSize);

            int position = mappingPairsOffset;
            foreach (DataRun run in m_dataRunSequence)
            {
                byte[] runBytes = run.GetBytes();
                Array.Copy(runBytes, 0, buffer, position, runBytes.Length);
                position += runBytes.Length;
            }
            buffer[position] = 0; // Null termination
            return buffer;
        }

        /// <summary>
        /// Reads all of the data the attribute has. This should only be used when the data length is manageable.
        /// </summary>
        public override byte[] GetData(NTFSVolume volume)
        {
            long clusterCount = HighestVCN - LowestVCN + 1;
            if (clusterCount > Int32.MaxValue)
            {
                throw new InvalidOperationException("Improper usage of GetData() method");
            }
            return ReadDataClusters(volume, LowestVCN, (int)clusterCount);
        }

        /// <param name="clusterVCN">Cluster index</param>
        public byte[] ReadDataCluster(NTFSVolume volume, long clusterVCN)
        {
            return ReadDataClusters(volume, clusterVCN, 1);
        }

        /// <param name="count">Maximum number of clusters to read</param>
        public byte[] ReadDataClusters(NTFSVolume volume, long firstClusterVCN, int count)
        {
            long lastClusterVcnToRead = firstClusterVCN + count - 1;
            if (firstClusterVCN < LowestVCN || firstClusterVCN > HighestVCN)
            {
                string message = String.Format("Cluster VCN {0}-{1} is not within the valid range ({2}-{3})", firstClusterVCN, lastClusterVcnToRead, LowestVCN, HighestVCN);
                throw new ArgumentOutOfRangeException("firstClusterVCN", message);
            }

            if (lastClusterVcnToRead > HighestVCN)
            {
                lastClusterVcnToRead = HighestVCN;
            }

            byte[] result = new byte[count * volume.BytesPerCluster];
            KeyValuePairList<long, int> sequence = m_dataRunSequence.TranslateToLCN(firstClusterVCN - LowestVCN, count);
            long bytesRead = 0;
            foreach (KeyValuePair<long, int> run in sequence)
            {
                byte[] clusters = volume.ReadClusters(run.Key, run.Value);
                Array.Copy(clusters, 0, result, bytesRead, clusters.Length);
                bytesRead += clusters.Length;
            }

            // If the last cluster is only partially used, or we have been asked to read clusters beyond the last cluster, trim the result.
            // (Either of those cases can only be true if we have just read the last cluster.)
            if (lastClusterVcnToRead == HighestVCN)
            {
                long bytesToUse = (long)(FileSize - (ulong)firstClusterVCN * (uint)volume.BytesPerCluster);
                if (bytesToUse < result.Length)
                {
                    byte[] resultTrimmed = new byte[bytesToUse];
                    Array.Copy(result, resultTrimmed, bytesToUse);
                    return resultTrimmed;
                }
            }
            return result;
        }

        public void WriteDataClusters(NTFSVolume volume, long firstClusterVCN, byte[] data)
        {
            int count;
            long lastClusterVcnToWrite;

            if (data.Length % volume.BytesPerCluster > 0)
            {
                int paddedLength = (int)Math.Ceiling((double)data.Length / volume.BytesPerCluster) * volume.BytesPerCluster;
                count = paddedLength / volume.BytesPerCluster;
                lastClusterVcnToWrite = firstClusterVCN + count - 1;
                if (lastClusterVcnToWrite == HighestVCN)
                {
                    // The last cluster can be partial; zero-fill it before writing.
                    byte[] temp = new byte[paddedLength];
                    Array.Copy(data, temp, data.Length);
                    data = temp;
                }
                else
                {
                    // Only the last cluster of the attribute can be partial
                    throw new ArgumentException("Cannot write partial cluster");
                }
            }
            else
            {
                count = data.Length / volume.BytesPerCluster;
                lastClusterVcnToWrite = firstClusterVCN + count - 1;
            }

            if (firstClusterVCN < LowestVCN || lastClusterVcnToWrite > HighestVCN)
            {
                string message = String.Format("Cluster VCN {0}-{1} is not within the valid range ({2}-{3})", firstClusterVCN, lastClusterVcnToWrite, LowestVCN, HighestVCN);
                throw new ArgumentOutOfRangeException("firstClusterVCN", message);
            }

            // Note: unlike ReadDataClusters, the VCN passed to TranslateToLCN is not adjusted by LowestVCN here.
            KeyValuePairList<long, int> sequence = m_dataRunSequence.TranslateToLCN(firstClusterVCN, count);
            long bytesWritten = 0;
            foreach (KeyValuePair<long, int> run in sequence)
            {
                byte[] clusters = new byte[run.Value * volume.BytesPerCluster];
                Array.Copy(data, bytesWritten, clusters, 0, clusters.Length);
                volume.WriteClusters(run.Key, clusters);
                bytesWritten += clusters.Length;
            }
        }

        public byte[] ReadDataSectors(NTFSVolume volume, long firstSectorIndex, int count)
        {
            long firstClusterVcn = firstSectorIndex / volume.SectorsPerCluster;
            int sectorsToSkip = (int)(firstSectorIndex % volume.SectorsPerCluster);
            int clustersToRead = (int)Math.Ceiling((double)(count + sectorsToSkip) / volume.SectorsPerCluster);
            byte[] clusters = ReadDataClusters(volume, firstClusterVcn, clustersToRead);
            byte[] result = new byte[count * volume.BytesPerSector];
            Array.Copy(clusters, sectorsToSkip * volume.BytesPerSector, result, 0, result.Length);
            return result;
        }

        public void WriteDataSectors(NTFSVolume volume, long firstSectorIndex, byte[] data)
        {
            // Read-modify-write: read the affected clusters, patch the requested sectors, and write the clusters back.
            int count = data.Length / volume.BytesPerSector;
            long firstClusterVcn = firstSectorIndex / volume.SectorsPerCluster;
            int sectorsToSkip = (int)(firstSectorIndex % volume.SectorsPerCluster);
            int clustersToRead = (int)Math.Ceiling((double)(count + sectorsToSkip) / volume.SectorsPerCluster);
            byte[] clusters = ReadDataClusters(volume, firstClusterVcn, clustersToRead);
            Array.Copy(data, 0, clusters, sectorsToSkip * volume.BytesPerSector, data.Length);
            WriteDataClusters(volume, firstClusterVcn, clusters);
        }
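
        // Illustrative arithmetic for ExtendRecord below (assumed values, not taken from the source):
        // with FileSize = 5000 and BytesPerCluster = 4096, numberOfClusters = 2 and
        // freeBytesInLastCluster = 2 * 4096 - 5000 = 3192; extending by 2000 bytes fits in the last
        // cluster, while extending by 4000 bytes allocates ceil((4000 - 3192) / 4096) = 1 extra cluster.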
        public void ExtendRecord(NTFSVolume volume, ulong additionalLength)
        {
            long numberOfClusters = (long)Math.Ceiling((double)FileSize / volume.BytesPerCluster);
            int freeBytesInLastCluster = (int)(numberOfClusters * volume.BytesPerCluster - (long)FileSize);
            if (additionalLength > (uint)freeBytesInLastCluster)
            {
                ulong bytesToAllocate = additionalLength - (uint)freeBytesInLastCluster;
                long clustersToAllocate = (long)Math.Ceiling((double)bytesToAllocate / volume.BytesPerCluster);
                AllocateAdditionalClusters(volume, clustersToAllocate);
            }
            FileSize += additionalLength;
        }

        // The maximum NTFS file size is 2^64 bytes, so the total number of file clusters can be represented using a long
        public void AllocateAdditionalClusters(NTFSVolume volume, long clustersToAllocate)
        {
            ulong desiredStartLCN = (ulong)DataRunSequence.DataLastLCN;
            KeyValuePairList<ulong, long> freeClusterRunList = volume.AllocateClusters(desiredStartLCN, clustersToAllocate);
            for (int index = 0; index < freeClusterRunList.Count; index++)
            {
                ulong runStartLCN = freeClusterRunList[index].Key;
                long runLength = freeClusterRunList[index].Value;
                bool mergeWithLastRun = (index == 0 && runStartLCN == desiredStartLCN);
                if (mergeWithLastRun)
                {
                    // Append this run to the last run
                    DataRun lastRun = DataRunSequence[DataRunSequence.Count - 1];
                    lastRun.RunLength += runLength;
                    HighestVCN += runLength;
                }
                else
                {
                    DataRun run = new DataRun();
                    ulong previousLCN = (ulong)DataRunSequence.LastDataRunStartLCN;
                    run.RunOffset = (long)(runStartLCN - previousLCN);
                    run.RunLength = runLength;
                    HighestVCN += runLength;
                    DataRunSequence.Add(run);
                }
            }
        }

        /// <summary>
        /// When reading attributes from disk, the stored record may contain additional padding,
        /// so StoredRecordLength should be used to advance the buffer position instead of this computed value.
        /// </summary>
        public override uint RecordLength
        {
            get
            {
                int dataRunSequenceLength = m_dataRunSequence.RecordLength;
                ushort mappingPairsOffset = (ushort)(HeaderLength + Name.Length * 2);
                uint length = (uint)(mappingPairsOffset + dataRunSequenceLength);
                // Each record is aligned to an 8-byte boundary
                length = (uint)Math.Ceiling((double)length / 8) * 8;
                return length;
            }
        }

        public DataRunSequence DataRunSequence
        {
            get
            {
                return m_dataRunSequence;
            }
        }

        public long DataClusterCount
        {
            get
            {
                return m_dataRunSequence.DataClusterCount;
            }
        }
    }
}
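
// Usage sketch (illustrative only; how the record is obtained from a parsed FileRecord is outside this file):
//
//   NTFSVolume volume = ...;                      // an open NTFS volume
//   NonResidentAttributeRecord record = ...;      // a non-resident attribute parsed from a file record
//   byte[] allData = record.GetData(volume);      // only when the attribute data is small enough
//   byte[] firstCluster = record.ReadDataCluster(volume, record.LowestVCN);
//   byte[] firstSectors = record.ReadDataSectors(volume, 0, 8);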